Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/ac.c | 2
-rw-r--r--  drivers/acpi/battery.c | 4
-rw-r--r--  drivers/acpi/blacklist.c | 58
-rw-r--r--  drivers/acpi/button.c | 2
-rw-r--r--  drivers/acpi/container.c | 5
-rw-r--r--  drivers/acpi/dock.c | 13
-rw-r--r--  drivers/acpi/fan.c | 3
-rw-r--r--  drivers/acpi/pci_irq.c | 1
-rw-r--r--  drivers/acpi/proc.c | 2
-rw-r--r--  drivers/acpi/processor_throttling.c | 69
-rw-r--r--  drivers/acpi/sbs.c | 4
-rw-r--r--  drivers/acpi/scan.c | 6
-rw-r--r--  drivers/acpi/thermal.c | 2
-rw-r--r--  drivers/acpi/utils.c | 4
-rw-r--r--  drivers/acpi/video.c | 147
-rw-r--r--  drivers/acpi/video_detect.c | 8
-rw-r--r--  drivers/ata/Kconfig | 1
-rw-r--r--  drivers/ata/ahci.c | 18
-rw-r--r--  drivers/ata/libata-pmp.c | 7
-rw-r--r--  drivers/ata/pata_imx.c | 8
-rw-r--r--  drivers/ata/sata_mv.c | 16
-rw-r--r--  drivers/ata/sata_sil.c | 1
-rw-r--r--  drivers/base/component.c | 8
-rw-r--r--  drivers/base/dma-buf.c | 25
-rw-r--r--  drivers/base/firmware_class.c | 1
-rw-r--r--  drivers/block/null_blk.c | 97
-rw-r--r--  drivers/block/nvme-core.c | 610
-rw-r--r--  drivers/block/nvme-scsi.c | 147
-rw-r--r--  drivers/block/virtio_blk.c | 7
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 81
-rw-r--r--  drivers/block/xen-blkback/common.h | 5
-rw-r--r--  drivers/block/xen-blkback/xenbus.c | 14
-rw-r--r--  drivers/block/xen-blkfront.c | 11
-rw-r--r--  drivers/char/Kconfig | 1
-rw-r--r--  drivers/char/raw.c | 2
-rw-r--r--  drivers/char/virtio_console.c | 9
-rw-r--r--  drivers/clocksource/bcm_kona_timer.c | 54
-rw-r--r--  drivers/cpufreq/cpufreq.c | 3
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 61
-rw-r--r--  drivers/cpufreq/powernow-k8.c | 10
-rw-r--r--  drivers/crypto/nx/nx-842.c | 29
-rw-r--r--  drivers/dma/Kconfig | 1
-rw-r--r--  drivers/dma/imx-sdma.c | 1
-rw-r--r--  drivers/dma/ioat/dma.c | 52
-rw-r--r--  drivers/dma/ioat/dma.h | 1
-rw-r--r--  drivers/dma/ioat/dma_v2.c | 11
-rw-r--r--  drivers/dma/ioat/dma_v3.c | 3
-rw-r--r--  drivers/dma/mv_xor.c | 24
-rw-r--r--  drivers/dma/ste_dma40.c | 4
-rw-r--r--  drivers/edac/edac_mc.c | 13
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 10
-rw-r--r--  drivers/edac/edac_module.h | 2
-rw-r--r--  drivers/edac/i7300_edac.c | 38
-rw-r--r--  drivers/edac/i7core_edac.c | 9
-rw-r--r--  drivers/extcon/extcon-arizona.c | 12
-rw-r--r--  drivers/fmc/fmc-write-eeprom.c | 2
-rw-r--r--  drivers/gpio/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-bcm-kona.c | 4
-rw-r--r--  drivers/gpio/gpio-clps711x.c | 1
-rw-r--r--  drivers/gpio/gpio-intel-mid.c | 4
-rw-r--r--  drivers/gpio/gpio-xtensa.c | 16
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 14
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 66
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 179
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 9
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nv40.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/mc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/btc_dpm.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/btcd.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/dce6_afmt.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 1
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770_dpm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/sumo_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/uvd_v2_2.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 11
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h | 153
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 141
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 337
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 96
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 469
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 6
-rw-r--r--  drivers/gpu/host1x/job.c | 2
-rw-r--r--  drivers/hid/hid-apple.c | 3
-rw-r--r--  drivers/hid/hid-core.c | 3
-rw-r--r--  drivers/hid/hid-hyperv.c | 11
-rw-r--r--  drivers/hid/hid-ids.h | 8
-rw-r--r--  drivers/hid/hid-input.c | 2
-rw-r--r--  drivers/hid/hid-microsoft.c | 4
-rw-r--r--  drivers/hid/hid-multitouch.c | 5
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 3
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 2
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hv/connection.c | 13
-rw-r--r--  drivers/hwmon/da9055-hwmon.c | 4
-rw-r--r--  drivers/hwmon/max1668.c | 2
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 6
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c | 68
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c | 33
-rw-r--r--  drivers/iio/accel/bma180.c | 16
-rw-r--r--  drivers/iio/adc/max1363.c | 2
-rw-r--r--  drivers/iio/gyro/Kconfig | 2
-rw-r--r--  drivers/iio/gyro/st_gyro.h | 1
-rw-r--r--  drivers/iio/gyro/st_gyro_core.c | 9
-rw-r--r--  drivers/iio/gyro/st_gyro_i2c.c | 1
-rw-r--r--  drivers/iio/gyro/st_gyro_spi.c | 1
-rw-r--r--  drivers/iio/imu/adis16400.h | 1
-rw-r--r--  drivers/iio/imu/adis16400_core.c | 10
-rw-r--r--  drivers/iio/light/cm32181.c | 16
-rw-r--r--  drivers/iio/light/cm36651.c | 45
-rw-r--r--  drivers/iio/light/tsl2563.c | 16
-rw-r--r--  drivers/iio/magnetometer/ak8975.c | 16
-rw-r--r--  drivers/iio/magnetometer/mag3110.c | 8
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c | 4
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 185
-rw-r--r--  drivers/infiniband/hw/mlx5/Kconfig | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 22
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 18
-rw-r--r--  drivers/infiniband/hw/mlx5/user.h | 7
-rw-r--r--  drivers/infiniband/hw/nes/nes.c | 5
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c | 5
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c | 9
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 3
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c | 10
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c | 1
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c | 14
-rw-r--r--  drivers/input/misc/arizona-haptics.c | 19
-rw-r--r--  drivers/iommu/arm-smmu.c | 105
-rw-r--r--  drivers/iommu/omap-iommu-debug.c | 4
-rw-r--r--  drivers/irqchip/Makefile | 1
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 4
-rw-r--r--  drivers/irqchip/irq-metag-ext.c | 2
-rw-r--r--  drivers/irqchip/irq-metag.c | 2
-rw-r--r--  drivers/irqchip/irq-orion.c | 22
-rw-r--r--  drivers/irqchip/irq-zevio.c | 127
-rw-r--r--  drivers/isdn/hisax/q931.c | 2
-rw-r--r--  drivers/md/bcache/bcache.h | 4
-rw-r--r--  drivers/md/bcache/bset.c | 7
-rw-r--r--  drivers/md/bcache/btree.c | 4
-rw-r--r--  drivers/md/bcache/extents.c | 2
-rw-r--r--  drivers/md/bcache/request.c | 6
-rw-r--r--  drivers/md/bcache/sysfs.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 13
-rw-r--r--  drivers/md/dm-io.c | 23
-rw-r--r--  drivers/md/dm-mpath.c | 7
-rw-r--r--  drivers/md/dm-raid1.c | 3
-rw-r--r--  drivers/md/dm-thin-metadata.c | 21
-rw-r--r--  drivers/md/dm-thin-metadata.h | 10
-rw-r--r--  drivers/md/dm-thin.c | 39
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.c | 2
-rw-r--r--  drivers/md/persistent-data/dm-space-map-metadata.h | 11
-rw-r--r--  drivers/md/raid1.c | 13
-rw-r--r--  drivers/md/raid5.c | 90
-rw-r--r--  drivers/media/dvb-frontends/cx24117.c | 10
-rw-r--r--  drivers/media/dvb-frontends/nxt200x.c | 2
-rw-r--r--  drivers/media/i2c/adv7842.c | 2
-rw-r--r--  drivers/media/i2c/s5k5baf.c | 30
-rw-r--r--  drivers/media/pci/bt8xx/bttv-cards.c | 2
-rw-r--r--  drivers/media/pci/bt8xx/bttv-gpio.c | 2
-rw-r--r--  drivers/media/pci/saa7134/saa7134-cards.c | 2
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-core.c | 5
-rw-r--r--  drivers/media/platform/exynos4-is/fimc-lite.c | 7
-rw-r--r--  drivers/media/platform/s5p-jpeg/jpeg-core.c | 8
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c | 4
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c | 4
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h | 4
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf.c | 6
-rw-r--r--  drivers/media/usb/dvb-usb-v2/mxl111sf.h | 2
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-core.c | 4
-rw-r--r--  drivers/media/v4l2-core/v4l2-dv-timings.c | 1
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-contig.c | 12
-rw-r--r--  drivers/media/v4l2-core/videobuf-dma-sg.c | 10
-rw-r--r--  drivers/media/v4l2-core/videobuf-vmalloc.c | 10
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 5
-rw-r--r--  drivers/message/i2o/i2o_config.c | 4
-rw-r--r--  drivers/mfd/da9055-i2c.c | 12
-rw-r--r--  drivers/mfd/max14577.c | 2
-rw-r--r--  drivers/mfd/max8997.c | 6
-rw-r--r--  drivers/mfd/max8998.c | 6
-rw-r--r--  drivers/mfd/sec-core.c | 2
-rw-r--r--  drivers/mfd/tps65217.c | 4
-rw-r--r--  drivers/mfd/wm8994-core.c | 2
-rw-r--r--  drivers/misc/genwqe/card_dev.c | 1
-rw-r--r--  drivers/misc/mei/client.c | 15
-rw-r--r--  drivers/misc/mic/host/mic_virtio.c | 3
-rw-r--r--  drivers/misc/sgi-gru/grukdump.c | 11
-rw-r--r--  drivers/mmc/card/queue.c | 2
-rw-r--r--  drivers/mtd/nand/nand_base.c | 2
-rw-r--r--  drivers/mtd/nand/omap2.c | 61
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 8
-rw-r--r--  drivers/net/Kconfig | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 6
-rw-r--r--  drivers/net/bonding/bond_3ad.h | 1
-rw-r--r--  drivers/net/bonding/bond_main.c | 35
-rw-r--r--  drivers/net/bonding/bond_options.c | 2
-rw-r--r--  drivers/net/can/Kconfig | 2
-rw-r--r--  drivers/net/can/dev.c | 15
-rw-r--r--  drivers/net/can/flexcan.c | 7
-rw-r--r--  drivers/net/can/janz-ican3.c | 20
-rw-r--r--  drivers/net/can/usb/kvaser_usb.c | 2
-rw-r--r--  drivers/net/can/vcan.c | 9
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 3
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 17
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 1
-rw-r--r--  drivers/net/ethernet/ethoc.c | 138
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 330
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 25
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 13
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 53
-rw-r--r--  drivers/net/irda/Kconfig | 7
-rw-r--r--  drivers/net/irda/Makefile | 1
-rw-r--r--  drivers/net/irda/ep7211-sir.c | 70
-rw-r--r--  drivers/net/irda/irtty-sir.c | 1
-rw-r--r--  drivers/net/macvlan.c | 5
-rw-r--r--  drivers/net/phy/dp83640.c | 32
-rw-r--r--  drivers/net/phy/mdio-sun4i.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 38
-rw-r--r--  drivers/net/team/team.c | 2
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/Kconfig | 15
-rw-r--r--  drivers/net/usb/Makefile | 1
-rw-r--r--  drivers/net/usb/asix_devices.c | 3
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 4
-rw-r--r--  drivers/net/usb/gl620a.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 32
-rw-r--r--  drivers/net/usb/mcs7830.c | 5
-rw-r--r--  drivers/net/usb/net1080.c | 4
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 11
-rw-r--r--  drivers/net/usb/r8152.c | 17
-rw-r--r--  drivers/net/usb/rndis_host.c | 4
-rw-r--r--  drivers/net/usb/smsc75xx.c | 4
-rw-r--r--  drivers/net/usb/smsc95xx.c | 4
-rw-r--r--  drivers/net/usb/sr9800.c | 874
-rw-r--r--  drivers/net/usb/sr9800.h | 202
-rw-r--r--  drivers/net/usb/usbnet.c | 25
-rw-r--r--  drivers/net/vxlan.c | 3
-rw-r--r--  drivers/net/wan/dlci.c | 5
-rw-r--r--  drivers/net/wireless/ath/ar5523/ar5523.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_main.c | 63
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 8
-rw-r--r--  drivers/net/wireless/hostap/hostap_proc.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/mac80211.c | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-modparams.h | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/scan.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 73
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/utils.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 7
-rw-r--r--  drivers/net/wireless/mwifiex/main.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 5
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800lib.c | 5
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8180/dev.c | 23
-rw-r--r--  drivers/net/wireless/rtl818x/rtl8187/rtl8187.h | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/ps.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 18
-rw-r--r--  drivers/net/xen-netback/common.h | 6
-rw-r--r--  drivers/net/xen-netback/interface.c | 1
-rw-r--r--  drivers/net/xen-netback/netback.c | 16
-rw-r--r--  drivers/net/xen-netfront.c | 5
-rw-r--r--  drivers/of/address.c | 5
-rw-r--r--  drivers/of/base.c | 128
-rw-r--r--  drivers/of/of_mdio.c | 22
-rw-r--r--  drivers/of/selftest.c | 67
-rw-r--r--  drivers/of/testcase-data/testcases.dtsi | 3
-rw-r--r--  drivers/of/testcase-data/tests-interrupts.dtsi | 58
-rw-r--r--  drivers/of/testcase-data/tests-match.dtsi | 19
-rw-r--r--  drivers/of/testcase-data/tests-phandle.dtsi | 39
-rw-r--r--  drivers/pci/host/pci-mvebu.c | 11
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 70
-rw-r--r--  drivers/pci/msi.c | 10
-rw-r--r--  drivers/pci/pci.c | 10
-rw-r--r--  drivers/phy/Kconfig | 3
-rw-r--r--  drivers/phy/phy-core.c | 76
-rw-r--r--  drivers/phy/phy-exynos-dp-video.c | 8
-rw-r--r--  drivers/phy/phy-exynos-mipi-video.c | 10
-rw-r--r--  drivers/phy/phy-mvebu-sata.c | 10
-rw-r--r--  drivers/phy/phy-omap-usb2.c | 10
-rw-r--r--  drivers/phy/phy-twl4030-usb.c | 10
-rw-r--r--  drivers/pinctrl/core.c | 8
-rw-r--r--  drivers/pinctrl/pinctrl-at91.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-imx1-core.c | 10
-rw-r--r--  drivers/pinctrl/pinctrl-tegra.c | 2
-rw-r--r--  drivers/pinctrl/sirf/pinctrl-prima2.c | 2
-rw-r--r--  drivers/pinctrl/vt8500/pinctrl-wmt.c | 15
-rw-r--r--  drivers/power/ds2782_battery.c | 2
-rw-r--r--  drivers/power/isp1704_charger.c | 2
-rw-r--r--  drivers/power/max17040_battery.c | 5
-rw-r--r--  drivers/pwm/pwm-lp3943.c | 4
-rw-r--r--  drivers/regulator/ab3100.c | 4
-rw-r--r--  drivers/regulator/core.c | 11
-rw-r--r--  drivers/regulator/da9055-regulator.c | 4
-rw-r--r--  drivers/regulator/da9063-regulator.c | 4
-rw-r--r--  drivers/regulator/max14577.c | 10
-rw-r--r--  drivers/regulator/s2mps11.c | 1
-rw-r--r--  drivers/regulator/s5m8767.c | 4
-rw-r--r--  drivers/s390/cio/chsc.c | 1
-rw-r--r--  drivers/s390/cio/cio.c | 40
-rw-r--r--  drivers/s390/cio/qdio.h | 14
-rw-r--r--  drivers/s390/cio/qdio_main.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c | 24
-rw-r--r--  drivers/sbus/char/jsflash.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 44
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 3
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 158
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h | 7
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/spi/Kconfig | 4
-rw-r--r--  drivers/spi/spi-nuc900.c | 2
-rw-r--r--  drivers/spi/spi.c | 4
-rw-r--r--  drivers/staging/android/ashmem.c | 45
-rw-r--r--  drivers/staging/android/binder.c | 3
-rw-r--r--  drivers/staging/android/ion/compat_ion.c | 26
-rw-r--r--  drivers/staging/android/ion/ion_dummy_driver.c | 12
-rw-r--r--  drivers/staging/android/ion/ion_heap.c | 2
-rw-r--r--  drivers/staging/android/ion/ion_priv.h | 1
-rw-r--r--  drivers/staging/android/ion/ion_system_heap.c | 6
-rw-r--r--  drivers/staging/android/sw_sync.h | 17
-rw-r--r--  drivers/staging/android/sync.c | 14
-rw-r--r--  drivers/staging/bcm/Bcmnet.c | 2
-rw-r--r--  drivers/staging/comedi/drivers.c | 2
-rw-r--r--  drivers/staging/comedi/drivers/adv_pci1710.c | 17
-rw-r--r--  drivers/staging/comedi/drivers/usbduxsigma.c | 6
-rw-r--r--  drivers/staging/dgrp/dgrp_net_ops.c | 330
-rw-r--r--  drivers/staging/gdm72xx/gdm_usb.c | 3
-rw-r--r--  drivers/staging/iio/Documentation/iio_utils.h | 6
-rw-r--r--  drivers/staging/iio/adc/ad799x_core.c | 13
-rw-r--r--  drivers/staging/iio/adc/mxs-lradc.c | 7
-rw-r--r--  drivers/staging/iio/impedance-analyzer/ad5933.c | 2
-rw-r--r--  drivers/staging/imx-drm/imx-drm-core.c | 55
-rw-r--r--  drivers/staging/imx-drm/imx-hdmi.c | 22
-rw-r--r--  drivers/staging/lustre/TODO | 5
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h | 2
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/libcfs_private.h | 2
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 2
-rw-r--r--  drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 5
-rw-r--r--  drivers/staging/lustre/lustre/include/lustre/lustre_user.h | 3
-rw-r--r--  drivers/staging/lustre/lustre/llite/dir.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/mdc/mdc_request.c | 6
-rw-r--r--  drivers/staging/media/go7007/go7007-loader.c | 4
-rw-r--r--  drivers/staging/netlogic/xlr_net.c | 7
-rw-r--r--  drivers/staging/octeon-usb/octeon-hcd.c | 273
-rw-r--r--  drivers/staging/ozwpan/ozproto.c | 3
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_wlan_util.c | 22
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/ioctl_linux.c | 12
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/os_intfs.c | 2
-rw-r--r--  drivers/staging/rtl8188eu/os_dep/usb_intf.c | 3
-rw-r--r--  drivers/staging/rtl8821ae/Kconfig | 2
-rw-r--r--  drivers/staging/rtl8821ae/wifi.h | 2
-rw-r--r--  drivers/staging/usbip/userspace/libsrc/names.c | 8
-rw-r--r--  drivers/staging/usbip/vhci_sysfs.c | 3
-rw-r--r--  drivers/staging/wlags49_h2/wl_wext.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl1.c | 4
-rw-r--r--  drivers/target/target_core_alua.c | 2
-rw-r--r--  drivers/target/target_core_pr.c | 11
-rw-r--r--  drivers/target/target_core_sbc.c | 19
-rw-r--r--  drivers/target/target_core_spc.c | 4
-rw-r--r--  drivers/target/target_core_transport.c | 8
-rw-r--r--  drivers/tty/hvc/hvc_opal.c | 8
-rw-r--r--  drivers/tty/hvc/hvc_rtas.c | 12
-rw-r--r--  drivers/tty/hvc/hvc_udbg.c | 9
-rw-r--r--  drivers/tty/hvc/hvc_xen.c | 17
-rw-r--r--  drivers/tty/n_gsm.c | 11
-rw-r--r--  drivers/tty/n_tty.c | 14
-rw-r--r--  drivers/tty/serial/8250/8250_core.c | 18
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 4
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c | 3
-rw-r--r--  drivers/tty/serial/omap-serial.c | 11
-rw-r--r--  drivers/tty/serial/sirfsoc_uart.c | 4
-rw-r--r--  drivers/tty/vt/vt.c | 2
-rw-r--r--  drivers/usb/chipidea/udc.c | 4
-rw-r--r--  drivers/usb/core/driver.c | 24
-rw-r--r--  drivers/usb/core/hcd.c | 1
-rw-r--r--  drivers/usb/core/hub.c | 7
-rw-r--r--  drivers/usb/core/usb.h | 1
-rw-r--r--  drivers/usb/dwc2/core.c | 2
-rw-r--r--  drivers/usb/dwc2/hcd.c | 11
-rw-r--r--  drivers/usb/dwc2/platform.c | 3
-rw-r--r--  drivers/usb/gadget/bcm63xx_udc.c | 58
-rw-r--r--  drivers/usb/gadget/f_fs.c | 7
-rw-r--r--  drivers/usb/gadget/printer.c | 2
-rw-r--r--  drivers/usb/gadget/s3c2410_udc.c | 2
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 13
-rw-r--r--  drivers/usb/host/ehci-hub.c | 26
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 6
-rw-r--r--  drivers/usb/host/xhci-mem.c | 14
-rw-r--r--  drivers/usb/host/xhci-pci.c | 5
-rw-r--r--  drivers/usb/host/xhci-ring.c | 68
-rw-r--r--  drivers/usb/host/xhci.c | 38
-rw-r--r--  drivers/usb/host/xhci.h | 41
-rw-r--r--  drivers/usb/musb/musb_core.c | 15
-rw-r--r--  drivers/usb/musb/musb_host.c | 3
-rw-r--r--  drivers/usb/musb/musb_virthub.c | 26
-rw-r--r--  drivers/usb/musb/omap2430.c | 2
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 57
-rw-r--r--  drivers/usb/phy/phy.c | 8
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 5
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 13
-rw-r--r--  drivers/usb/serial/option.c | 6
-rw-r--r--  drivers/usb/serial/qcserial.c | 3
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r--  drivers/usb/storage/Kconfig | 4
-rw-r--r--  drivers/usb/storage/scsiglue.c | 6
-rw-r--r--  drivers/usb/storage/unusual_cypress.h | 2
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/vhost/net.c | 47
-rw-r--r--  drivers/vhost/scsi.c | 6
-rw-r--r--  drivers/video/Kconfig | 2
-rw-r--r--  drivers/video/exynos/Kconfig | 3
-rw-r--r--  drivers/video/omap2/dss/dispc.c | 16
-rw-r--r--  drivers/video/omap2/dss/dpi.c | 2
-rw-r--r--  drivers/video/omap2/dss/sdi.c | 2
-rw-r--r--  drivers/vme/bridges/vme_ca91cx42.c | 4
-rw-r--r--  drivers/vme/bridges/vme_tsi148.c | 4
-rw-r--r--  drivers/watchdog/Kconfig | 1
-rw-r--r--  drivers/watchdog/w83697hf_wdt.c | 2
-rw-r--r--  drivers/xen/Makefile | 1
-rw-r--r--  drivers/xen/events/events_base.c | 2
-rw-r--r--  drivers/xen/gntdev.c | 13
-rw-r--r--  drivers/xen/grant-table.c | 89
-rw-r--r--  drivers/xen/xencomm.c | 219
532 files changed, 7611 insertions, 3241 deletions
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index e7515aa43d6b..6f190bc2b8b7 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -243,6 +243,8 @@ static int acpi_ac_resume(struct device *dev)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
return 0;
}
+#else
+#define acpi_ac_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 470e7542bf31..797a6938d051 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -549,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm = x/1000;
if (acpi_battery_present(battery))
acpi_battery_set_alarm(battery);
@@ -841,6 +841,8 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery);
return 0;
}
+#else
+#define acpi_battery_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 10e4964d051a..afec4526c48a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -260,14 +260,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{
.callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 15R SE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
.ident = "ThinkPad Edge E530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -322,56 +314,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
},
},
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ProBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP EliteBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 15",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 17",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP EliteBook 8780w",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
- },
- },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 11c11f6b8fa1..714e957a871a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -80,6 +80,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev);
+#else
+#define acpi_button_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0b6ae6eb5c4a..368f9ddb8480 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev,
ACPI_COMPANION_SET(dev, adev);
dev->release = acpi_container_release;
ret = device_register(dev);
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
-
+ }
adev->driver_data = dev;
return 1;
}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c431c88faaff..5bfd769fc91f 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
static void dock_notify(struct dock_station *ds, u32 event)
{
acpi_handle handle = ds->handle;
- struct acpi_device *ad;
+ struct acpi_device *adev = NULL;
int surprise_removal = 0;
/*
@@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+ acpi_bus_get_device(handle, &adev);
+ if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
@@ -712,13 +713,11 @@ static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct acpi_device *tmp;
-
struct dock_station *dock_station = dev->platform_data;
+ struct acpi_device *adev = NULL;
- if (!acpi_bus_get_device(dock_station->handle, &tmp))
- return snprintf(buf, PAGE_SIZE, "1\n");
- return snprintf(buf, PAGE_SIZE, "0\n");
+ acpi_bus_get_device(dock_station->handle, &adev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 1fb62900f32a..09e423f3d8ad 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -55,6 +55,9 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
+#else
+#define acpi_fan_suspend NULL
+#define acpi_fan_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 52d45ea2bc4f..361b40c10c3f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -430,6 +430,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
pin_name(pin));
}
+ kfree(entry);
return 0;
}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 50fe34ffe932..75c28eae8860 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -60,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
(device_may_wakeup(&dev->dev) ||
- (ldev && device_may_wakeup(ldev))) ?
+ device_may_wakeup(ldev)) ?
"enabled" : "disabled",
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 28baa05b8018..84243c32e29c 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -56,6 +56,12 @@ struct throttling_tstate {
int target_state; /* target T-state */
};
+struct acpi_processor_throttling_arg {
+ struct acpi_processor *pr;
+ int target_state;
+ bool force;
+};
+
#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)
@@ -1060,16 +1066,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}
+static long acpi_processor_throttling_fn(void *data)
+{
+ struct acpi_processor_throttling_arg *arg = data;
+ struct acpi_processor *pr = arg->pr;
+
+ return pr->throttling.acpi_processor_set_throttling(pr,
+ arg->target_state, arg->force);
+}
+
int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force)
{
- cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
+ struct acpi_processor_throttling_arg arg;
struct throttling_tstate t_state;
- cpumask_var_t online_throttling_cpus;
if (!pr)
return -EINVAL;
@@ -1080,14 +1094,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
- free_cpumask_var(saved_mask);
- return -ENOMEM;
- }
-
if (cpu_is_offline(pr->id)) {
/*
* the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1096,17 +1102,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return -ENODEV;
}
- cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
- cpumask_and(online_throttling_cpus, cpu_online_mask,
- p_throttling->shared_cpu_map);
+
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1118,21 +1122,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the pr->id CPU. Exit */
- ret = -ENODEV;
- goto exit;
- }
- ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state, force);
+ arg.pr = pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask,
+ p_throttling->shared_cpu_map) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1153,13 +1154,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
"on CPU %d\n", i));
continue;
}
- t_state.cpu = i;
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(i)))
- continue;
- ret = match_pr->throttling.
- acpi_processor_set_throttling(
- match_pr, t_state.target_state, force);
+
+ arg.pr = match_pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg);
}
}
/*
@@ -1168,17 +1168,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
- /* restore the previous state */
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, saved_mask);
-exit:
- free_cpumask_var(online_throttling_cpus);
- free_cpumask_var(saved_mask);
+
return ret;
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index d465ae6cdd00..dbd48498b938 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -450,7 +450,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm_capacity = x /
(1000 * acpi_battery_scale(battery));
if (battery->present)
@@ -668,6 +668,8 @@ static int acpi_sbs_resume(struct device *dev)
acpi_sbs_callback(sbs);
return 0;
}
+#else
+#define acpi_sbs_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7384158c7f87..57b053f424d1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -484,7 +484,6 @@ static void acpi_device_hotplug(void *data, u32 src)
static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
{
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- struct acpi_scan_handler *handler = data;
struct acpi_device *adev;
acpi_status status;
@@ -500,7 +499,10 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- if (!handler->hotplug.enabled) {
+ if (!adev->handler)
+ goto err_out;
+
+ if (!adev->handler->hotplug.enabled) {
acpi_handle_err(handle, "Eject disabled\n");
ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
goto err_out;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8349a555b92b..08626c851be7 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -102,6 +102,8 @@ MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev);
+#else
+#define acpi_thermal_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0347a37eb438..85e3b612bdc0 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -99,10 +99,6 @@ acpi_extract_package(union acpi_object *package,
union acpi_object *element = &(package->package.elements[i]);
- if (!element) {
- return AE_BAD_DATA;
- }
-
switch (element->type) {
case ACPI_TYPE_INTEGER:
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index b727d105046d..b6ba88ed31ae 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -81,11 +81,12 @@ static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
- * For Windows 8 systems: if set ture and the GPU driver has
- * registered a backlight interface, skip registering ACPI video's.
+ * For Windows 8 systems: used to decide if video module
+ * should skip registering backlight interface of its own.
*/
-static bool use_native_backlight = false;
-module_param(use_native_backlight, bool, 0644);
+static int use_native_backlight_param = -1;
+module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
+static bool use_native_backlight_dmi = false;
static int register_count;
static struct mutex video_list_lock;
@@ -231,9 +232,17 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static bool acpi_video_use_native_backlight(void)
+{
+ if (use_native_backlight_param != -1)
+ return use_native_backlight_param;
+ else
+ return use_native_backlight_dmi;
+}
+
static bool acpi_video_verify_backlight_support(void)
{
- if (acpi_osi_is_win8() && use_native_backlight &&
+ if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
@@ -398,6 +407,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
+static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
+{
+ use_native_backlight_dmi = true;
+ return 0;
+}
+
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -442,6 +457,120 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad T430s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X230",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X1 Carbon",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Lenovo Yoga 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Dell Inspiron 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire 5733Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-431",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 4340s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 8780w",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
+ },
+ },
{}
};
@@ -685,6 +814,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
+ u32 value;
if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
@@ -715,7 +845,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
printk(KERN_ERR PREFIX "Invalid data\n");
continue;
}
- br->levels[count] = (u32) o->integer.value;
+ value = (u32) o->integer.value;
+ /* Skip duplicate entries */
+ if (count > 2 && br->levels[count - 1] == value)
+ continue;
+
+ br->levels[count] = value;
if (br->levels[count] > max_level)
max_level = br->levels[count];
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f0447d3daf2c..19080c8e2f2a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,14 +168,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
- {
- .callback = video_detect_force_vendor,
- .ident = "Lenovo Yoga 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
- },
- },
{ },
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4e737728aee2..868429a47be4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -247,6 +247,7 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
+ select GENERIC_PHY
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] PCI(-X) chips,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dc2756fb6f33..c81d809c111b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -61,6 +61,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_noncq] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nosntf] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
@@ -452,6 +460,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /*
+ * Samsung SSDs found on some macbooks. NCQ times out.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ */
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -1170,8 +1184,10 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
nvec = rc;
rc = pci_enable_msi_block(pdev, nvec);
- if (rc)
+ if (rc < 0)
goto intx;
+ else if (rc > 0)
+ goto single_msi;
return nvec;
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 20fd337a5731..7ccc084bf1df 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
* otherwise. Don't try hard to recover it.
*/
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
- } else if (vendor == 0x197b && devid == 0x2352) {
- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
+ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+ /*
+ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+ * 0x0325: jmicron JMB394.
+ */
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified
* LPM disabled to avoid potential problems
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 26386f0b89a8..b0b18ec5465f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -119,7 +119,9 @@ static int pata_imx_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@@ -212,7 +214,9 @@ static int pata_imx_resume(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
- clk_prepare_enable(priv->clk);
+ int ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 20a7517bd339..05c8a44adf8e 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4104,7 +4104,6 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!hpriv->port_phys)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
@@ -4126,17 +4125,24 @@ static int mv_platform_probe(struct platform_device *pdev)
clk_prepare_enable(hpriv->port_clks[port]);
sprintf(port_number, "port%d", port);
- hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number);
+ hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+ port_number);
if (IS_ERR(hpriv->port_phys[port])) {
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
- if ((rc != -EPROBE_DEFER) && (rc != -ENODEV))
- dev_warn(&pdev->dev, "error getting phy");
+ if (rc != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "error getting phy %d", rc);
+
+ /* Cleanup only the initialized ports */
+ hpriv->n_ports = port;
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
}
+ /* All the ports have been initialized */
+ hpriv->n_ports = n_ports;
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@@ -4174,7 +4180,7 @@ err:
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
- for (port = 0; port < n_ports; port++) {
+ for (port = 0; port < hpriv->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index d67fc351343c..b7695e804635 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -157,6 +157,7 @@ static const struct sil_drivelist {
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
+ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
goto out;
}
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Found all components */
ret = master->ops->bind(master->dev);
if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
master_remove_components(master);
goto out;
}
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
{
if (master->bound) {
master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
master->bound = false;
}
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd61da2..61d6d62cc0d3 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -616,36 +616,35 @@ static int dma_buf_describe(struct seq_file *s)
if (ret)
return ret;
- seq_printf(s, "\nDma-buf Objects:\n");
- seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+ seq_puts(s, "\nDma-buf Objects:\n");
+ seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
- seq_printf(s,
- "\tERROR locking buffer object: skipping\n");
+ seq_puts(s,
+ "\tERROR locking buffer object: skipping\n");
continue;
}
- seq_printf(s, "\t");
-
- seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
- buf_obj->exp_name, buf_obj->size,
+ seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+ buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
- (long)(buf_obj->file->f_count.counter));
+ (long)(buf_obj->file->f_count.counter),
+ buf_obj->exp_name);
- seq_printf(s, "\t\tAttached Devices:\n");
+ seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
- seq_printf(s, "\t\t");
+ seq_puts(s, "\t");
- seq_printf(s, "%s\n", attach_obj->dev->init_name);
+ seq_printf(s, "%s\n", dev_name(attach_obj->dev));
attach_count++;
}
- seq_printf(s, "\n\t\tTotal %d devices attached\n",
+ seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8a97ddfa6122..c30df50e4440 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1580,6 +1580,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
kill_requests_without_uevent();
device_cache_fw_images();
break;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3107282a9741..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
NULL_IRQ_TIMER = 2,
+};
+enum {
NULL_Q_BIO = 0,
NULL_Q_RQ = 1,
NULL_Q_MQ = 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- if (cmd->rq) {
- if (queue_mode == NULL_Q_MQ)
- blk_mq_end_io(cmd->rq, 0);
- else {
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, 0);
- }
- } else if (cmd->bio)
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_end_io(cmd->rq, 0);
+ return;
+ case NULL_Q_RQ:
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ break;
+ case NULL_Q_BIO:
bio_endio(cmd->bio, 0);
+ break;
+ }
- if (queue_mode != NULL_Q_MQ)
- free_cmd(cmd);
+ free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
cq = &per_cpu(completion_queues, smp_processor_id());
while ((entry = llist_del_all(&cq->list)) != NULL) {
+ entry = llist_reverse_order(entry);
do {
cmd = container_of(entry, struct nullb_cmd, ll_list);
end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
- struct completion_queue *cq;
- struct llist_node *entry, *next;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- entry = llist_del_all(&cq->list);
-
- while (entry) {
- next = entry->next;
- cmd = llist_entry(entry, struct nullb_cmd, ll_list);
- end_cmd(cmd);
- entry = next;
- }
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
- struct call_single_data *data = &cmd->csd;
- int cpu = get_cpu();
- struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
- cmd->ll_list.next = NULL;
-
- if (llist_add(&cmd->ll_list, &cq->list)) {
- data->func = null_ipi_cmd_end_io;
- data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
- }
-
- put_cpu();
+ end_cmd(rq->special);
}
-#endif /* CONFIG_SMP */
-
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
/* Complete IO by inline, softirq or timer */
switch (irqmode) {
- case NULL_IRQ_NONE:
- end_cmd(cmd);
- break;
case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
- null_cmd_end_ipi(cmd);
-#else
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_complete_request(cmd->rq);
+ break;
+ case NULL_Q_RQ:
+ blk_complete_request(cmd->rq);
+ break;
+ case NULL_Q_BIO:
+ /*
+ * XXX: no proper submitting cpu information available.
+ */
+ end_cmd(cmd);
+ break;
+ }
+ break;
+ case NULL_IRQ_NONE:
end_cmd(cmd);
-#endif
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
+ .complete = null_softirq_done_fn,
};
static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,6 @@ static int __init null_init(void)
{
unsigned int i;
-#if !defined(CONFIG_SMP)
- if (irqmode == NULL_IRQ_SOFTIRQ) {
- pr_warn("null_blk: softirq completions not available.\n");
- pr_warn("null_blk: using direct completions.\n");
- irqmode = NULL_IRQ_NONE;
- }
-#endif
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 1f14ac403945..51824d1f23ea 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -46,7 +46,6 @@
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
@@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0);
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
+static struct workqueue_struct *nvme_workq;
+
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
+struct async_cmd_info {
+ struct kthread_work work;
+ struct kthread_worker *worker;
+ u32 result;
+ int status;
+ void *ctx;
+};
/*
* An NVM Express queue. Each device has at least two (one for admin
@@ -66,6 +76,7 @@ static struct task_struct *nvme_thread;
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
+ char irqname[24]; /* nvme4294967295-65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@ struct nvme_queue {
u16 sq_head;
u16 sq_tail;
u16 cq_head;
+ u16 qid;
u8 cq_phase;
u8 cqe_seen;
u8 q_suspended;
+ struct async_cmd_info cmdinfo;
unsigned long cmdid_data[];
};
@@ -97,6 +110,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@ struct nvme_cmd_info {
nvme_completion_fn fn;
void *ctx;
unsigned long timeout;
+ int aborted;
};
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
info[cmdid].fn = handler;
info[cmdid].ctx = ctx;
info[cmdid].timeout = jiffies + timeout;
+ info[cmdid].aborted = 0;
return cmdid;
}
@@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
static void special_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
@@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
return;
if (ctx == CMD_CTX_FLUSH)
return;
+ if (ctx == CMD_CTX_ABORT) {
+ ++dev->abort_limit;
+ return;
+ }
if (ctx == CMD_CTX_COMPLETED) {
dev_warn(&dev->pci_dev->dev,
"completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}
+static void async_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct async_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+}
+
/*
* Called with local interrupts disabled and the q_lock held. May not sleep.
*/
@@ -693,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return 0;
- writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
@@ -804,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
return cmdinfo.status;
}
+static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd,
+ struct async_cmd_info *cmdinfo, unsigned timeout)
+{
+ int cmdid;
+
+ cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmdinfo->status = -EINTR;
+ cmd->common.command_id = cmdid;
+ nvme_submit_cmd(nvmeq, cmd);
+ return 0;
+}
+
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
+static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+ struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+{
+ return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+ ADMIN_TIMEOUT);
+}
+
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
int status;
@@ -920,6 +972,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
}
/**
+ * nvme_abort_cmd - Attempt aborting a command
+ * @cmdid: Command id of a timed out IO
+ * @nvmeq: The queue with timed out IO
+ *
+ * Schedule controller reset if the command was already aborted once before and
+ * still hasn't been returned to the driver, or if this is the admin queue.
+ */
+static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+{
+ int a_cmdid;
+ struct nvme_command cmd;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (!nvmeq->qid || info[cmdid].aborted) {
+ if (work_busy(&dev->reset_work))
+ return;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "I/O %d QID %d timeout, reset controller\n", cmdid,
+ nvmeq->qid);
+ PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ return;
+ }
+
+ if (!dev->abort_limit)
+ return;
+
+ a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+ ADMIN_TIMEOUT);
+ if (a_cmdid < 0)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abort.opcode = nvme_admin_abort_cmd;
+ cmd.abort.cid = cmdid;
+ cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+ cmd.abort.command_id = a_cmdid;
+
+ --dev->abort_limit;
+ info[cmdid].aborted = 1;
+ info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+
+ dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
+ nvme_submit_cmd(dev->queues[0], &cmd);
+}
+
+/**
* nvme_cancel_ios - Cancel outstanding I/Os
* @queue: The queue to cancel I/Os on
* @timeout: True to only cancel I/Os which have timed out
@@ -942,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
continue;
if (info[cmdid].ctx == CMD_CTX_CANCELLED)
continue;
- dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+ if (timeout && nvmeq->dev->initialized) {
+ nvme_abort_cmd(cmdid, nvmeq);
+ continue;
+ }
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
}
@@ -964,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
kfree(nvmeq);
}
-static void nvme_free_queues(struct nvme_dev *dev)
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--) {
+ for (i = dev->queue_count - 1; i >= lowest; i--) {
nvme_free_queue(dev->queues[i]);
dev->queue_count--;
dev->queues[i] = NULL;
}
}
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+/**
+ * nvme_suspend_queue - put queue into suspended state
+ * @nvmeq: queue to suspend
+ *
+ * Returns 1 if already suspended, 0 otherwise.
+ */
+static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
- int vector = dev->entry[nvmeq->cq_vector].vector;
+ int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
if (nvmeq->q_suspended) {
spin_unlock_irq(&nvmeq->q_lock);
- return;
+ return 1;
}
nvmeq->q_suspended = 1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -991,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
- /* Don't tell the adapter to delete the admin queue */
- if (qid) {
- adapter_delete_sq(dev, qid);
- adapter_delete_cq(dev, qid);
- }
+ return 0;
+}
+static void nvme_clear_queue(struct nvme_queue *nvmeq)
+{
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, false);
spin_unlock_irq(&nvmeq->q_lock);
}
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+
+ if (!nvmeq)
+ return;
+ if (nvme_suspend_queue(nvmeq))
+ return;
+
+ /* Don't tell the adapter to delete the admin queue.
+ * Don't tell a removed adapter to delete IO queues. */
+ if (qid && readl(&dev->bar->csts) != -1) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+ nvme_clear_queue(nvmeq);
+}
+
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
@@ -1025,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
+ snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+ dev->instance, qid);
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
+ nvmeq->qid = qid;
nvmeq->q_suspended = 1;
dev->queue_count++;
@@ -1052,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
{
if (use_threaded_interrupts)
return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED,
+ nvme_irq_check, nvme_irq, IRQF_SHARED,
name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+ IRQF_SHARED, name, nvmeq);
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1067,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
memset(nvmeq->cmdid_data, 0, extra);
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_cancel_ios(nvmeq, false);
@@ -1087,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
- result = queue_request_irq(dev, nvmeq, "nvme");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result < 0)
goto release_sq;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, qid);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
@@ -1205,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;
- result = queue_request_irq(dev, nvmeq, "nvme admin");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
return result;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, 0);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
}
@@ -1487,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
}
}
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case SG_IO:
+ return nvme_sg_io32(ns, arg);
+ }
+ return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_get(&dev->kref);
+ return 0;
+}
+
+static void nvme_free_dev(struct kref *kref);
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_put(&dev->kref, nvme_free_dev);
+}
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
+ .open = nvme_open,
+ .release = nvme_release,
};
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
@@ -1514,13 +1682,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
static int nvme_kthread(void *data)
{
- struct nvme_dev *dev;
+ struct nvme_dev *dev, *next;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node) {
+ list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
+ if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+ dev->initialized) {
+ if (work_busy(&dev->reset_work))
+ continue;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "Failed status, reset controller\n");
+ PREPARE_WORK(&dev->reset_work,
+ nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ continue;
+ }
for (i = 0; i < dev->queue_count; i++) {
struct nvme_queue *nvmeq = dev->queues[i];
if (!nvmeq)
@@ -1541,33 +1721,6 @@ static int nvme_kthread(void *data)
return 0;
}
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
- int index, error;
-
- do {
- if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
- return -1;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_index_ida, &index);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- index = -1;
- return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_index_ida, index);
- spin_unlock(&dev_list_lock);
-}
-
static void nvme_config_discard(struct nvme_ns *ns)
{
u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1601,7 +1754,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
ns->dev = dev;
ns->queue->queuedata = ns;
- disk = alloc_disk(NVME_MINORS);
+ disk = alloc_disk(0);
if (!disk)
goto out_free_queue;
ns->ns_id = nsid;
@@ -1614,12 +1767,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
disk->major = nvme_major;
- disk->minors = NVME_MINORS;
- disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->driverfs_dev = &dev->pci_dev->dev;
+ disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -1635,15 +1788,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
-static void nvme_ns_free(struct nvme_ns *ns)
-{
- int index = ns->disk->first_minor / NVME_MINORS;
- put_disk(ns->disk);
- nvme_put_ns_idx(index);
- blk_cleanup_queue(ns->queue);
- kfree(ns);
-}
-
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -1659,11 +1803,12 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
- return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
+ struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = dev->pci_dev;
int result, cpu, i, vecs, nr_io_queues, size, q_depth;
@@ -1690,7 +1835,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
+ free_irq(dev->entry[0].vector, adminq);
vecs = nr_io_queues;
for (i = 0; i < vecs; i++)
@@ -1728,9 +1873,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
nr_io_queues = vecs;
- result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) {
- dev->queues[0]->q_suspended = 1;
+ adminq->q_suspended = 1;
goto free_queues;
}
@@ -1739,9 +1884,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_cancel_ios(nvmeq, false);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
nvme_free_queue(nvmeq);
dev->queue_count--;
@@ -1782,7 +1927,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return 0;
free_queues:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 1);
return result;
}
@@ -1794,6 +1939,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = dev->pci_dev;
int res;
unsigned nn, i;
struct nvme_ns *ns;
@@ -1803,8 +1949,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
if (!mem)
return -ENOMEM;
@@ -1817,13 +1962,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
+ dev->abort_limit = ctrl->acl + 1;
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
- (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (pdev->device == 0x0953) && ctrl->vs[3])
dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
@@ -1871,16 +2017,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
goto disable;
- pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar)
goto disable;
-
- dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ if (readl(&dev->bar->csts) == -1) {
+ result = -ENODEV;
+ goto unmap;
+ }
+ dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
dev->dbs = ((void __iomem *)dev->bar) + 4096;
return 0;
+ unmap:
+ iounmap(dev->bar);
+ dev->bar = NULL;
disable:
pci_release_regions(pdev);
disable_pci:
@@ -1898,37 +2049,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
if (dev->bar) {
iounmap(dev->bar);
dev->bar = NULL;
+ pci_release_regions(dev->pci_dev);
}
- pci_release_regions(dev->pci_dev);
if (pci_is_enabled(dev->pci_dev))
pci_disable_device(dev->pci_dev);
}
+struct nvme_delq_ctx {
+ struct task_struct *waiter;
+ struct kthread_worker *worker;
+ atomic_t refcount;
+};
+
+static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+{
+ dq->waiter = current;
+ mb();
+
+ for (;;) {
+ set_current_state(TASK_KILLABLE);
+ if (!atomic_read(&dq->refcount))
+ break;
+ if (!schedule_timeout(ADMIN_TIMEOUT) ||
+ fatal_signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+
+ nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+ nvme_disable_queue(dev, 0);
+
+ send_sig(SIGKILL, dq->worker->task, 1);
+ flush_kthread_worker(dq->worker);
+ return;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+}
+
+static void nvme_put_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_dec(&dq->refcount);
+ if (dq->waiter)
+ wake_up_process(dq->waiter);
+}
+
+static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_inc(&dq->refcount);
+ return dq;
+}
+
+static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+{
+ struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+
+ nvme_clear_queue(nvmeq);
+ nvme_put_dq(dq);
+}
+
+static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+ kthread_work_func_t fn)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+ init_kthread_work(&nvmeq->cmdinfo.work, fn);
+ return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+}
+
+static void nvme_del_cq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_cq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+ nvme_del_cq_work_handler);
+}
+
+static void nvme_del_sq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ int status = nvmeq->cmdinfo.status;
+
+ if (!status)
+ status = nvme_delete_cq(nvmeq);
+ if (status)
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_sq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+ nvme_del_sq_work_handler);
+}
+
+static void nvme_del_queue_start(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ allow_signal(SIGKILL);
+ if (nvme_delete_sq(nvmeq))
+ nvme_del_queue_end(nvmeq);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ int i;
+ DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+ struct nvme_delq_ctx dq;
+ struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+ &worker, "nvme%d", dev->instance);
+
+ if (IS_ERR(kworker_task)) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to create queue del task\n");
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_disable_queue(dev, i);
+ return;
+ }
+
+ dq.waiter = NULL;
+ atomic_set(&dq.refcount, 0);
+ dq.worker = &worker;
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ if (nvme_suspend_queue(nvmeq))
+ continue;
+ nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+ nvmeq->cmdinfo.worker = dq.worker;
+ init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+ queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+ }
+ nvme_wait_dq(&dq, dev);
+ kthread_stop(kworker_task);
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_disable_queue(dev, i);
+ dev->initialized = 0;
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
- if (dev->bar)
+ if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+ for (i = dev->queue_count - 1; i >= 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ nvme_suspend_queue(nvmeq);
+ nvme_clear_queue(nvmeq);
+ }
+ } else {
+ nvme_disable_io_queues(dev);
nvme_shutdown_ctrl(dev);
+ nvme_disable_queue(dev, 0);
+ }
nvme_dev_unmap(dev);
}
static void nvme_dev_remove(struct nvme_dev *dev)
{
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- del_gendisk(ns->disk);
- nvme_ns_free(ns);
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ if (ns->disk->flags & GENHD_FL_UP)
+ del_gendisk(ns->disk);
+ if (!blk_queue_dying(ns->queue))
+ blk_cleanup_queue(ns->queue);
}
}
@@ -1985,14 +2282,22 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
+static void nvme_free_namespaces(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ put_disk(ns->disk);
+ kfree(ns);
+ }
+}
+
static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
- nvme_dev_remove(dev);
- nvme_dev_shutdown(dev);
- nvme_free_queues(dev);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
+
+ nvme_free_namespaces(dev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2056,6 +2361,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
disable:
+ nvme_disable_queue(dev, 0);
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
@@ -2064,6 +2370,71 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
}
+static int nvme_remove_dead_ctrl(void *arg)
+{
+ struct nvme_dev *dev = (struct nvme_dev *)arg;
+ struct pci_dev *pdev = dev->pci_dev;
+
+ if (pci_get_drvdata(pdev))
+ pci_stop_and_remove_bus_device(pdev);
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static void nvme_remove_disks(struct work_struct *ws)
+{
+ int i;
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+
+ nvme_dev_remove(dev);
+ spin_lock(&dev_list_lock);
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+ nvme_free_queue(dev->queues[i]);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+ spin_unlock(&dev_list_lock);
+}
+
+static int nvme_dev_resume(struct nvme_dev *dev)
+{
+ int ret;
+
+ ret = nvme_dev_start(dev);
+ if (ret && ret != -EBUSY)
+ return ret;
+ if (ret == -EBUSY) {
+ spin_lock(&dev_list_lock);
+ PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+ queue_work(nvme_workq, &dev->reset_work);
+ spin_unlock(&dev_list_lock);
+ }
+ dev->initialized = 1;
+ return 0;
+}
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+ nvme_dev_shutdown(dev);
+ if (nvme_dev_resume(dev)) {
+ dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+ kref_get(&dev->kref);
+ if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+ dev->instance))) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to start controller remove task\n");
+ kref_put(&dev->kref, nvme_free_dev);
+ }
+ }
+}
+
+static void nvme_reset_failed_dev(struct work_struct *ws)
+{
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+ nvme_dev_reset(dev);
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result = -ENOMEM;
@@ -2082,8 +2453,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free;
INIT_LIST_HEAD(&dev->namespaces);
+ INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
dev->pci_dev = pdev;
-
+ pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
if (result)
goto free;
@@ -2099,6 +2471,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
+ kref_init(&dev->kref);
result = nvme_dev_add(dev);
if (result)
goto shutdown;
@@ -2113,15 +2486,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto remove;
- kref_init(&dev->kref);
+ dev->initialized = 1;
return 0;
remove:
nvme_dev_remove(dev);
+ nvme_free_namespaces(dev);
shutdown:
nvme_dev_shutdown(dev);
release_pools:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 0);
nvme_release_prp_pools(dev);
release:
nvme_release_instance(dev);
@@ -2132,10 +2506,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_shutdown(dev);
+}
+
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ pci_set_drvdata(pdev, NULL);
+ flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
+ nvme_dev_remove(dev);
+ nvme_dev_shutdown(dev);
+ nvme_free_queues(dev, 0);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
@@ -2159,13 +2551,12 @@ static int nvme_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- int ret;
- ret = nvme_dev_start(ndev);
- /* XXX: should remove gendisks if resume fails */
- if (ret)
- nvme_free_queues(ndev);
- return ret;
+ if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+ PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &ndev->reset_work);
+ }
+ return 0;
}
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2192,6 +2583,7 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
+ .shutdown = nvme_shutdown,
.driver = {
.pm = &nvme_dev_pm_ops,
},
@@ -2206,9 +2598,14 @@ static int __init nvme_init(void)
if (IS_ERR(nvme_thread))
return PTR_ERR(nvme_thread);
+ result = -ENOMEM;
+ nvme_workq = create_singlethread_workqueue("nvme");
+ if (!nvme_workq)
+ goto kill_kthread;
+
result = register_blkdev(nvme_major, "nvme");
if (result < 0)
- goto kill_kthread;
+ goto kill_workq;
else if (result > 0)
nvme_major = result;
@@ -2219,6 +2616,8 @@ static int __init nvme_init(void)
unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
+ kill_workq:
+ destroy_workqueue(nvme_workq);
kill_kthread:
kthread_stop(nvme_thread);
return result;
@@ -2228,6 +2627,7 @@ static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
+ destroy_workqueue(nvme_workq);
kthread_stop(nvme_thread);
}
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a4ff4eb8e23..4a0ceb64e269 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -25,6 +25,7 @@
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return retcode;
}
+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+ compat_uint_t iov_base;
+ compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+ sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+ sg_iovec32_t __user *iov32 = dxferp;
+ int i;
+
+ for (i = 0; i < iovec_count; i++) {
+ u32 base, len;
+
+ if (get_user(base, &iov32[i].iov_base) ||
+ get_user(len, &iov32[i].iov_len) ||
+ put_user(compat_ptr(base), &iov[i].iov_base) ||
+ put_user(len, &iov[i].iov_len))
+ return -EFAULT;
+ }
+
+ if (put_user(iov, &sgio->dxferp))
+ return -EFAULT;
+ return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+ sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+ sg_io_hdr_t __user *sgio;
+ u16 iovec_count;
+ u32 data;
+ void __user *dxferp;
+ int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return -EINVAL;
+
+ if (get_user(iovec_count, &sgio32->iovec_count))
+ return -EFAULT;
+
+ {
+ void __user *top = compat_alloc_user_space(0);
+ void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+ (iovec_count * sizeof(sg_iovec_t)));
+ if (new > top)
+ return -EINVAL;
+
+ sgio = new;
+ }
+
+ /* Ok, now construct. */
+ if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+ (2 * sizeof(int)) +
+ (2 * sizeof(unsigned char)) +
+ (1 * sizeof(unsigned short)) +
+ (1 * sizeof(unsigned int))))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->dxferp))
+ return -EFAULT;
+ dxferp = compat_ptr(data);
+ if (iovec_count) {
+ if (sg_build_iovec(sgio, dxferp, iovec_count))
+ return -EFAULT;
+ } else {
+ if (put_user(dxferp, &sgio->dxferp))
+ return -EFAULT;
+ }
+
+ {
+ unsigned char __user *cmdp;
+ unsigned char __user *sbp;
+
+ if (get_user(data, &sgio32->cmdp))
+ return -EFAULT;
+ cmdp = compat_ptr(data);
+
+ if (get_user(data, &sgio32->sbp))
+ return -EFAULT;
+ sbp = compat_ptr(data);
+
+ if (put_user(cmdp, &sgio->cmdp) ||
+ put_user(sbp, &sgio->sbp))
+ return -EFAULT;
+ }
+
+ if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+ 3 * sizeof(int)))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->usr_ptr))
+ return -EFAULT;
+ if (put_user(compat_ptr(data), &sgio->usr_ptr))
+ return -EFAULT;
+
+ err = nvme_sg_io(ns, sgio);
+ if (err >= 0) {
+ void __user *datap;
+
+ if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+ sizeof(int)) ||
+ get_user(datap, &sgio->usr_ptr) ||
+ put_user((u32)(unsigned long)datap,
+ &sgio32->usr_ptr) ||
+ copy_in_user(&sgio32->status, &sgio->status,
+ (4 * sizeof(unsigned char)) +
+ (2 * sizeof(unsigned short)) +
+ (3 * sizeof(int))))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+#endif
+
int nvme_sg_get_version_num(int __user *ip)
{
return put_user(sg_version_num, ip);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
{
- struct request *req = vbr->req;
+ struct virtblk_req *vbr = req->special;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- virtblk_request_done(vbr);
+ blk_mq_complete_request(vbr->req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
.map_queue = blk_mq_map_queue,
.alloc_hctx = blk_mq_alloc_single_hw_queue,
.free_hctx = blk_mq_free_single_hw_queue,
+ .complete = virtblk_request_done,
};
static struct blk_mq_reg virtio_mq_reg = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index da18046d0e07..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -298,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
BUG_ON(num != 0);
}
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
kfree(persistent_gnt);
}
if (segs_to_unmap > 0) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
}
@@ -373,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
- INIT_LIST_HEAD(&blkif->persistent_purge_list);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -418,7 +420,6 @@ finished:
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
- INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
schedule_work(&blkif->persistent_purge_work);
pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
return;
@@ -623,9 +624,23 @@ purge_gnt_list:
print_stats(blkif);
}
- /* Since we are shutting down remove all pages from the buffer */
- shrink_free_pagepool(blkif, 0 /* All */);
+ /* Drain pending purge work */
+ flush_work(&blkif->persistent_purge_work);
+ if (log_stats)
+ print_stats(blkif);
+
+ blkif->xenblkd = NULL;
+ xen_blkif_put(blkif);
+
+ return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
/* Free all persistent grant pages */
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -634,13 +649,8 @@ purge_gnt_list:
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
blkif->persistent_gnt_c = 0;
- if (log_stats)
- print_stats(blkif);
-
- blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
-
- return 0;
+ /* Since we are shutting down remove all pages from the buffer */
+ shrink_free_pagepool(blkif, 0 /* All */);
}
/*
@@ -668,14 +678,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+ invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
invcount = 0;
}
}
if (invcount) {
- ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
}
@@ -737,7 +748,7 @@ again:
}
if (segs_to_map) {
- ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
+ ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
BUG_ON(ret);
}
@@ -835,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct grant_page **pages = pending_req->indirect_pages;
struct xen_blkif *blkif = pending_req->blkif;
int indirect_grefs, rc, n, nseg, i;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_pages;
indirect_grefs = INDIRECT_PAGES(nseg);
@@ -931,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
do {
- /* The initial value is one, and one refcnt taken at the
- * start of the xen_blkif_schedule thread. */
- if (atomic_read(&blkif->refcnt) <= 2)
+ if (atomic_read(&blkif->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
@@ -973,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt)) {
- xen_blkbk_unmap(pending_req->blkif,
+ struct xen_blkif *blkif = pending_req->blkif;
+
+ xen_blkbk_unmap(blkif,
pending_req->segments,
pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
+ make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
- xen_blkif_put(pending_req->blkif);
- if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
- if (atomic_read(&pending_req->blkif->drain))
- complete(&pending_req->blkif->drain_complete);
+ free_req(blkif, pending_req);
+ /*
+ * Make sure the request is freed before releasing blkif,
+ * or there could be a race between free_req and the
+ * cleanup done in xen_blkif_free during shutdown.
+ *
+ * NB: we might try to wake up pending_free_wq before
+ * drain_complete (in case a drain is going on); this is not
+ * a problem with our current implementation because we can
+ * ensure there is no thread waiting on pending_free_wq when
+ * a drain is in progress, but it has to be taken into
+ * account if the current model is changed.
+ */
+ if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ complete(&blkif->drain_complete);
}
- free_req(pending_req->blkif, pending_req);
+ xen_blkif_put(blkif);
}
}
@@ -1237,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
xen_blkif_get(blkif);
+ atomic_inc(&blkif->inflight);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
/* for barrier (drain) requests */
struct completion drain_complete;
atomic_t drain;
+ atomic_t inflight;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->persistent_gnts.rb_node = NULL;
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
+ INIT_LIST_HEAD(&blkif->persistent_purge_list);
blkif->free_pages_num = 0;
atomic_set(&blkif->persistent_gnt_in_use, 0);
+ atomic_set(&blkif->inflight, 0);
+ INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->pending_free);
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
+ /* Remove all persistent grants and the cache of ballooned pages. */
+ xen_blkbk_free_caches(blkif);
+
+ /* Make sure everything is drained before shutting down */
+ BUG_ON(blkif->persistent_gnt_c != 0);
+ BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+ BUG_ON(blkif->free_pages_num != 0);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
+ BUG_ON(!list_empty(&blkif->free_pages));
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref, n;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
/*
* Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
} else {
n = i % SEGS_PER_INDIRECT_FRAME;
segments[n] =
- (struct blkif_request_segment_aligned) {
+ (struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
blkfront_connect(info);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
blkfront_closing(info);
break;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
depends on RAW_DRIVER
+ range 1 65536
default "256"
help
The maximum number of RAW devices that are supported.
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
struct raw_device_data *rawdev;
struct block_device *bdev;
- if (number <= 0 || number >= MAX_RAW_MINORS)
+ if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
rawdev = &raw_devices[number];
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87cc6b8f..6928d094451d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
} else {
/* Fall back to copying a page */
struct page *page = alloc_page(GFP_KERNEL);
- char *src = buf->ops->map(pipe, buf, 1);
- char *dst;
+ char *src;
if (!page)
return -ENOMEM;
- dst = kmap(page);
offset = sd->pos & ~PAGE_MASK;
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- memcpy(dst + offset, src + buf->offset, len);
-
- kunmap(page);
+ src = buf->ops->map(pipe, buf, 1);
+ memcpy(page_address(page) + offset, src + buf->offset, len);
buf->ops->unmap(pipe, buf, src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 974b2db2fe10..0595dc6c453e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -99,31 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
return;
}
-static void __init kona_timers_init(struct device_node *node)
-{
- u32 freq;
- struct clk *external_clk;
-
- external_clk = of_clk_get_by_name(node, NULL);
-
- if (!IS_ERR(external_clk)) {
- arch_timer_rate = clk_get_rate(external_clk);
- clk_prepare_enable(external_clk);
- } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
- arch_timer_rate = freq;
- } else {
- panic("unable to determine clock-frequency");
- }
-
- /* Setup IRQ numbers */
- timers.tmr_irq = irq_of_parse_and_map(node, 0);
-
- /* Setup IO addresses */
- timers.tmr_regs = of_iomap(node, 0);
-
- kona_timer_disable_and_clear(timers.tmr_regs);
-}
-
static int kona_timer_set_next_event(unsigned long clc,
struct clock_event_device *unused)
{
@@ -198,7 +173,34 @@ static struct irqaction kona_timer_irq = {
static void __init kona_timer_init(struct device_node *node)
{
- kona_timers_init(node);
+ u32 freq;
+ struct clk *external_clk;
+
+ if (!of_device_is_available(node)) {
+ pr_info("Kona Timer v1 marked as disabled in device tree\n");
+ return;
+ }
+
+ external_clk = of_clk_get_by_name(node, NULL);
+
+ if (!IS_ERR(external_clk)) {
+ arch_timer_rate = clk_get_rate(external_clk);
+ clk_prepare_enable(external_clk);
+ } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
+ arch_timer_rate = freq;
+ } else {
+ pr_err("Kona Timer v1 unable to determine clock-frequency");
+ return;
+ }
+
+ /* Setup IRQ numbers */
+ timers.tmr_irq = irq_of_parse_and_map(node, 0);
+
+ /* Setup IO addresses */
+ timers.tmr_regs = of_iomap(node, 0);
+
+ kona_timer_disable_and_clear(timers.tmr_regs);
+
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 08ca8c9f41cd..cb003a6b72c8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1323,8 +1323,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
up_read(&policy->rwsem);
if (cpu != policy->cpu) {
- if (!frozen)
- sysfs_remove_link(&dev->kobj, "cpufreq");
+ sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7e257b233602..2cd36b9297f3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,12 +34,15 @@
#define SAMPLE_COUNT 3
-#define BYT_RATIOS 0x66a
-#define BYT_VIDS 0x66b
+#define BYT_RATIOS 0x66a
+#define BYT_VIDS 0x66b
+#define BYT_TURBO_RATIOS 0x66c
-#define FRAC_BITS 8
+
+#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
+#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
@@ -51,12 +54,11 @@ static inline int32_t div_fp(int32_t x, int32_t y)
return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
-static u64 energy_divisor;
-
struct sample {
int32_t core_pct_busy;
u64 aperf;
u64 mperf;
+ unsigned long long tsc;
int freq;
};
@@ -96,6 +98,7 @@ struct cpudata {
u64 prev_aperf;
u64 prev_mperf;
+ unsigned long long prev_tsc;
int sample_ptr;
struct sample samples[SAMPLE_COUNT];
};
@@ -357,7 +360,7 @@ static int byt_get_min_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return value & 0xFF;
+ return (value >> 8) & 0xFF;
}
static int byt_get_max_pstate(void)
@@ -367,6 +370,13 @@ static int byt_get_max_pstate(void)
return (value >> 16) & 0xFF;
}
+static int byt_get_turbo_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_TURBO_RATIOS, value);
+ return value & 0x3F;
+}
+
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -469,7 +479,7 @@ static struct cpu_defaults byt_params = {
.funcs = {
.get_max = byt_get_max_pstate,
.get_min = byt_get_min_pstate,
- .get_turbo = byt_get_max_pstate,
+ .get_turbo = byt_get_turbo_pstate,
.set = byt_set_pstate,
.get_vid = byt_get_vid,
},
@@ -547,31 +557,48 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
- u64 core_pct;
- core_pct = div64_u64(int_tofp(sample->aperf * 100),
- sample->mperf);
- sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+ int32_t core_pct;
+ int32_t c0_pct;
+
+ core_pct = div_fp(int_tofp((sample->aperf)),
+ int_tofp((sample->mperf)));
+ core_pct = mul_fp(core_pct, int_tofp(100));
+ FP_ROUNDUP(core_pct);
+
+ c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
- sample->core_pct_busy = core_pct;
+ sample->freq = fp_toint(
+ mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
+
+ sample->core_pct_busy = mul_fp(core_pct, c0_pct);
}
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
+ unsigned long long tsc;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = native_read_tsc();
+
+ aperf = aperf >> FRAC_BITS;
+ mperf = mperf >> FRAC_BITS;
+ tsc = tsc >> FRAC_BITS;
cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
cpu->samples[cpu->sample_ptr].aperf = aperf;
cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].tsc = tsc;
cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+ cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
+ cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -590,7 +617,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ return FP_ROUNDUP(core_busy);
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
@@ -617,12 +645,10 @@ static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
struct sample *sample;
- u64 energy;
intel_pstate_sample(cpu);
sample = &cpu->samples[cpu->sample_ptr];
- rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
intel_pstate_adjust_busy_pstate(cpu);
@@ -631,7 +657,6 @@ static void intel_pstate_timer_func(unsigned long __data)
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
- div64_u64(energy, energy_divisor),
sample->freq);
intel_pstate_set_sample_time(cpu);
@@ -913,7 +938,6 @@ static int __init intel_pstate_init(void)
int cpu, rc = 0;
const struct x86_cpu_id *id;
struct cpu_defaults *cpu_info;
- u64 units;
if (no_load)
return -ENODEV;
@@ -947,9 +971,6 @@ static int __init intel_pstate_init(void)
if (rc)
goto out;
- rdmsrl(MSR_RAPL_POWER_UNIT, units);
- energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
-
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index e10b646634d7..6684e0342792 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1076,7 +1076,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
- int rc;
+ int rc, cpu;
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
if (rc)
@@ -1140,7 +1140,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
- per_cpu(powernow_data, pol->cpu) = data;
+ /* Point all the CPUs in this policy to the same data */
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = data;
return 0;
@@ -1155,6 +1157,7 @@ err_out:
static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ int cpu;
if (!data)
return -EINVAL;
@@ -1165,7 +1168,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
- per_cpu(powernow_data, pol->cpu) = NULL;
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = NULL;
return 0;
}
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000671c5..1e5481d88a26 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
return sl->entry_nr * sizeof(struct nx842_slentry);
}
+static inline unsigned long nx842_get_pa(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ return page_to_phys(vmalloc_to_page(addr))
+ + offset_in_page(addr);
+ else
+ return __pa(addr);
+}
+
static int nx842_build_scatterlist(unsigned long buf, int len,
struct nx842_scatterlist *sl)
{
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
entry = sl->entries;
while (len) {
- entry->ptr = __pa(buf);
+ entry->ptr = nx842_get_pa((void *)buf);
nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
if (nextpage < buf + len) {
/* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_COMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
- op.out = __pa(slout.entries);
+ op.csbcpb = nx842_get_pa(csbcpb);
+ op.out = nx842_get_pa(slout.entries);
for (i = 0; i < hdr->blocks_nr; i++) {
/*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, max_sync_size, &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_DECOMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
+ op.csbcpb = nx842_get_pa(csbcpb);
/*
* max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
if (likely((inbuf & NX842_HW_PAGE_MASK) ==
((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = hdr->sizes[i];
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.out = __pa(outbuf);
+ op.out = nx842_get_pa((void *)outbuf);
op.outlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(outbuf, max_sync_size, &slout);
- op.out = __pa(slout.entries);
+ op.out = nx842_get_pa(slout.entries);
op.outlen = -nx842_get_scatterlist_size(&slout);
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9bed1a2a67a1..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -346,6 +346,7 @@ config MOXART_DMA
tristate "MOXART DMA support"
depends on ARCH_MOXART
select DMA_ENGINE
+ select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4e7918339b12..19041cefabb1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -449,6 +449,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
+ { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 87529181efcc..4e3549a16132 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
struct ioat_chan_common *chan = data;
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
return IRQ_HANDLED;
}
@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->timer.function = device->timer_fn;
chan->timer.data = data;
tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
- tasklet_disable(&chan->cleanup_task);
}
/**
@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- tasklet_enable(&chan->cleanup_task);
+ set_bit(IOAT_RUN, &chan->state);
ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, ioat->desccount);
return ioat->desccount;
}
+void ioat_stop(struct ioat_chan_common *chan)
+{
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ int chan_id = chan_num(chan);
+ struct msix_entry *msix;
+
+ /* 1/ stop irq from firing tasklets
+ * 2/ stop the tasklet from re-arming irqs
+ */
+ clear_bit(IOAT_RUN, &chan->state);
+
+ /* flush inflight interrupts */
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+ msix = &device->msix_entries[chan_id];
+ synchronize_irq(msix->vector);
+ break;
+ case IOAT_MSI:
+ case IOAT_INTX:
+ synchronize_irq(pdev->irq);
+ break;
+ default:
+ break;
+ }
+
+ /* flush inflight timers */
+ del_timer_sync(&chan->timer);
+
+ /* flush inflight tasklet runs */
+ tasklet_kill(&chan->cleanup_task);
+
+ /* final cleanup now that everything is quiesced and can't re-arm */
+ device->cleanup_fn((unsigned long) &chan->common);
+}
+
/**
* ioat1_dma_free_chan_resources - release all the descriptors
* @chan: the channel to be cleaned
@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
if (ioat->desccount == 0)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- ioat1_cleanup(ioat);
+ ioat_stop(chan);
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
static void ioat1_cleanup_event(unsigned long data)
{
struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat1_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
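
The new ioat_stop() is an instance of a common quiesce ordering: clear the RUN bit that both the interrupt handler and the tasklet now test before re-arming anything, then flush interrupts, timers and tasklets in that order, and only then run the final cleanup. A sketch of the same ordering with a hypothetical context struct rather than the ioat types:

    #include <linux/interrupt.h>
    #include <linux/timer.h>
    #include <linux/bitops.h>

    struct quiesce_ctx {                      /* hypothetical */
            unsigned long state;              /* bit 0: RUNNING */
            struct tasklet_struct cleanup_task;
            struct timer_list timer;
            unsigned int irq;
    };

    static void ctx_quiesce(struct quiesce_ctx *c)
    {
            clear_bit(0, &c->state);          /* 1. IRQ/tasklet stop re-arming */
            synchronize_irq(c->irq);          /* 2. flush in-flight interrupts */
            del_timer_sync(&c->timer);        /* 3. flush pending timers       */
            tasklet_kill(&c->cleanup_task);   /* 4. flush tasklet runs         */
            /* 5. final cleanup goes here; nothing can re-arm it any more */
    }
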
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11fb877ddca9..e982f00a9843 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_stop(struct ioat_chan_common *chan);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d3affe7e976..8d1058085eeb 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat2_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
+ set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
- tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
if (is_ioat_active(status) || is_ioat_idle(status)) {
- set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
if (!ioat->ring)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- device->cleanup_fn((unsigned long) c);
+ ioat_stop(chan);
device->reset_hw(chan);
- clear_bit(IOAT_RUN, &chan->state);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 820817e97e62..b9b38a1cf92f 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat3_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b0..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
- old_chain_tail->async_tx.phys);
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+ &old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@ submit_done:
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
- char *hw_desc;
+ void *virt_desc;
+ dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ virt_desc = mv_chan->dma_desc_pool_virt;
+ slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ dma_desc = mv_chan->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int slot_cnt;
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %x src %x len: %u flags: %ld\n",
- __func__, dest, src, len, flags);
+ "%s dest: %pad src %pad len: %u flags: %ld\n",
+ __func__, &dest, &src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s src_cnt: %d len: dest %x %u flags: %ld\n",
- __func__, src_cnt, len, dest, flags);
+ "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
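
The format-string changes in mv_xor.c follow from dma_addr_t possibly being wider than unsigned long on a 32-bit kernel (LPAE and similar), where %x both truncates the value and trips printk format checking. The %pad specifier (and %pa for phys_addr_t) prints the full width but takes a pointer to the variable, hence the added '&'. A one-function sketch:

    #include <linux/device.h>
    #include <linux/types.h>

    static void show_desc(struct device *dev, dma_addr_t desc)
    {
            /* %pad dereferences its argument, so pass the address of the value */
            dev_dbg(dev, "descriptor at %pad\n", &desc);
    }
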
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 00a2de957b23..bf18c786ed40 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1641,6 +1641,7 @@ static void dma_tasklet(unsigned long data)
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
+ bool callback_active;
dma_async_tx_callback callback;
void *callback_param;
@@ -1668,6 +1669,7 @@ static void dma_tasklet(unsigned long data)
}
/* Callback to client */
+ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
@@ -1690,7 +1692,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+ if (callback_active && callback)
callback(callback_param);
return;
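
The ste_dma40 fix copies everything the post-unlock callback decision needs, including the DMA_PREP_INTERRUPT flag, while the descriptor is still known to be valid; after the lock is dropped only the local copies are consulted. A hedged sketch of the same shape with hypothetical channel and descriptor types:

    #include <linux/dmaengine.h>
    #include <linux/spinlock.h>

    struct my_desc { struct dma_async_tx_descriptor txd; };   /* hypothetical */
    struct my_chan { spinlock_t lock; };                       /* hypothetical */

    static void free_desc(struct my_chan *chan, struct my_desc *desc) { }

    static void complete_one(struct my_chan *chan, struct my_desc *desc)
    {
            dma_async_tx_callback cb;
            void *cb_param;
            bool cb_active;
            unsigned long flags;

            spin_lock_irqsave(&chan->lock, flags);
            cb_active = !!(desc->txd.flags & DMA_PREP_INTERRUPT);
            cb        = desc->txd.callback;
            cb_param  = desc->txd.callback_param;
            free_desc(chan, desc);            /* desc must not be touched below */
            spin_unlock_irqrestore(&chan->lock, flags);

            if (cb_active && cb)
                    cb(cb_param);
    }
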
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
*
* called with the mem_ctls_mutex held
*/
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ bool init)
{
edac_dbg(0, "\n");
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
if (mci->op_state != OP_RUNNING_POLL)
return;
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ if (init)
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
* user space has updated our poll period value, need to
* reset our workq delays
*/
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mc_workq_setup(mci, (unsigned long) value);
+ edac_mc_workq_setup(mci, value, false);
}
mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* This instance is NOW RUNNING */
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 3c0d67381a34..01fae8289cf0 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
- long l;
+ unsigned long l;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtol(val, 0, &l);
+ ret = kstrtoul(val, 0, &l);
if (ret)
return ret;
- if ((int)l != l)
+
+ if (l < 1000)
return -EINVAL;
- *((int *)kp->arg) = l;
+
+ *((unsigned long *)kp->arg) = l;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(l);
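
The parameter handler now parses the poll period as unsigned and rejects anything under 1000 ms before publishing it, replacing the old signed parse with its int-truncation check. A minimal sketch of that validation shape for a hypothetical interval_msec parameter, mirroring the handler signature used above; the module_param_call() registration is shown as the conventional pairing for this style of handler:

    #include <linux/kernel.h>
    #include <linux/moduleparam.h>

    static unsigned long interval_msec = 1000;

    static int set_interval_msec(const char *val, struct kernel_param *kp)
    {
            unsigned long l;
            int ret;

            if (!val)
                    return -EINVAL;

            ret = kstrtoul(val, 0, &l);
            if (ret)
                    return ret;

            if (l < 1000)                     /* refuse sub-second polling */
                    return -EINVAL;

            *(unsigned long *)kp->arg = l;
            return 0;
    }

    module_param_call(interval_msec, set_interval_msec, param_get_ulong,
                      &interval_msec, 0644);
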
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index aea80a5e2bba..dcac982fdc7a 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -943,33 +943,35 @@ static int i7300_get_devices(struct mem_ctl_info *mci)
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
- while (!pvt->pci_dev_16_1_fsb_addr_map ||
- !pvt->pci_dev_16_2_fsb_err_regs) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i7300_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR funcs "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
- goto error;
- }
-
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
+ pdev))) {
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
- pvt->pci_dev_16_1_fsb_addr_map = pdev;
+ if (!pvt->pci_dev_16_1_fsb_addr_map)
+ pvt->pci_dev_16_1_fsb_addr_map =
+ pci_dev_get(pdev);
break;
case 2:
- pvt->pci_dev_16_2_fsb_err_regs = pdev;
+ if (!pvt->pci_dev_16_2_fsb_err_regs)
+ pvt->pci_dev_16_2_fsb_err_regs =
+ pci_dev_get(pdev);
break;
}
}
+ if (!pvt->pci_dev_16_1_fsb_addr_map ||
+ !pvt->pci_dev_16_2_fsb_err_regs) {
+ /* At least one device was not found */
+ i7300_printk(KERN_ERR,
+ "'system address,Process Bus' device not found:"
+ "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
+ goto error;
+ }
+
edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_0_fsb_ctlr),
pvt->pci_dev_16_0_fsb_ctlr->vendor,
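
The rewritten i7300 probe loop leans on the pci_get_device() reference rules: each call drops the reference on the device passed in and returns the next match with a fresh reference held, so any device that should outlive the iteration needs its own pci_dev_get(). A sketch of that idiom:

    #include <linux/pci.h>

    /* Find a specific function of a vendor/device pair; the caller owns the
     * returned reference and must pci_dev_put() it when done.
     */
    static struct pci_dev *find_func(unsigned int vendor, unsigned int device,
                                     unsigned int wanted_func)
    {
            struct pci_dev *pdev = NULL, *found = NULL;

            while ((pdev = pci_get_device(vendor, device, pdev))) {
                    if (!found && PCI_FUNC(pdev->devfn) == wanted_func)
                            found = pci_dev_get(pdev);  /* ref survives the loop */
            }

            return found;
    }
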
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 40a228da4547..ab127cf5c798 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1334,14 +1334,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
* is at addr 8086:2c40, instead of 8086:2c41. So, we need
* to probe for the alternate address in case of failure
*/
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+ }
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
+ !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
*prev);
+ }
if (!pdev) {
if (*prev) {
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index c20602f601ee..98a14f6143a7 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -222,27 +222,19 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct snd_soc_dapm_context *dapm = arizona->dapm;
int ret;
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_force_enable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
}
}
@@ -304,16 +296,12 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (info->micd_reva) {
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index ee5b47904130..9bb2cbd5c9f2 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -27,7 +27,7 @@ FMC_PARAM_BUSID(fwe_drv);
/* The "file=" is like the generic "gateware=" used elsewhere */
static char *fwe_file[FMC_MAX_CARDS];
static int fwe_file_n;
-module_param_array_named(file, fwe_file, charp, &fwe_file_n, 444);
+module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444);
static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
int write)
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 697338772b64..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -403,6 +403,7 @@ config GPIO_GRGPIO
config GPIO_TB10X
bool
+ select GENERIC_IRQ_CHIP
select OF_GPIO
comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088ac59f..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
module_platform_driver(bcm_kona_gpio_driver);
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d3550274b8f7..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef5fab8..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
static int intel_gpio_runtime_idle(struct device *dev)
{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
+ int err = pm_schedule_suspend(dev, 500);
+ return err ?: -EBUSY;
}
static const struct dev_pm_ops intel_gpio_pm_ops = {
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136eceda62..7081304d6797 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
#error GPIO32 option is not enabled for your xtensa core variant
#endif
+#if XCHAL_HAVE_CP
+
static inline unsigned long enable_cp(unsigned long *cpenable)
{
unsigned long flags;
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
local_irq_restore(flags);
}
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+ *cpenable = 0; /* avoid uninitialized value warning */
+ return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
{
return 1; /* input only */
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 3f65dd6676b2..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = ast_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 2fd4a92162cb..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = cirrus_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index dffc836144cc..f4dc9b7a3831 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
+ case DRM_CAP_CURSOR_WIDTH:
+ if (dev->mode_config.cursor_width)
+ req->value = dev->mode_config.cursor_width;
+ else
+ req->value = 64;
+ break;
+ case DRM_CAP_CURSOR_HEIGHT:
+ if (dev->mode_config.cursor_height)
+ req->value = dev->mode_config.cursor_height;
+ else
+ req->value = 64;
+ break;
default:
return -EINVAL;
}
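
The two new capabilities let userspace ask the driver for its preferred cursor size instead of hard-coding 64x64; drivers that leave mode_config.cursor_width/height at zero still report 64. A hedged userspace sketch assuming libdrm's drmGetCap(); the fallback defines carry the uapi values for builds against an older drm.h:

    #include <stdint.h>
    #include <xf86drm.h>

    #ifndef DRM_CAP_CURSOR_WIDTH
    #define DRM_CAP_CURSOR_WIDTH  0x8   /* values from include/uapi/drm/drm.h */
    #define DRM_CAP_CURSOR_HEIGHT 0x9
    #endif

    static void get_cursor_size(int fd, uint64_t *w, uint64_t *h)
    {
            /* drmGetCap() fails on kernels that predate these caps; keep the
             * traditional 64x64 default in that case. */
            if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, w) != 0)
                    *w = 64;
            if (drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, h) != 0)
                    *h = 64;
    }
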
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
- depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS
help
Choose this option if you want to use IPP feature for DRM.
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "Exynos DRM GSC"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..215131ab1dd2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
ret = exynos_drm_subdrv_open(dev, file);
- if (ret) {
- kfree(file_priv);
- file->driver_priv = NULL;
- }
+ if (ret)
+ goto out;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
- kfree(file_priv);
- return PTR_ERR(anon_filp);
+ ret = PTR_ERR(anon_filp);
+ goto out;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
+out:
+ kfree(file_priv);
+ file->driver_priv = NULL;
+ return ret;
}
static void exynos_drm_preclose(struct drm_device *dev,
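
The exynos open() rework above funnels both failure points (the subdrv open and the anonymous file) into a single error label, so file_priv is unwound exactly once instead of in duplicated cleanup blocks. A generic sketch of that goto-out shape with hypothetical types and helpers:

    #include <linux/slab.h>

    struct my_dev;                                   /* hypothetical */
    struct my_file { void *driver_priv; };           /* hypothetical */
    struct my_file_priv { int dummy; };              /* hypothetical */

    static int setup_subdrv(struct my_dev *dev, struct my_file *file);
    static int setup_anon_file(struct my_file_priv *priv);

    static int open_example(struct my_dev *dev, struct my_file *file)
    {
            struct my_file_priv *priv;
            int ret;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;
            file->driver_priv = priv;

            ret = setup_subdrv(dev, file);
            if (ret)
                    goto out;

            ret = setup_anon_file(priv);
            if (ret)
                    goto out;

            return 0;

    out:
            kfree(priv);
            file->driver_priv = NULL;
            return ret;
    }
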
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_NONE;
DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
break;
- };
+ }
return reg_type;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
- * quf == NULL condition means all event deletion.
+ * qbuf == NULL condition means all event deletion.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/hdmi.h>
#include <drm/exynos_drm.h>
@@ -59,19 +60,6 @@
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
- /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
- /* InfoFrame packet type */
- HDMI_PACKET_TYPE_INFOFRAME = 0x80,
- /* Vendor-Specific InfoFrame */
- HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
- /* Auxiliary Video information InfoFrame */
- HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
- /* Audio information InfoFrame */
- HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
};
-struct hdmi_infoframe {
- enum HDMI_PACKET_TYPE type;
- u8 ver;
- u8 len;
-};
-
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
}
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- struct hdmi_infoframe *infoframe)
+ union hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
return;
}
- switch (infoframe->type) {
- case HDMI_PACKET_TYPE_AVI:
+ switch (infoframe->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
/* Output format zero hardcoded ,RGB YBCR selection */
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
- case HDMI_PACKET_TYPE_AUI:
+ case HDMI_INFOFRAME_TYPE_AUDIO:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- struct hdmi_infoframe infoframe;
+ union hdmi_infoframe infoframe;
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.type = HDMI_PACKET_TYPE_AVI;
- infoframe.ver = HDMI_AVI_VERSION;
- infoframe.len = HDMI_AVI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+ infoframe.any.version = HDMI_AVI_VERSION;
+ infoframe.any.length = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
- infoframe.type = HDMI_PACKET_TYPE_AUI;
- infoframe.ver = HDMI_AUI_VERSION;
- infoframe.len = HDMI_AUI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+ infoframe.any.version = HDMI_AUI_VERSION;
+ infoframe.any.length = HDMI_AUI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
/* enable AVI packet every vsync, fixes purple line problem */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..faa77f543a07 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
{
uint8_t buf[PB(5) + 1];
+ memset(buf, 0, sizeof(buf));
buf[HB(0)] = 0x84;
buf[HB(1)] = 0x01;
buf[HB(2)] = 10;
- buf[PB(0)] = 0;
buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
div = 148500 / mode->clock;
+ if (div != 0) {
+ div--;
+ if (div > 3)
+ div = 3;
+ }
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
- reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ reg_write(encoder, REG_ENABLE_SPACE, 0x00);
}
/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
drm_i2c_encoder_destroy(encoder);
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
kfree(priv);
}
@@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client,
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
- priv->current_page = 0;
+ priv->current_page = 0xff;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ if (!priv->cec) {
+ kfree(priv);
+ return -ENODEV;
+ }
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a2bf8e3f739..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7fd2fd2f0a5..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
va_list tmp;
va_copy(tmp, args);
- if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+ len = vsnprintf(NULL, 0, f, tmp);
+ va_end(tmp);
+
+ if (!__i915_error_seek(e, len))
return;
}
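
The i915 error-state fix is about va_list hygiene: vsnprintf(NULL, 0, fmt, args) is the standard way to measure a formatted string, but it consumes the va_list it is given, so the measurement has to run on a va_copy() that is then va_end()'ed. A stand-alone userspace illustration of the same discipline:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Measure first on a copy, then format into an exactly-sized buffer. */
    static char *vstrdup_printf(const char *fmt, va_list args)
    {
            va_list tmp;
            int len;
            char *buf;

            va_copy(tmp, args);
            len = vsnprintf(NULL, 0, fmt, tmp);   /* length excluding the NUL */
            va_end(tmp);
            if (len < 0)
                    return NULL;

            buf = malloc(len + 1);
            if (buf)
                    vsnprintf(buf, len + 1, fmt, args);
            return buf;
    }

    static char *strdup_printf(const char *fmt, ...)
    {
            va_list args;
            char *s;

            va_start(args, fmt);
            s = vstrdup_printf(fmt, args);
            va_end(args);
            return s;
    }
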
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d8fcb1b6f7..9fec71175571 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
} else {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder) pipe;
u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9fa24347963a..4c1672809493 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
if (ring->id == RCS)
len += 6;
+ /*
+ * BSpec MI_DISPLAY_FLIP for IVB:
+ * "The full packet must be contained within the same cache line."
+ *
+ * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+ * cacheline, if we ever start emitting more commands before
+ * the MI_DISPLAY_FLIP we may need to first emit everything else,
+ * then do the cacheline alignment, and finally emit the
+ * MI_DISPLAY_FLIP.
+ */
+ ret = intel_ring_cacheline_align(ring);
+ if (ret)
+ goto err_unpin;
+
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ede4e8e290d..57552eb386b0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
- bool has_aux_irq = true;
+ bool has_aux_irq = HAS_AUX_IRQ(dev);
uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
+ int retry;
if (WARN_ON(send_bytes > 16))
return -E2BIG;
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
- break;
+ return send_bytes;
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ usleep_range(400, 500);
else
return -EIO;
}
- return send_bytes;
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
/* Write a single byte to the aux channel in native mode */
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
int reply_bytes;
uint8_t ack;
int ret;
+ int retry;
if (WARN_ON(recv_bytes > 19))
return -E2BIG;
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
return ret - 1;
}
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ usleep_range(400, 500);
else
return -EIO;
}
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
static int
@@ -1869,10 +1876,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
intel_enable_dp(encoder);
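
The AUX read and write paths now bound their DEFER handling: back off briefly with usleep_range() rather than a busy udelay(), retry up to seven times, and then fail with -EIO instead of spinning forever on a sink that keeps deferring. A sketch of that retry shape, with a hypothetical do_transfer() standing in for the AUX transaction:

    #include <linux/delay.h>
    #include <linux/errno.h>

    #define MAX_DEFERS 7

    struct aux_ctx;                            /* hypothetical context          */
    int do_transfer(struct aux_ctx *ctx);      /* hypothetical: >=0 ok,
                                                  -EAGAIN means "deferred"      */

    static int transfer_with_retries(struct aux_ctx *ctx)
    {
            int retry, ret;

            for (retry = 0; retry < MAX_DEFERS; retry++) {
                    ret = do_transfer(ctx);
                    if (ret >= 0)
                            return ret;               /* acked */
                    if (ret != -EAGAIN)
                            return ret;               /* hard failure */
                    usleep_range(400, 500);           /* defer: back off, retry */
            }

            return -EIO;
    }
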
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b1dc33f47899..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4e960ec7419f..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -226,6 +226,8 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#define MAX_DSLP 1500
+
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
- } else if (dslp > 500) {
+ } else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
- WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
- dslp = 500;
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
}
/* The spec tells us to do this, but we are the only user... */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b7f1742caf87..31b36c5ac894 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
return 0;
}
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+ int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+ int ret;
+
+ if (num_dwords == 0)
+ return 0;
+
+ ret = intel_ring_begin(ring, num_dwords);
+ if (ret)
+ return ret;
+
+ while (num_dwords--)
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
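
intel_ring_cacheline_align() pads the ring tail with MI_NOOP dwords so the MI_DISPLAY_FLIP packet emitted afterwards cannot straddle a 64-byte cache line, which is the IVB restriction quoted in the intel_display.c hunk. A stand-alone illustration of the padding arithmetic, runnable as plain C:

    #include <stdio.h>

    int main(void)
    {
            unsigned int tails[] = { 0, 4, 60, 64, 100 };

            for (unsigned int i = 0; i < sizeof(tails) / sizeof(tails[0]); i++) {
                    unsigned int tail = tails[i];
                    /* bytes needed to reach the next 64-byte boundary
                     * (zero when already aligned); each MI_NOOP is 4 bytes */
                    unsigned int pad_bytes = (64 - (tail & 63)) & 63;

                    printf("tail=%3u -> pad %2u bytes (%u noop dwords)\n",
                           tail, pad_bytes, pad_bytes / 4);
            }
            return 0;
    }
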
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4fe252..0b243ce33714 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index f9adc27ef32a..13b7dd83faa9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = mgag200_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b8583f275e80..968374776db9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (32700 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_EH &&
+ } else if (mdev->type == G200_EH &&
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (37500 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_ER &&
+ } else if (mdev->type == G200_ER &&
(mga_vga_calculate_mode_bandwidth(mode,
bpp) > (55000 * 1024))) {
return MODE_BANDWIDTH;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
spinlock_t lock;
bool stale;
uint32_t width, height;
+ uint32_t x, y;
/* next cursor to scan-out: */
uint32_t next_iova;
@@ -57,9 +58,16 @@ struct mdp4_crtc {
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we currently hold a scanout ref to: */
+ /* the fb that we logically (from PoV of KMS API) hold a ref
+ * to. Which we may not yet be scanning out (we may still
+ * be scanning out previous in case of page_flip while waiting
+ * for gpu rendering to complete:
+ */
struct drm_framebuffer *fb;
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void update_fb(struct drm_crtc *crtc, bool async,
- struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp4_crtc->base.fb = new_fb;
mdp4_crtc->fb = new_fb;
- if (!async) {
- /* enable vblank to pick up the old_fb */
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
- }
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * plane, then call this. Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp4_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+ mdp4_crtc->scanout_fb);
+
+ mdp4_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
- }
- flush |= ovlp2flush(mdp4_crtc->ovlp);
-
- DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
- mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- atomic_or(pending, &mdp4_crtc->pending);
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-}
-
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
if (!fb)
return;
+ drm_framebuffer_reference(fb);
mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- crtc_flush(crtc);
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
- update_fb(crtc, false, crtc->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
if (dma == DMA_E) {
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
return 0;
}
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane = mdp4_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
+ int ret;
- update_fb(crtc, false, crtc->fb);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
- return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
}
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, true, new_fb);
+ update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
static void update_cursor(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
}
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference_unlocked(old_bo);
}
+ crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
@@ -542,12 +591,15 @@ fail:
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
- MDP4_DMA_CURSOR_POS_X(x) |
- MDP4_DMA_CURSOR_POS_Y(y));
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
return 0;
}
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
+ mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 2406027200ec..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
- MDP4_PIPE_SRC_XY_X(crtc_x) |
- MDP4_PIPE_SRC_XY_Y(crtc_y));
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
mdp4_plane_set_scanout(plane, fb);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 71a3b2345eb3..f2794021f086 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp5_crtc->name, ret);
return ret;
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
update_fb(crtc, crtc->fb);
update_scanout(crtc, crtc->fb);
- return ret;
+ return 0;
}
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d60c969ac7..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
fail:
if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova);
/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr(&obj->base);
+ ptr = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
unsigned i;
- mutex_lock(&submit->dev->struct_mutex);
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
- mutex_unlock(&submit->dev->struct_mutex);
ww_acquire_fini(&submit->ticket);
kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (submit)
submit_cleanup(submit, !!ret);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4ebce8be489d..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_drm_private *priv = dev->dev_private;
int i, ret;
- mutex_lock(&dev->struct_mutex);
-
submit->fence = ++priv->next_fence;
gpu->submitted_fence = submit->fence;
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e88145ba1bf5..d310c195bdfe 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv4c.o
nouveau-y += core/subdev/mc/nv50.o
nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1b653dd74a70..08b88591ed60 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
@@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 940eaa5d8b9a..9ad722e4e087 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
if (conf != ~0) {
if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
u32 soff = (ffs(outp.or) - 1) * 0x08;
- u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+ u32 ctrl = nv_rd32(priv, 0x610794 + soff);
u32 datarate;
switch ((ctrl & 0x000f0000) >> 16) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 9a850fe19515..54c1b5b471cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
- if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
nv_error(priv, "runlist %d update timeout\n", engine);
mutex_unlock(&nv_subdev(priv)->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 30ed19c52e05..7a367c402978 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
ustatus &= ~0x04030000;
}
if (ustatus && display) {
- nv_error("%s - TP%d:", name, i);
+ nv_error(priv, "%s - TP%d:", name, i);
nouveau_bitfield_print(nv50_mpc_traps, ustatus);
pr_cont("\n");
ustatus = 0;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index adc88b73d911..3c6738edd127 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -47,6 +47,7 @@ struct nouveau_mc_oclass {
extern struct nouveau_oclass *nv04_mc_oclass;
extern struct nouveau_oclass *nv40_mc_oclass;
extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv4c_mc_oclass;
extern struct nouveau_oclass *nv50_mc_oclass;
extern struct nouveau_oclass *nv94_mc_oclass;
extern struct nouveau_oclass *nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index aa0fbbec7f08..ef0c9c4a8cc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
u16 pcir;
int i;
+ /* there is no prom on nv4x IGPs */
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ return;
+
/* enable access to rom */
if (device->card_type >= NV_50)
pcireg = 0x088050;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 9159a5ccee93..265d1253624a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) {
.fini = _nouveau_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv10_ram_oclass,
+ .base.ram = &nv1a_ram_oclass,
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index b0d5c31606c1..81a408e7d034 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
extern const struct nouveau_mc_intr nv04_mc_intr[];
int nv04_mc_init(struct nouveau_object *);
void nv40_mc_msi_rearm(struct nouveau_mc *);
+int nv44_mc_init(struct nouveau_object *object);
int nv50_mc_init(struct nouveau_object *);
extern const struct nouveau_mc_intr nv50_mc_intr[];
extern const struct nouveau_mc_intr nvc0_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 3bfee5c6c4f2..cc4d0d2d886e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -24,7 +24,7 @@
#include "nv04.h"
-static int
+int
nv44_mc_init(struct nouveau_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
new file mode 100644
index 000000000000..a75c35ccf25c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+
+#include "nv04.h"
+
+static void
+nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+ struct nv04_mc_priv *priv = (void *)pmc;
+ nv_wr08(priv, 0x088050, 0xff);
+}
+
+struct nouveau_oclass *
+nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x4c),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv44_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nv04_mc_intr,
+ .msi_rearm = nv4c_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 4ef83df2b246..83face3f608f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -106,6 +106,29 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
return 0;
}
+/*
+ * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special
+ * requirements on the fourth parameter, so a private implementation
+ * is used instead of acpi_check_dsm().
+ */
+static int nouveau_check_optimus_dsm(acpi_handle handle)
+{
+ int result;
+
+ /*
+ * Function 0 returns a Buffer containing available functions.
+ * The args parameter is ignored for function 0, so just put 0 in it
+ */
+ if (nouveau_optimus_dsm(handle, 0, 0, &result))
+ return 0;
+
+ /*
+ * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
+ * If the n-th bit is enabled, function n is supported
+ */
+ return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
+}
+
static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
int ret = 0;
@@ -207,8 +230,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100,
- 1 << NOUVEAU_DSM_OPTIMUS_CAPS))
+ if (nouveau_check_optimus_dsm(dhandle))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval & NOUVEAU_DSM_HAS_OPT) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 488686d490c0..4aed1714b9ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- if (!node->memtype)
+ if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 78c8e7146d56..89c484d8ac26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail_device;
+ dev->irq_enabled = true;
+
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
@@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_object *device;
+ dev->irq_enabled = false;
device = drm->client.base.device;
drm_put_dev(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 81638d7f2eff..471347edc27e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state)
{
struct nouveau_device *device = nouveau_dev(priv);
- if (device->chipset >= 0x40)
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ nv_wr32(device, 0x088060, state);
+ else if (device->chipset >= 0x40)
nv_wr32(device, 0x088054, state);
else
nv_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a9338c85630f..daa4dd375ab1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
- int bpc = radeon_get_monitor_bpc(connector);
+ int bpc = radeon_crtc->bpc;
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
/* reset the pll flags */
@@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
/* Set NUM_BANKS. */
- if (rdev->family >= CHIP_BONAIRE) {
+ if (rdev->family >= CHIP_TAHITI) {
unsigned tileb, index, num_banks, tile_split_bytes;
/* Calculate the macrotile mode index. */
@@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ if (rdev->family >= CHIP_BONAIRE)
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ else
+ num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
- /* SI and older. */
- if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
+ /* NI and older. */
+ if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
@@ -1773,6 +1774,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
+ } else if (ASIC_IS_DCE41(rdev)) {
+ /* Don't share PLLs on DCE4.1 chips */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ }
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
@@ -1800,7 +1815,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (pll != ATOM_PPLL_INVALID)
return pll;
}
- } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
+ } else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a42d61571f49..2cec2ab02f80 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
- if (connector)
- bpc = radeon_get_monitor_bpc(connector);
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
switch (bpc) {
case 0:
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0fbd36f3d4e9..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
#include "cypress_dpm.h"
#include "btc_dpm.h"
#include "atom.h"
+#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ if (rdev->family >= CHIP_CEDAR) {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ } else {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc);
+ }
+ }
+}
+
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
+# define CURRENT_PROFILE_INDEX_SHIFT 4
+
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 713a5d359901..94e858751994 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
return !ASIC_IS_NODCE(rdev);
}
-static void dce6_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
+ if (!pin)
+ return;
+
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
- AUDIO_ENABLED);
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ enable ? AUDIO_ENABLED : 0);
}
static const u32 pin_offsets[7] =
@@ -323,7 +325,8 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.pin[i].connected = false;
rdev->audio.pin[i].offset = pin_offsets[i];
rdev->audio.pin[i].id = i;
- dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ /* disable audio. it will be set up later */
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f2b9e21ce4da..8a2c010b7dc5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
- break;
+ break;
default:
break;
}
@@ -5475,9 +5475,9 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- evergreen_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 0c6d5cef4cf1..05b0c95813fd 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -306,6 +306,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ if (ASIC_IS_DCE6(rdev)) {
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ dce6_audio_enable(rdev, dig->afmt->pin, false);
+ } else {
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+ }
+
evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
@@ -409,12 +418,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_enable(rdev, dig->afmt->pin, true);
+ else
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -427,15 +440,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable) {
- if (ASIC_IS_DCE6(rdev))
- dig->afmt->pin = dce6_audio_get_pin(rdev);
- else
- dig->afmt->pin = r600_audio_get_pin(rdev);
- } else {
- dig->afmt->pin = NULL;
- }
-
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b6e01d5d2cce..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
int kv_dpm_late_enable(struct radeon_device *rdev)
{
- int ret;
+ int ret = 0;
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c351226ecb31..ca814276b075 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *ps = ni_get_ps(rps);
- u16 vddc;
struct rv7xx_pl *pl = &ps->performance_levels[index];
ps->performance_level_count = index + 1;
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56140b4e5bb2..cdbc4171fe73 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3991,6 +3991,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 47fc2b886979..bffac10c4296 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
/* enable the audio stream */
-static void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
+ if (!pin)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], true);
+ /* disable audio. it will be set up later */
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7b399dc5fd54..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_008C64_SQ_VSTMP_RING_SIZE:
case R_0288C8_SQ_GS_VERT_ITEMSIZE:
/* get value to populate the IB don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SQ_CONFIG:
track->sq_config = radeon_get_ib_value(p, idx);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 3016fc14f502..85a2bb28aed2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
@@ -460,6 +457,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
@@ -531,6 +532,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
/*
@@ -651,11 +655,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable)
- dig->afmt->pin = r600_audio_get_pin(rdev);
- else
- dig->afmt->pin = NULL;
-
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a8ac1cd6b4c..e887d027b6d0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -135,6 +135,9 @@ extern int radeon_hard_reset;
/* R600+ */
#define R600_RING_TYPE_UVD_INDEX 5
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
/*
* Semaphores.
*/
-/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
@@ -2745,6 +2747,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f74db43346fd..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = {
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+ .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b3bc433eed4c..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
int sumo_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 485848f889f5..fa9a9c02751e 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u\n", output.version);
+ printk("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d680608f6f5b..fbd8b930f2be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_crtc->max_cursor_width = CURSOR_WIDTH;
radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
}
+ dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+ dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ec8c388eec17..84a1bbb75f91 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
* 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 36
+#define KMS_DRIVER_MINOR 37
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 114d1672d616..2aecd6dc2610 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -537,6 +537,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
radeon_vm_init(rdev, &fpriv->vm);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r)
+ return r;
+
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
@@ -544,6 +548,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
+
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1b783f0e6d3a..15e44a7281ab 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 2b42aa1914f2..9006b32d5eed 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,14 +34,15 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
+ uint32_t *cpu_addr;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
- &(*semaphore)->sa_bo, 8, 8, true);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
+ 8 * RADEON_NUM_SYNCS, 8, true);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
- *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ cpu_addr[i] = 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
(*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int ring)
{
+ unsigned count = 0;
int i, r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
return -EINVAL;
}
+ if (++count > RADEON_NUM_SYNCS) {
+ /* not enough room, wait manually */
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
/* allocate enough space for sync command */
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
if (r) {
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
radeon_ring_commit(rdev, &rdev->ring[i]);
radeon_fence_note_sync(fence, ring);
+
+ semaphore->gpu_addr += 8;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6781fee1eaad..3e6804b2b2ef 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -171,6 +171,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
+
release_firmware(rdev->uvd_fw);
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
0x000088C8 VGT_GS_PER_ES
0x000088E8 VGT_GS_PER_VS
0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6c772e58c784..4e37a42305d8 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1955,9 +1955,9 @@ void rv770_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- rv770_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 80c595aba359..b5f63f5e22a3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct rv7xx_ps *ps = rv770_get_ps(rps);
u32 sclk, mclk;
- u16 vddc;
struct rv7xx_pl *pl;
switch (index) {
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = 300;
-
- /* quirks */
- /* ASUS K70AF */
- if ((rdev->pdev->device == 0x9553) &&
- (rdev->pdev->subsystem_vendor == 0x1043) &&
- (rdev->pdev->subsystem_device == 0x1c42))
- switch_limit = 200;
+ u32 switch_limit = 200; /* 300 */
/* RV770 */
/* mclk switching doesn't seem to work reliably on desktop RV770s */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 09ec4f6c53bb..83578324e5d1 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6338,6 +6338,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0471501338fb..0a2f5b4bca43 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f121efe12dc5..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct radeon_ps *rps = &pi->current_rps;
struct sumo_ps *ps = sumo_get_ps(rps);
struct sumo_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2d447192d6f7..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
struct trinity_ps *ps = trinity_get_ps(rps);
struct trinity_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 824550db3fed..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
- return;
}
/**
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 88a529008ce0..c71594754f46 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -104,7 +104,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
static void tegra_drm_lastclose(struct drm_device *drm)
{
-#ifdef CONFIG_TEGRA_DRM_FBDEV
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_restore_mode(tegra->fbdev);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 338f7f6561d7..0266fb40479e 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -15,6 +15,7 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
+ bool enabled;
struct clk *clk_parent;
struct clk *clk;
@@ -89,6 +90,9 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (rgb->enabled)
+ return 0;
+
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
@@ -122,6 +126,8 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ rgb->enabled = true;
+
return 0;
}
@@ -130,6 +136,9 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (!rgb->enabled)
+ return 0;
+
value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
@@ -144,6 +153,8 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ rgb->enabled = false;
+
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 3302f99e7497..764be36397fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->ttm.func = &ttm_agp_func;
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(agp_be);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 37079859afc8..53b51c4e671a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- if (!kref_get_unless_zero(&ref->kref)) {
+ if (kref_get_unless_zero(&ref->kref)) {
rcu_read_unlock();
break;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9af99084b344..75f319090043 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
pgoff_t i;
struct page **page = ttm->pages;
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ return;
+
for (i = 0; i < ttm->num_pages; ++i) {
(*page)->mapping = NULL;
(*page++)->index = 0;
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..f58dc7dd15c5 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -261,12 +261,7 @@ typedef enum SVGA3dSurfaceFormat {
/* Planar video formats. */
SVGA3D_YV12 = 121,
- /* Shader constant formats. */
- SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
- SVGA3D_SURFACE_SHADERCONST_INT = 123,
- SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
-
- SVGA3D_FORMAT_MAX = 125,
+ SVGA3D_FORMAT_MAX = 122,
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -1223,9 +1218,19 @@ typedef enum {
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
-
+#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
+#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
+#define SVGA_3D_CMD_GB_MOB_FENCE 1133
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
+#define SVGA_3D_CMD_NOP_ERROR 1137
+
+#define SVGA_3D_CMD_RESERVED1 1138
+#define SVGA_3D_CMD_RESERVED2 1139
+#define SVGA_3D_CMD_RESERVED3 1140
+#define SVGA_3D_CMD_RESERVED4 1141
+#define SVGA_3D_CMD_RESERVED5 1142
#define SVGA_3D_CMD_MAX 1142
#define SVGA_3D_CMD_FUTURE_MAX 3000
@@ -1973,8 +1978,7 @@ struct {
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
typedef
@@ -1984,15 +1988,13 @@ struct {
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
typedef
struct {
SVGAOTableType type;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
/*
@@ -2005,8 +2007,7 @@ struct SVGA3dCmdDefineGBMob {
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
@@ -2017,8 +2018,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
typedef
struct SVGA3dCmdDestroyGBMob {
SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
/*
@@ -2031,8 +2031,7 @@ struct SVGA3dCmdRedefineGBMob {
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
/*
@@ -2045,8 +2044,7 @@ struct SVGA3dCmdDefineGBMob64 {
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
/*
@@ -2059,8 +2057,7 @@ struct SVGA3dCmdRedefineGBMob64 {
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
/*
@@ -2070,8 +2067,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
typedef
struct SVGA3dCmdUpdateGBMobMapping {
SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
/*
@@ -2087,7 +2083,8 @@ struct SVGA3dCmdDefineGBSurface {
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
-} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+} __packed
+SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
/*
* Destroy a guest-backed surface.
@@ -2096,7 +2093,8 @@ struct SVGA3dCmdDefineGBSurface {
typedef
struct SVGA3dCmdDestroyGBSurface {
uint32 sid;
-} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+} __packed
+SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
/*
* Bind a guest-backed surface to an object.
@@ -2106,7 +2104,8 @@ typedef
struct SVGA3dCmdBindGBSurface {
uint32 sid;
SVGAMobId mobid;
-} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+} __packed
+SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
/*
* Conditionally bind a mob to a guest backed surface if testMobid
@@ -2123,7 +2122,7 @@ struct{
SVGAMobId testMobid;
SVGAMobId mobid;
uint32 flags;
-}
+} __packed
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
/*
@@ -2135,7 +2134,8 @@ typedef
struct SVGA3dCmdUpdateGBImage {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
-} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
/*
* Update an entire guest-backed surface.
@@ -2145,7 +2145,8 @@ struct SVGA3dCmdUpdateGBImage {
typedef
struct SVGA3dCmdUpdateGBSurface {
uint32 sid;
-} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
/*
* Readback an image in a guest-backed surface.
@@ -2155,7 +2156,8 @@ struct SVGA3dCmdUpdateGBSurface {
typedef
struct SVGA3dCmdReadbackGBImage {
SVGA3dSurfaceImageId image;
-} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+} __packed
+SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
/*
* Readback an entire guest-backed surface.
@@ -2165,7 +2167,8 @@ struct SVGA3dCmdReadbackGBImage {
typedef
struct SVGA3dCmdReadbackGBSurface {
uint32 sid;
-} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+} __packed
+SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
/*
* Readback a sub rect of an image in a guest-backed surface. After
@@ -2179,7 +2182,7 @@ struct SVGA3dCmdReadbackGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
-}
+} __packed
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
/*
@@ -2190,7 +2193,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
typedef
struct SVGA3dCmdInvalidateGBImage {
SVGA3dSurfaceImageId image;
-} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
/*
* Invalidate an entire guest-backed surface.
@@ -2200,7 +2204,8 @@ struct SVGA3dCmdInvalidateGBImage {
typedef
struct SVGA3dCmdInvalidateGBSurface {
uint32 sid;
-} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
/*
* Invalidate a sub rect of an image in a guest-backed surface. After
@@ -2214,7 +2219,7 @@ struct SVGA3dCmdInvalidateGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
-}
+} __packed
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
/*
@@ -2224,7 +2229,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
typedef
struct SVGA3dCmdDefineGBContext {
uint32 cid;
-} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+} __packed
+SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
/*
* Destroy a guest-backed context.
@@ -2233,7 +2239,8 @@ struct SVGA3dCmdDefineGBContext {
typedef
struct SVGA3dCmdDestroyGBContext {
uint32 cid;
-} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+} __packed
+SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
/*
* Bind a guest-backed context.
@@ -2252,7 +2259,8 @@ struct SVGA3dCmdBindGBContext {
uint32 cid;
SVGAMobId mobid;
uint32 validContents;
-} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+} __packed
+SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
/*
* Readback a guest-backed context.
@@ -2262,7 +2270,8 @@ struct SVGA3dCmdBindGBContext {
typedef
struct SVGA3dCmdReadbackGBContext {
uint32 cid;
-} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+} __packed
+SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
/*
* Invalidate a guest-backed context.
@@ -2270,7 +2279,8 @@ struct SVGA3dCmdReadbackGBContext {
typedef
struct SVGA3dCmdInvalidateGBContext {
uint32 cid;
-} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+} __packed
+SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
/*
* Define a guest-backed shader.
@@ -2281,7 +2291,8 @@ struct SVGA3dCmdDefineGBShader {
uint32 shid;
SVGA3dShaderType type;
uint32 sizeInBytes;
-} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+} __packed
+SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
/*
* Bind a guest-backed shader.
@@ -2291,7 +2302,8 @@ typedef struct SVGA3dCmdBindGBShader {
uint32 shid;
SVGAMobId mobid;
uint32 offsetInBytes;
-} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+} __packed
+SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
/*
* Destroy a guest-backed shader.
@@ -2299,7 +2311,8 @@ typedef struct SVGA3dCmdBindGBShader {
typedef struct SVGA3dCmdDestroyGBShader {
uint32 shid;
-} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+} __packed
+SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
typedef
struct {
@@ -2314,14 +2327,16 @@ struct {
* Note that FLOAT and INT constants are 4-dwords in length, while
* BOOL constants are 1-dword in length.
*/
-} SVGA3dCmdSetGBShaderConstInline;
+} __packed
+SVGA3dCmdSetGBShaderConstInline;
/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
typedef
struct {
uint32 cid;
SVGA3dQueryType type;
-} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+} __packed
+SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
typedef
struct {
@@ -2329,7 +2344,8 @@ struct {
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
-} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+} __packed
+SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
/*
@@ -2346,21 +2362,22 @@ struct {
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
-} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+} __packed
+SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
typedef
struct {
SVGAMobId mobid;
uint32 fbOffset;
uint32 initalized;
-}
+} __packed
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
typedef
struct {
SVGAMobId mobid;
uint32 gartOffset;
-}
+} __packed
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
@@ -2368,7 +2385,7 @@ typedef
struct {
uint32 gartOffset;
uint32 numPages;
-}
+} __packed
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
@@ -2385,27 +2402,27 @@ struct {
int32 xRoot;
int32 yRoot;
uint32 flags;
-}
+} __packed
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
-}
+} __packed
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
SVGA3dSurfaceImageId image;
-}
+} __packed
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
SVGA3dBox box;
-}
+} __packed
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
/*
@@ -2583,4 +2600,28 @@ typedef union {
float f;
} SVGA3dDevCapResult;
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
index 8369c3ba10fe..ef3385096145 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -38,8 +38,11 @@
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
+#define u64 uint64_t
+#define U32_MAX ((u32)~0U)
#endif /* __KERNEL__ */
@@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
static inline u32 clamped_umul32(u32 a, u32 b)
{
- uint64_t tmp = (uint64_t) a*b;
- return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+ u64 tmp = (u64) a*b;
+ return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
}
static inline const struct svga3d_surface_desc *
@@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
bool cubemap)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- u32 total_size = 0;
+ u64 total_size = 0;
u32 mip;
for (mip = 0; mip < num_mip_levels; mip++) {
@@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
if (cubemap)
total_size *= SVGA3D_MAX_SURFACE_FACES;
- return total_size;
+ return (u32) min_t(u64, total_size, (u64) U32_MAX);
}
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 71defa4d2d75..11323dd5196f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,10 +169,17 @@ enum {
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
+ SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
+ SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
- SVGA_REG_TOP = 53, /* Must be 1 more than the last register */
+ SVGA_REG_CMD_PREPEND_LOW = 53,
+ SVGA_REG_CMD_PREPEND_HIGH = 54,
+ SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+ SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+ SVGA_REG_MOB_MAX_SIZE = 57,
+ SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..1e80152674b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
+ mutex_lock(&dev_priv->binding_mutex);
+ (void) vmw_context_binding_state_kill
+ (&container_of(res, struct vmw_user_context, res)->cbs);
(void) vmw_gb_context_destroy(res);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+ mutex_unlock(&dev_priv->binding_mutex);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_state_kill(&uctx->cbs);
+ vmw_context_binding_state_scrub(&uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBContext body;
} *cmd;
- struct vmw_user_context *uctx =
- container_of(res, struct vmw_user_context, res);
-
- BUG_ON(!list_empty(&uctx->cbs.list));
if (likely(res->id == -1))
return 0;
@@ -528,8 +530,9 @@ out_unlock:
* vmw_context_scrub_shader - scrub a shader binding from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
*/
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -548,7 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.shader_type;
- cmd->body.shid = SVGA3D_INVALID_ID;
+ cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -559,8 +562,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
* from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
*/
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -579,7 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid = SVGA3D_INVALID_ID;
+ cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +596,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
* vmw_context_scrub_texture - scrub a texture binding from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
*
* TODO: Possibly complement this function with a function that takes
* a list of texture bindings and combines them to a single command.
*/
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -619,7 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = bi->i1.texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+ cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -692,6 +699,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
vmw_context_binding_drop(loc);
loc->bi = *bi;
+ loc->bi.scrubbed = false;
list_add_tail(&loc->ctx_list, &cbs->list);
INIT_LIST_HEAD(&loc->res_list);
@@ -727,12 +735,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
if (loc->bi.ctx != NULL)
vmw_context_binding_drop(loc);
- loc->bi = *bi;
- list_add_tail(&loc->ctx_list, &cbs->list);
- if (bi->res != NULL)
+ if (bi->res != NULL) {
+ loc->bi = *bi;
+ list_add_tail(&loc->ctx_list, &cbs->list);
list_add_tail(&loc->res_list, &bi->res->binding_head);
- else
- INIT_LIST_HEAD(&loc->res_list);
+ }
}
/**
@@ -746,7 +753,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
*/
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
- (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+ if (!cb->bi.scrubbed) {
+ (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+ cb->bi.scrubbed = true;
+ }
vmw_context_binding_drop(cb);
}
@@ -768,6 +778,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
}
/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
* vmw_context_binding_res_list_kill - Kill all bindings on a
* resource binding list
*
@@ -785,6 +816,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
}
/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
* vmw_context_binding_state_transfer - Commit staged binding info
*
* @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +855,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
list_for_each_entry_safe(entry, next, &from->list, ctx_list)
vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+ struct vmw_ctx_binding *entry;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->bi.scrubbed))
+ continue;
+
+ if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+ SVGA3D_INVALID_ID))
+ continue;
+
+ ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->bi.scrubbed = false;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+ return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
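The scrub/rebind split introduced in this file follows one pattern throughout: each binding carries a scrubbed flag, scrubbing emits the unbind command at most once and sets the flag, and rebinding walks the same list and re-emits bind commands only for flagged entries, clearing the flag on success. A reduced, self-contained sketch of that state machine (the types and the emit callback are illustrative stand-ins, not the driver's per-type scrub functions):

#include <stdbool.h>

/* Illustrative stand-ins: one flagged binding per list node and a single
 * emit callback playing the role of the per-type scrub functions, which
 * take a "rebind" flag after this patch. */
struct binding_example {
	struct binding_example *next;
	bool scrubbed;
	int resource_id;
};

typedef int (*emit_fn)(int resource_id, bool rebind);

static void scrub_all_example(struct binding_example *head, emit_fn emit)
{
	struct binding_example *b;

	for (b = head; b; b = b->next) {
		if (!b->scrubbed) {
			emit(b->resource_id, false);	/* unbind once */
			b->scrubbed = true;
		}
	}
}

static int rebind_all_example(struct binding_example *head, emit_fn emit)
{
	struct binding_example *b;
	int ret;

	for (b = head; b; b = b->next) {
		if (!b->scrubbed)
			continue;
		ret = emit(b->resource_id, true);	/* re-emit the bind */
		if (ret)
			return ret;	/* caller fails the submission */
		b->scrubbed = false;
	}
	return 0;
}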
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..0083cbf99edf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->memory_size = 512*1024*1024;
}
dev_priv->max_mob_pages = 0;
+ dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint64_t mem_size =
vmw_read(dev_priv,
@@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+ dev_priv->max_mob_size =
+ vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
} else
dev_priv->prim_bb_mem = dev_priv->vram_size;
@@ -941,6 +944,7 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
+ vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -960,11 +964,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
+ vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+ if (IS_ERR(vmw_fp->shman))
+ goto out_no_shman;
+
file_priv->driver_priv = vmw_fp;
dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
+out_no_shman:
+ ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..07831554dad7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,7 +40,7 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20121114"
+#define VMWGFX_DRIVER_DATE "20140228"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
@@ -75,10 +75,14 @@
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
+struct vmw_compat_shader_manager;
+
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
+ bool gb_aware;
+ struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
+ bool scrubbed;
union {
SVGA3dShaderType shader_type;
SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the call made from the kernel */
- struct ttm_object_file *tfile;
+ struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
+ struct list_head staged_shaders;
};
struct vmw_legacy_display;
@@ -380,6 +386,7 @@ struct vmw_private {
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
+ uint32_t max_mob_size;
uint32_t memory_size;
bool has_gmr;
bool has_mob;
@@ -569,6 +576,8 @@ struct vmw_user_resource_conv;
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +966,9 @@ extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
/*
* Surface management - vmwgfx_surface.c
@@ -991,6 +1003,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a5f1eb55c5a..efb575a7996c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
- vmw_context_binding_state_transfer
- (val->res, val->staged_bindings);
+ if (!backoff) {
+ vmw_context_binding_state_transfer
+ (val->res, val->staged_bindings);
+ }
kfree(val->staged_bindings);
val->staged_bindings = NULL;
}
@@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state re-emission.
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx)
+{
+ struct list_head *binding_list;
+ struct vmw_ctx_binding *entry;
+ int ret = 0;
+ struct vmw_resource *res;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ binding_list = vmw_context_binding_list(ctx);
+
+ list_for_each_entry(entry, binding_list, ctx_list) {
+ res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ if (unlikely(res == NULL))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ mutex_unlock(&dev_priv->binding_mutex);
+ return ret;
+}
+
+/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
- list_for_each_entry(rel, list, head)
- cb[rel->offset] = rel->res->id;
+ list_for_each_entry(rel, list, head) {
+ if (likely(rel->res != NULL))
+ cb[rel->offset] = rel->res->id;
+ else
+ cb[rel->offset] = SVGA_3D_CMD_NOP;
+ }
}
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
*/
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id,
- struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t id,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (*id == SVGA3D_INVALID_ID) {
+ if (id == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
* resource
*/
- if (likely(rcache->valid && *id == rcache->handle)) {
+ if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->tfile,
- *id,
+ sw_context->fp->tfile,
+ id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id);
+ (unsigned) id);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = *id;
+ rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
if (p_val)
*p_val = node;
- if (node->first_usage && res_type == vmw_res_context) {
+ if (dev_priv->has_mob && node->first_usage &&
+ res_type == vmw_res_context) {
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
@@ -481,6 +534,59 @@ out_no_reloc:
}
/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
+{
+ return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+ converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
+ if (likely(!val->staged_bindings))
+ continue;
+
+ ret = vmw_context_rebind_all(val->res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to rebind context.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
@@ -496,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
- __le32 cid;
+ uint32_t cid;
} *cmd;
cmd = container_of(header, struct vmw_cid_cmd, header);
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+ header);
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
&cmd->body.sid, NULL);
}
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_define_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineShader body;
+ } *cmd;
+ int ret;
+ size_t size;
+
+ cmd = container_of(header, struct vmw_shader_define_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ size = cmd->header.size - sizeof(cmd->body);
+ ret = vmw_compat_shader_add(sw_context->fp->shman,
+ cmd->body.shid, cmd + 1,
+ cmd->body.type, size,
+ sw_context->fp->tfile,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+
+ return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_destroy_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_shader_destroy_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+
+ return 0;
+}
+
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
struct vmw_resource_val_node *res_node;
-
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &res_node);
+ u32 shid = cmd->body.shid;
+
+ if (shid != SVGA3D_INVALID_ID)
+ (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+ cmd->body.type,
+ &shid);
+
+ ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ shid,
+ &cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
@@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_const_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShaderConst body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_const_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob)
+ header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+ return 0;
+}
+
+/**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
* command
*
@@ -1595,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
return 0;
}
-static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
- true, true, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
- true, true, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
- true, true, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -1792,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
goto out_invalid;
entry = &vmw_cmd_entries[cmd_id];
+ if (unlikely(!entry->func))
+ goto out_invalid;
+
if (unlikely(!entry->user_allow && !sw_context->kernel))
goto out_privileged;
@@ -2171,7 +2414,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else
sw_context->kernel = true;
- sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
@@ -2188,16 +2431,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
+ INIT_LIST_HEAD(&sw_context->staged_shaders);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
@@ -2225,6 +2469,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
}
+ if (dev_priv->has_mob) {
+ ret = vmw_rebind_contexts(sw_context);
+ if (unlikely(ret != 0))
+ goto out_unlock_binding;
+ }
+
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2526,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
+ vmw_compat_shaders_commit(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -2289,10 +2541,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
@@ -2301,6 +2554,8 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
+ vmw_compat_shaders_revert(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 116c49736763..47b70949bf3a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+struct svga_3d_compat_cap {
+ SVGA3dCapsRecordHeader header;
+ SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+ param->value = SVGA3D_HWVERSION_WS8_B1;
+ break;
+ }
+
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
@@ -69,19 +80,31 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
}
case DRM_VMW_PARAM_MAX_SURF_MEMORY:
- param->value = dev_priv->memory_size;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ !vmw_fp->gb_aware)
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+ else
+ param->value = dev_priv->memory_size;
break;
case DRM_VMW_PARAM_3D_CAPS_SIZE:
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
- param->value = SVGA3D_DEVCAP_MAX;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ vmw_fp->gb_aware)
+ param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ param->value = sizeof(struct svga_3d_compat_cap) +
+ sizeof(uint32_t);
else
param->value = (SVGA_FIFO_3D_CAPS_LAST -
- SVGA_FIFO_3D_CAPS + 1);
- param->value *= sizeof(uint32_t);
+ SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
break;
case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+ vmw_fp->gb_aware = true;
param->value = dev_priv->max_mob_pages * PAGE_SIZE;
break;
+ case DRM_VMW_PARAM_MAX_MOB_SIZE:
+ param->value = dev_priv->max_mob_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -91,6 +114,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+ size_t size)
+{
+ struct svga_3d_compat_cap *compat_cap =
+ (struct svga_3d_compat_cap *) bounce;
+ unsigned int i;
+ size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+ unsigned int max_size;
+
+ if (size < pair_offset)
+ return -EINVAL;
+
+ max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+ if (max_size > SVGA3D_DEVCAP_MAX)
+ max_size = SVGA3D_DEVCAP_MAX;
+
+ compat_cap->header.length =
+ (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+ compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < max_size; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ compat_cap->pairs[i][0] = i;
+ compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return 0;
+}
+
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -104,41 +159,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void *bounce;
int ret;
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
- if (gb_objects)
- size = SVGA3D_DEVCAP_MAX;
+ if (gb_objects && vmw_fp->gb_aware)
+ size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (gb_objects)
+ size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
else
- size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
- size *= sizeof(uint32_t);
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
if (arg->max_size < size)
size = arg->max_size;
- bounce = vmalloc(size);
+ bounce = vzalloc(size);
if (unlikely(bounce == NULL)) {
DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
return -ENOMEM;
}
- if (gb_objects) {
- int i;
+ if (gb_objects && vmw_fp->gb_aware) {
+ int i, num;
uint32_t *bounce32 = (uint32_t *) bounce;
+ num = size / sizeof(uint32_t);
+ if (num > SVGA3D_DEVCAP_MAX)
+ num = SVGA3D_DEVCAP_MAX;
+
mutex_lock(&dev_priv->hw_mutex);
- for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+ for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
mutex_unlock(&dev_priv->hw_mutex);
-
+ } else if (gb_objects) {
+ ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+ if (unlikely(ret != 0))
+ goto out_err;
} else {
-
fifo_mem = dev_priv->mmio_virt;
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
}
@@ -146,6 +209,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
ret = copy_to_user(buffer, bounce, size);
if (ret)
ret = -EFAULT;
+out_err:
vfree(bounce);
if (unlikely(ret != 0))
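The sizing rules in this ioctl are easy to misread, so the arithmetic deserves to be spelled out: a guest-backed-aware client gets SVGA3D_DEVCAP_MAX raw 32-bit values, while a legacy client gets one caps record (header plus one (index, value) pair per devcap) followed by a terminating zero dword, with header.length counted in 32-bit words. A hedged sketch of that computation, using a made-up devcap count so the numbers can be followed (EXAMPLE_DEVCAP_MAX and the struct names are placeholders, not driver symbols):

#include <stddef.h>
#include <stdint.h>

/* EXAMPLE_DEVCAP_MAX is a placeholder; the real bound is SVGA3D_DEVCAP_MAX. */
#define EXAMPLE_DEVCAP_MAX 260

typedef uint32_t cap_pair[2];	/* (devcap index, devcap value) */

struct compat_cap_reply {
	uint32_t length;	/* in 32-bit words, including this header */
	uint32_t type;		/* SVGA3DCAPS_RECORD_DEVCAPS */
	cap_pair pairs[EXAMPLE_DEVCAP_MAX];
};

/* Buffer size reported to user space: the record plus one terminating zero
 * dword, mirroring sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t). */
static size_t compat_reply_size(void)
{
	return sizeof(struct compat_cap_reply) + sizeof(uint32_t);
}

/* header.length counts 32-bit words from the start of the record to the end
 * of the pair array, as vmw_fill_compat_cap() computes it above. */
static uint32_t compat_header_length(unsigned int num_pairs)
{
	return (offsetof(struct compat_cap_reply, pairs) +
		num_pairs * sizeof(cap_pair)) / sizeof(uint32_t);
}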
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 4910e7b81811..04a64b8cd3cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+ ret = -ENOMEM;
goto out_no_fifo;
}
@@ -187,18 +188,20 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
bo = otable->page_table->pt_bo;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL))
- DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.type = type;
- cmd->body.baseAddress = 0;
- cmd->body.sizeInBytes = 0;
- cmd->body.validSizeInBytes = 0;
- cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable "
+ "takedown.\n");
+ } else {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
if (bo) {
int ret;
@@ -561,11 +564,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for Memory "
"Object unbinding.\n");
+ } else {
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
- cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.mobid = mob->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
if (bo) {
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6fdd82d42f65..9757b57f8388 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+ return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
/**
* vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
vmw_dmabuf_unreference(&res->backup);
}
- if (likely(res->hw_destroy != NULL))
+ if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_kill(&res->binding_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
id = res->id;
if (res->res_free != NULL)
@@ -418,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- (user) ? ttm_bo_type_device :
- ttm_bo_type_kernel, placement,
+ ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..ee3856578a12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+ VMW_COMPAT_COMMITED,
+ VMW_COMPAT_ADD,
+ VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+ u32 handle;
+ struct ttm_object_file *tfile;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+ struct drm_open_hash shaders;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
+static int vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
+{
+ struct vmw_user_shader *ushader;
+ struct vmw_resource *res, *tmp;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_user_shader_size == 0))
+ vmw_user_shader_size =
+ ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out;
+ }
+
+ ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+ if (unlikely(ushader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ res = &ushader->shader.res;
+ ushader->base.shareable = false;
+ ushader->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_user_shader_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &ushader->base, false,
+ VMW_RES_SHADER,
+ &vmw_user_shader_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ if (handle)
+ *handle = ushader->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out:
+ return ret;
+}
+
+
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_shader *ushader;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,326 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
goto out_bad_arg;
}
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_bad_arg;
- if (unlikely(vmw_user_shader_size == 0))
- vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
- + 128;
+ ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
- ret = ttm_read_lock(&vmaster->lock, true);
+ ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+ vmw_dmabuf_unreference(&buffer);
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, this should be a pointer to the user_key.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = *user_key | (shader_type << 24);
+
+ ret = drm_ht_find_item(&man->shaders, key, &hash);
if (unlikely(ret != 0))
return ret;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_shader_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader"
- " creation.\n");
- goto out_unlock;
+ *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+ hash)->handle;
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+ struct vmw_compat_shader *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+ WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE));
+ kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ entry->state = VMW_COMPAT_COMMITED;
+ list_add_tail(&entry->head, &man->list);
+ break;
+ case VMW_COMPAT_DEL:
+ ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
}
+}
- ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
- if (unlikely(ushader == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
- ret = -ENOMEM;
- goto out_unlock;
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+ int ret;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_DEL:
+ ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &man->list);
+ entry->state = VMW_COMPAT_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
}
+}
- res = &ushader->shader.res;
- ushader->base.shareable = false;
- ushader->base.tfile = NULL;
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (but the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry;
+ struct drm_hash_item *hash;
+ int ret;
- /*
- * From here on, the destructor takes over resource freeing.
- */
+ ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+ &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
- ret = vmw_gb_shader_init(dev_priv, res, arg->size,
- arg->offset, shader_type, buffer,
- vmw_user_shader_free);
+ entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_COMMITED:
+ (void) drm_ht_remove_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_COMPAT_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list)
+{
+ struct vmw_dma_buffer *buf;
+ struct ttm_bo_kmap_obj map;
+ bool is_iomem;
+ struct vmw_compat_shader *compat;
+ u32 handle;
+ int ret;
+
+ if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ return -EINVAL;
+
+ /* Allocate and pin a DMA buffer */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (unlikely(buf == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ goto out;
- tmp = vmw_resource_reference(res);
- ret = ttm_base_object_init(tfile, &ushader->base, false,
- VMW_RES_SHADER,
- &vmw_user_shader_base_release, NULL);
+ ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /* Map and copy shader bytecode. */
+ ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ &map);
if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
+ ttm_bo_unreserve(&buf->base);
+ goto no_reserve;
}
- arg->shader_handle = ushader->base.hash.key;
-out_err:
- vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
-out_bad_arg:
- vmw_dmabuf_unreference(&buffer);
+ memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+ WARN_ON(is_iomem);
+
+ ttm_bo_kunmap(&map);
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ WARN_ON(ret != 0);
+ ttm_bo_unreserve(&buf->base);
+
+ /* Create a guest-backed shader container backed by the dma buffer */
+ ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+ tfile, &handle);
+ vmw_dmabuf_unreference(&buf);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /*
+ * Create a compat shader structure and stage it for insertion
+ * in the manager
+ */
+ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+ if (compat == NULL)
+ goto no_compat;
+
+ compat->hash.key = user_key | (shader_type << 24);
+ ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ compat->state = VMW_COMPAT_ADD;
+ compat->handle = handle;
+ compat->tfile = tfile;
+ list_add_tail(&compat->head, list);
+ return 0;
+
+out_invalid_key:
+ kfree(compat);
+no_compat:
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_compat_shader_manager *man;
+ int ret;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (man == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ mutex_lock(&man->dev_priv->cmdbuf_mutex);
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_compat_shader_free(man, entry);
+ mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+ kfree(man);
}
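The compat shader manager above keys its hash table on a single packed value: the low 24 bits hold the user key and the shader type is shifted into bits 24 and up, which is exactly why vmw_compat_shader_add() rejects user keys of 2^24 or more and shader types above 16. A small sketch of that packing and its validity check (function names here are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Pack the user key and the shader type the way the manager above does:
 * the low 24 bits carry the user key, the type lives in bits 24 and up. */
static inline unsigned long compat_shader_key(uint32_t user_key,
					      unsigned int shader_type)
{
	return (unsigned long)user_key | ((unsigned long)shader_type << 24);
}

/* Mirror of the range check in vmw_compat_shader_add(): keys must fit in
 * 24 bits and the shader type must not exceed 16. */
static inline bool compat_shader_key_valid(uint32_t user_key,
					   unsigned int shader_type)
{
	return user_key < (1u << 24) && shader_type <= 16;
}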
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
+ ret = copy_to_user(user_sizes, &srf->base_size,
+ sizeof(srf->base_size));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 1146e3bba6e1..112f27e51bc7 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -538,7 +538,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
g->base = job->gather_addr_phys[i];
- for (j = 0; j < job->num_gathers; j++)
+ for (j = i + 1; j < job->num_gathers; j++)
if (job->gathers[j].bo == g->bo)
job->gathers[j].handled = true;
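The one-character host1x change above restores a common dedup pattern: after processing gather i, only gathers later in the list that share the same buffer object are marked handled, so the current gather is still patched and earlier, already-processed gathers are left untouched. A stripped-down sketch of the pattern (types are illustrative, not the host1x structures):

/* Illustrative types: the real code works on struct host1x_job gathers. */
struct gather_example {
	void *bo;	/* backing buffer object */
	int handled;
};

/* After gather i has been pinned and patched, mark only the *later* gathers
 * that reference the same buffer as handled, so they are not patched twice;
 * earlier gathers were already processed and must not be revisited. */
static void mark_duplicate_gathers(struct gather_example *g,
				   unsigned int num, unsigned int i)
{
	unsigned int j;

	for (j = i + 1; j < num; j++)
		if (g[j].bo == g[i].bo)
			g[j].handled = 1;
}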
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 497558127bb3..f822fd2a1ada 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3bfac3accd22..cc32a6f96c64 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1679,6 +1679,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -1779,6 +1780,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 8fae6d1414cc..c24908f14934 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -157,6 +157,7 @@ struct mousevsc_dev {
u32 report_desc_size;
struct hv_input_dev_info hid_dev_info;
struct hid_device *hid_device;
+ u8 input_buf[HID_MAX_BUFFER_SIZE];
};
@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device,
struct synthhid_msg *hid_msg;
struct mousevsc_dev *input_dev = hv_get_drvdata(device);
struct synthhid_input_report *input_report;
+ size_t len;
pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
(packet->offset8 << 3));
@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device,
(struct synthhid_input_report *)pipe_msg->data;
if (!input_dev->init_complete)
break;
- hid_input_report(input_dev->hid_device,
- HID_INPUT_REPORT, input_report->buffer,
- input_report->header.size, 1);
+
+ len = min(input_report->header.size,
+ (u32)sizeof(input_dev->input_buf));
+ memcpy(input_dev->input_buf, input_report->buffer, len);
+ hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
+ input_dev->input_buf, len, 1);
break;
default:
pr_err("unsupported hid msg type - type %d len %d",
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a5248f2cc07..22f28d6b33a8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,6 +135,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -240,6 +241,7 @@
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
+#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
@@ -451,6 +453,9 @@
#define USB_VENDOR_ID_INTEL_1 0x8087
#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa
+#define USB_VENDOR_ID_STM_0 0x0483
+#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1
+
#define USB_VENDOR_ID_ION 0x15e4
#define USB_DEVICE_ID_ICADE 0x0132
@@ -619,6 +624,8 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
+#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
+#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -644,6 +651,7 @@
#define USB_VENDOR_ID_NEXIO 0x1870
#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
+#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d50e7313b171..a713e6211419 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1178,7 +1178,7 @@ static void hidinput_led_worker(struct work_struct *work)
/* fall back to generic raw-output-report */
len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- buf = kmalloc(len, GFP_KERNEL);
+ buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
return;
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6ef6eed3091..404a3a8a82f1 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -208,6 +208,10 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
+ .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
+ .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f134d73beca1..221d503f1c24 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1166,6 +1166,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
USB_DEVICE_ID_MULTITOUCH_3200) },
+ /* FocalTech Panels */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
+ USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) },
+
/* GeneralTouch panel */
{ .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 46f4480035bc..9c22e14c57f0 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -665,6 +665,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
USB_DEVICE_ID_INTEL_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
+ USB_DEVICE_ID_STM_HID_SENSOR),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d1f81f52481a..42eebd14de1f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -582,7 +582,7 @@ static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep,
int ret;
int len = i2c_hid_get_report_length(rep) - 2;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = hid_alloc_report_buf(rep, GFP_KERNEL);
if (!buf)
return;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 175ec0afb70c..dbd83878ff99 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9b1936..f2d7bf90c9fe 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- int t;
init_completion(&msginfo->waitevent);
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ if (version == VERSION_WIN8)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
/*
* Add to list before we send the request since we may
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
}
/* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- return -ETIMEDOUT;
- }
+ wait_for_completion(&msginfo->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 029ecabc4380..73b3865f1207 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
if (hwmon_irq < 0)
return hwmon_irq;
- hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
- if (hwmon_irq < 0)
- return hwmon_irq;
-
ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
NULL, da9055_auxadc_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index a7626358c95d..029b65e6c589 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
data->temp_min[index] = clamp_val(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
- data->temp_max[index]))
+ data->temp_min[index]))
count = -EIO;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c23203915af..8a17f01e8672 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@ struct ntc_data {
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
- unsigned int result;
+ s64 result;
int val, ret;
ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
}
/* unit: mV */
- result = pdata->pullup_uv * val;
+ result = pdata->pullup_uv * (s64) val;
result >>= 12;
- return result;
+ return (int)result;
}
static const struct of_device_id ntc_match[] = {
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 3cbf66e9d861..291d11fe93e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -90,7 +90,8 @@ struct pmbus_data {
u32 flags; /* from platform data */
- int exponent; /* linear mode: exponent for output voltages */
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
const struct pmbus_driver_info *info;
@@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
long val;
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
- exponent = data->exponent;
+ exponent = data->exponent[sensor->page];
mantissa = (u16) sensor->data;
} else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
#define MIN_MANTISSA (511 * 1000)
static u16 pmbus_data2reg_linear(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
s16 exponent = 0, mantissa;
bool negative = false;
@@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
if (val == 0)
return 0;
- if (class == PSC_VOLTAGE_OUT) {
+ if (sensor->class == PSC_VOLTAGE_OUT) {
/* LINEAR16 does not support negative voltages */
if (val < 0)
return 0;
@@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
* For a static exponent, we don't have a choice
* but to adjust the value to it.
*/
- if (data->exponent < 0)
- val <<= -data->exponent;
+ if (data->exponent[sensor->page] < 0)
+ val <<= -data->exponent[sensor->page];
else
- val >>= data->exponent;
+ val >>= data->exponent[sensor->page];
val = DIV_ROUND_CLOSEST(val, 1000);
return val & 0xffff;
}
@@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
/* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
+ if (sensor->class == PSC_POWER)
val = DIV_ROUND_CLOSEST(val, 1000L);
/*
* For simplicity, convert fan data to milli-units
* before calculating the exponent.
*/
- if (class == PSC_FAN)
+ if (sensor->class == PSC_FAN)
val = val * 1000;
/* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
long m, b, R;
- m = data->info->m[class];
- b = data->info->b[class];
- R = data->info->R[class];
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
/* Power is in uW. Adjust R and b. */
- if (class == PSC_POWER) {
+ if (sensor->class == PSC_POWER) {
R -= 3;
b *= 1000;
}
/* Calculate Y = (m * X + b) * 10^R */
- if (class != PSC_FAN) {
+ if (sensor->class != PSC_FAN) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
val = clamp_val(val, 500, 1600);
@@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
}
static u16 pmbus_data2reg(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
u16 regval;
- switch (data->info->format[class]) {
+ switch (data->info->format[sensor->class]) {
case direct:
- regval = pmbus_data2reg_direct(data, class, val);
+ regval = pmbus_data2reg_direct(data, sensor, val);
break;
case vid:
- regval = pmbus_data2reg_vid(data, class, val);
+ regval = pmbus_data2reg_vid(data, sensor, val);
break;
case linear:
default:
- regval = pmbus_data2reg_linear(data, class, val);
+ regval = pmbus_data2reg_linear(data, sensor, val);
break;
}
return regval;
@@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
return -EINVAL;
mutex_lock(&data->update_lock);
- regval = pmbus_data2reg(data, sensor->class, val);
+ regval = pmbus_data2reg(data, sensor, val);
ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0)
rv = ret;
@@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
* This function is called for all chips.
*/
static int pmbus_identify_common(struct i2c_client *client,
- struct pmbus_data *data)
+ struct pmbus_data *data, int page)
{
int vout_mode = -1;
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
- vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
+ vout_mode = _pmbus_read_byte_data(client, page,
+ PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
/*
* Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
- data->exponent = ((s8)(vout_mode << 3)) >> 3;
+ data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
break;
case 1: /* VID mode */
if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- pmbus_clear_fault_page(client, 0);
+ pmbus_clear_fault_page(client, page);
return 0;
}
@@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
struct device *dev = &client->dev;
- int ret;
+ int page, ret;
/*
* Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return -ENODEV;
}
- ret = pmbus_identify_common(client, data);
- if (ret < 0) {
- dev_err(dev, "Failed to identify chip capabilities\n");
- return ret;
+ for (page = 0; page < info->pages; page++) {
+ ret = pmbus_identify_common(client, data, page);
+ if (ret < 0) {
+ dev_err(dev, "Failed to identify chip capabilities\n");
+ return ret;
+ }
}
return 0;
}
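Note on the pmbus_core change above: the LINEAR16 exponent is now stored per page because VOUT_MODE can differ between pages on multi-page devices. As a rough sketch of the decode step (simplified; the milli-unit scaling and rounding done by the driver are omitted), a LINEAR16 voltage reading is the 16-bit mantissa scaled by that page's signed exponent:

/* value = mantissa * 2^exponent; exponent comes from the page's VOUT_MODE */
static long linear16_decode(unsigned short mantissa, signed char exponent)
{
	long val = mantissa;

	return exponent >= 0 ? val << exponent : val >> -exponent;
}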
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b8c5187b9ee0..d52d84937ad3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -97,7 +97,6 @@ enum {
enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
- MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
MV64XXX_I2C_ACTION_SEND_START,
MV64XXX_I2C_ACTION_SEND_RESTART,
MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
unsigned long ctrl_reg;
struct i2c_msg *msg = drv_data->msgs;
+ if (!drv_data->offload_enabled)
+ return -EOPNOTSUPP;
+
drv_data->msg = msg;
drv_data->byte_posn = 0;
drv_data->bytes_left = msg->len;
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->msgs++;
drv_data->num_msgs--;
- if (!(drv_data->offload_enabled &&
- mv64xxx_i2c_offload_msg(drv_data))) {
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
writel(drv_data->cntl_bits,
drv_data->reg_base + drv_data->reg_offsets.control);
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base + drv_data->reg_offsets.control);
break;
- case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
- if (!mv64xxx_i2c_offload_msg(drv_data))
- break;
- else
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_START:
- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
- drv_data->reg_base + drv_data->reg_offsets.control);
+ /* Can we offload this msg ? */
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+ /* No, switch to standard path */
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+ }
break;
case MV64XXX_I2C_ACTION_SEND_ADDR_1:
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
- if (drv_data->offload_enabled) {
- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- } else {
- mv64xxx_i2c_prepare_for_io(drv_data, msg);
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- }
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+
drv_data->send_stop = is_last;
drv_data->block = 1;
mv64xxx_i2c_do_action(drv_data);
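Note on the mv64xxx rework above: mv64xxx_i2c_offload_msg() now reports by itself whether offload is possible (returning -EOPNOTSUPP when it is disabled), so SEND_START has a single try-offload-then-fall-back path instead of a dedicated OFFLOAD_SEND_START action. A hedged sketch of that control flow (names are stand-ins, not the driver's):

#include <errno.h>

static int try_offload(int can_offload)
{
	return can_offload ? 0 : -EOPNOTSUPP;
}

static void start_standard_path(void)
{
	/* prepare for programmed I/O and write the START bit, as the driver does */
}

static void send_start(int can_offload)
{
	if (try_offload(can_offload) < 0)	/* offload refused the message */
		start_standard_path();		/* fall back transparently */
}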
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3bec9220df04..bfec313492b3 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ },
};
-#define BMA180_CHANNEL(_index) { \
+#define BMA180_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
- .indexed = 1, \
- .channel = (_index), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_index = (_index), \
+ .scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
.realbits = 14, \
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}
static const struct iio_chan_spec bma180_channels[] = {
- BMA180_CHANNEL(AXIS_X),
- BMA180_CHANNEL(AXIS_Y),
- BMA180_CHANNEL(AXIS_Z),
- IIO_CHAN_SOFT_TIMESTAMP(4),
+ BMA180_CHANNEL(X),
+ BMA180_CHANNEL(Y),
+ BMA180_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static irqreturn_t bma180_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e283f2f2ee2f..360259266d4f 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client,
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
- vref = devm_regulator_get(&client->dev, "vref");
+ vref = devm_regulator_get_optional(&client->dev, "vref");
if (!IS_ERR(vref)) {
int vref_uv;
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 41c64a43bcab..ac2d69e34c8c 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -70,7 +70,7 @@ config IIO_ST_GYRO_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics gyroscopes:
- L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
+ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index f8f2bf84a5a2..c197360c450b 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -19,7 +19,6 @@
#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
#define L3GD20_GYRO_DEV_NAME "l3gd20"
-#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index d53d91adfb55..a8e174a47bc4 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -167,11 +167,10 @@ static const struct st_sensors st_gyro_sensors[] = {
.wai = ST_GYRO_2_WAI_EXP,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
- [1] = L3GD20H_GYRO_DEV_NAME,
- [2] = LSM330D_GYRO_DEV_NAME,
- [3] = LSM330DLC_GYRO_DEV_NAME,
- [4] = L3G4IS_GYRO_DEV_NAME,
- [5] = LSM330_GYRO_DEV_NAME,
+ [1] = LSM330D_GYRO_DEV_NAME,
+ [2] = LSM330DLC_GYRO_DEV_NAME,
+ [3] = L3G4IS_GYRO_DEV_NAME,
+ [4] = LSM330_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 16b8b8d70bf1..23c12f361b05 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -55,7 +55,6 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index 94763e25caf9..b4ad3be26687 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -54,7 +54,6 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d632386..0916bf6b6c31 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@ enum {
ADIS16300_SCAN_INCLI_X,
ADIS16300_SCAN_INCLI_Y,
ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 368660dfe135..7c582f7ae34e 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
- IIO_CHAN_SOFT_TIMESTAMP(12)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16448_channels[] = {
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = {
},
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16350_channels[] = {
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16300_channels[] = {
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
- IIO_CHAN_SOFT_TIMESTAMP(14)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16334_channels[] = {
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(8)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index f17b4e6183c6..47a6dbac2d0c 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -103,13 +103,13 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
/**
* cm32181_read_als_it() - Get sensor integration time (ms)
* @cm32181: pointer of struct cm32181
- * @val: pointer of int to load the als_it value.
+ * @val2: pointer of int to load the als_it value.
*
* Report the current integration time in milliseconds.
*
- * Return: IIO_VAL_INT for success, otherwise -EINVAL.
+ * Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
-static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
{
u16 als_it;
int i;
@@ -119,8 +119,8 @@ static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
if (als_it == als_it_bits[i]) {
- *val = als_it_value[i];
- return IIO_VAL_INT;
+ *val2 = als_it_value[i];
+ return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -221,7 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
*val = cm32181->calibscale;
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_read_als_it(cm32181, val);
+ ret = cm32181_read_als_it(cm32181, val2);
return ret;
}
@@ -240,7 +240,7 @@ static int cm32181_write_raw(struct iio_dev *indio_dev,
cm32181->calibscale = val;
return val;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_write_als_it(cm32181, val);
+ ret = cm32181_write_als_it(cm32181, val2);
return ret;
}
@@ -264,7 +264,7 @@ static ssize_t cm32181_get_it_available(struct device *dev,
n = ARRAY_SIZE(als_it_value);
for (i = 0, len = 0; i < n; i++)
- len += sprintf(buf + len, "%d ", als_it_value[i]);
+ len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
return len + sprintf(buf + len, "\n");
}
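Note on the cm32181 change above: the integration time is now returned as IIO_VAL_INT_PLUS_MICRO, i.e. a (val, val2) pair that user space reads as val + val2/1000000 seconds, which is why the *_available listing prints "0.%06u". Illustrative rendering of such a pair (values hypothetical):

#include <stdio.h>

/* An INT_PLUS_MICRO pair: whole units in val, millionths in val2. */
static void show(int val, int val2)
{
	printf("%d.%06d\n", val, val2);
}

int main(void)
{
	show(0, 800000);	/* an 800 ms integration time reads as "0.800000" */
	return 0;
}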
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 0a142af83e25..a45e07492db3 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -50,10 +50,10 @@
#define CM36651_CS_CONF2_DEFAULT_BIT 0x08
/* CS_CONF3 channel integration time */
-#define CM36651_CS_IT1 0x00 /* Integration time 80000 usec */
-#define CM36651_CS_IT2 0x40 /* Integration time 160000 usec */
-#define CM36651_CS_IT3 0x80 /* Integration time 320000 usec */
-#define CM36651_CS_IT4 0xC0 /* Integration time 640000 usec */
+#define CM36651_CS_IT1 0x00 /* Integration time 80 msec */
+#define CM36651_CS_IT2 0x40 /* Integration time 160 msec */
+#define CM36651_CS_IT3 0x80 /* Integration time 320 msec */
+#define CM36651_CS_IT4 0xC0 /* Integration time 640 msec */
/* PS_CONF1 command code */
#define CM36651_PS_ENABLE 0x00
@@ -64,10 +64,10 @@
#define CM36651_PS_PERS4 0x0C
/* PS_CONF1 command code: integration time */
-#define CM36651_PS_IT1 0x00 /* Integration time 320 usec */
-#define CM36651_PS_IT2 0x10 /* Integration time 420 usec */
-#define CM36651_PS_IT3 0x20 /* Integration time 520 usec */
-#define CM36651_PS_IT4 0x30 /* Integration time 640 usec */
+#define CM36651_PS_IT1 0x00 /* Integration time 0.32 msec */
+#define CM36651_PS_IT2 0x10 /* Integration time 0.42 msec */
+#define CM36651_PS_IT3 0x20 /* Integration time 0.52 msec */
+#define CM36651_PS_IT4 0x30 /* Integration time 0.64 msec */
/* PS_CONF1 command code: duty ratio */
#define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */
@@ -93,8 +93,8 @@
#define CM36651_CLOSE_PROXIMITY 0x32
#define CM36651_FAR_PROXIMITY 0x33
-#define CM36651_CS_INT_TIME_AVAIL "80000 160000 320000 640000"
-#define CM36651_PS_INT_TIME_AVAIL "320 420 520 640"
+#define CM36651_CS_INT_TIME_AVAIL "0.08 0.16 0.32 0.64"
+#define CM36651_PS_INT_TIME_AVAIL "0.000320 0.000420 0.000520 0.000640"
enum cm36651_operation_mode {
CM36651_LIGHT_EN,
@@ -356,30 +356,30 @@ static int cm36651_read_channel(struct cm36651_data *cm36651,
}
static int cm36651_read_int_time(struct cm36651_data *cm36651,
- struct iio_chan_spec const *chan, int *val)
+ struct iio_chan_spec const *chan, int *val2)
{
switch (chan->type) {
case IIO_LIGHT:
if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1)
- *val = 80000;
+ *val2 = 80000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2)
- *val = 160000;
+ *val2 = 160000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3)
- *val = 320000;
+ *val2 = 320000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4)
- *val = 640000;
+ *val2 = 640000;
else
return -EINVAL;
break;
case IIO_PROXIMITY:
if (cm36651->ps_int_time == CM36651_PS_IT1)
- *val = 320;
+ *val2 = 320;
else if (cm36651->ps_int_time == CM36651_PS_IT2)
- *val = 420;
+ *val2 = 420;
else if (cm36651->ps_int_time == CM36651_PS_IT3)
- *val = 520;
+ *val2 = 520;
else if (cm36651->ps_int_time == CM36651_PS_IT4)
- *val = 640;
+ *val2 = 640;
else
return -EINVAL;
break;
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
return -EINVAL;
}
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_MICRO;
}
static int cm36651_write_int_time(struct cm36651_data *cm36651,
@@ -459,7 +459,8 @@ static int cm36651_read_raw(struct iio_dev *indio_dev,
ret = cm36651_read_channel(cm36651, chan, val);
break;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm36651_read_int_time(cm36651, chan, val);
+ *val = 0;
+ ret = cm36651_read_int_time(cm36651, chan, val2);
break;
default:
ret = -EINVAL;
@@ -479,7 +480,7 @@ static int cm36651_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
if (mask == IIO_CHAN_INFO_INT_TIME) {
- ret = cm36651_write_int_time(cm36651, chan, val);
+ ret = cm36651_write_int_time(cm36651, chan, val2);
if (ret < 0)
dev_err(&client->dev, "Integration time write failed\n");
}
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 3d8110157f2d..94daa9fc1247 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev,
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- if (chan->channel == IIO_MOD_LIGHT_BOTH)
+ if (mask != IIO_CHAN_INFO_CALIBSCALE)
+ return -EINVAL;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
chip->calib0 = calib_from_sysfs(val);
- else
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
chip->calib1 = calib_from_sysfs(val);
+ else
+ return -EINVAL;
return 0;
}
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
- long m)
+ long mask)
{
int ret = -EINVAL;
u32 calib0, calib1;
struct tsl2563_chip *chip = iio_priv(indio_dev);
mutex_lock(&chip->lock);
- switch (m) {
+ switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = chip->data0;
else
*val = chip->data1;
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = calib_to_sysfs(chip->calib0);
else
*val = calib_to_sysfs(chip->calib1);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5afd95..05423543f89d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
#define AK8975_MAX_CONVERSION_TIMEOUT 500
#define AK8975_CONVERSION_DONE_POLL_TIME 10
#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
/*
* Per-instance context data for the device.
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
*
* Since 1uT = 0.01 gauss, our final scale factor becomes:
*
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
*
* Since ASA doesn't change, we cache the resultant scale factor into the
* device context in ak8975_setup().
*/
- data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
- data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
- data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
+ data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
return 0;
}
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
return ak8975_read_axis(indio_dev, chan->address, val);
case IIO_CHAN_INFO_SCALE:
- *val = data->raw_to_gauss[chan->address];
- return IIO_VAL_INT;
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
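A worked example of the new ak8975 scale computation, assuming the nominal sensitivity adjustment value ASA = 128: RAW_TO_GAUSS(128) = ((128 + 128) * 3000) / 256 = 3000, and with IIO_VAL_INT_PLUS_MICRO the reported scale becomes 0 + 3000/1000000 = 0.003 gauss per LSB (0.3 uT/LSB), a fractional value the previous plain-integer return could not express.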
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4b65b6d3bdb1..f66955fb3509 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf,
while (n-- > 0)
len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", vals[n][0], vals[n][1]);
+ "%d.%06d ", vals[n][0], vals[n][1]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (chan->type) {
case IIO_MAGN: /* in 0.1 uT / LSB */
ret = mag3110_read(data, buffer);
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
struct mag3110_data *data = iio_priv(indio_dev);
int rate;
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Initialize network device */
if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
iounmap(mmio_regs);
goto bail4;
}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto bail10;
}
- if (c2_register_device(c2dev))
+ ret = c2_register_device(c2dev);
+ if (ret)
goto bail10;
return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
goto bail4;
/* Initialize cached the adapter limits */
- if (c2_rnic_query(c2dev, &c2dev->props))
+ err = c2_rnic_query(c2dev, &c2dev->props);
+ if (err)
goto bail5;
/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 45126879ad28..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto free_dst;
}
+ neigh_release(neigh);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c2702f549f10..e81c5547e647 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
+{
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
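As an illustration of mlx4_addrconf_ifid_eui48() above (the MAC address here is chosen arbitrarily): for dev_addr 00:11:22:33:44:55 with no VLAN (vlan_id = 0xffff), the bytes become 00:11:22 | ff:fe | 33:44:55, and flipping the universal/local bit (eui[0] ^= 2) yields the interface identifier 02:11:22:ff:fe:33:44:55, which mlx4_make_default_gid() later pairs with the fe80::/64 prefix to form the port's default GID. A VLAN id below 0x1000 replaces the ff:fe filler with its two bytes.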
static void update_gids_task(struct work_struct *work)
{
struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
struct mlx4_cmd_mailbox *mailbox;
union ib_gid *gids;
int err;
- int i;
struct mlx4_dev *dev = gw->dev->dev;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
gids = mailbox->buf;
memcpy(gids, gw->gids, sizeof(gw->gids));
- for (i = 1; i < gw->dev->num_ports + 1; i++) {
- if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
- IB_LINK_LAYER_ETHERNET) {
- err = mlx4_cmd(dev, mailbox->dma,
- MLX4_SET_PORT_GID_TABLE << 8 | i,
- 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
- if (err)
- pr_warn(KERN_WARNING
- "set port %d command failed\n", i);
- }
+ if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ pr_warn(KERN_WARNING
+ "set port %d command failed\n", gw->port);
}
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ free:
}
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
- union ib_gid *gid, int clear)
+ union ib_gid *gid, int clear,
+ int default_gid)
{
struct update_gid_work *work;
int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
int found = -1;
int max_gids;
- max_gids = dev->dev->caps.gid_table_len[port];
- for (i = 0; i < max_gids; ++i) {
- if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
- sizeof(*gid)))
- found = i;
-
- if (clear) {
- if (found >= 0) {
- need_update = 1;
- dev->iboe.gid_table[port - 1][found] = zgid;
- break;
- }
- } else {
- if (found >= 0)
- break;
-
- if (free < 0 &&
- !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+ if (default_gid) {
+ free = 0;
+ } else {
+ max_gids = dev->dev->caps.gid_table_len[port];
+ for (i = 1; i < max_gids; ++i) {
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
sizeof(*gid)))
- free = i;
+ found = i;
+
+ if (clear) {
+ if (found >= 0) {
+ need_update = 1;
+ dev->iboe.gid_table[port - 1][found] =
+ zgid;
+ break;
+ }
+ } else {
+ if (found >= 0)
+ break;
+
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i],
+ &zgid, sizeof(*gid)))
+ free = i;
+ }
}
}
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
return 0;
}
-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
- struct update_gid_work *work;
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
+{
+ struct update_gid_work *work;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return -ENOMEM;
- memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+ memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
memset(work->gids, 0, sizeof(work->gids));
INIT_WORK(&work->work, reset_gids_task);
work->dev = dev;
+ work->port = port;
queue_work(wq, &work->work);
return 0;
}
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
rdma_vlan_dev_real_dev(event_netdev) :
event_netdev;
+ union ib_gid default_gid;
+
+ mlx4_make_default_gid(real_dev, &default_gid);
+
+ if (!memcmp(gid, &default_gid, sizeof(*gid)))
+ return 0;
if (event != NETDEV_DOWN && event != NETDEV_UP)
return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
(!netif_is_bond_master(real_dev) &&
(real_dev == iboe->netdevs[port - 1])))
update_gid_table(ibdev, port, gid,
- event == NETDEV_DOWN);
+ event == NETDEV_DOWN, 0);
spin_unlock(&iboe->lock);
return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
rdma_vlan_dev_real_dev(dev) : dev;
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
for (port = 1; port <= MLX4_MAX_PORTS; ++port)
if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
(real_dev == iboe->netdevs[port - 1])))
break;
- spin_unlock(&iboe->lock);
-
if ((port == 0) || (port > MLX4_MAX_PORTS))
return 0;
else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
/*ifa->ifa_address;*/
ipv6_addr_set_v4mapped(ifa->ifa_address,
(struct in6_addr *)&gid);
- update_gid_table(ibdev, port, &gid, 0);
+ update_gid_table(ibdev, port, &gid, 0, 0);
}
endfor_ifa(in_dev);
in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
- update_gid_table(ibdev, port, pgid, 0);
+ update_gid_table(ibdev, port, pgid, 0, 0);
}
read_unlock_bh(&in6_dev->lock);
in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
#endif
}
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev, u8 port)
+{
+ union ib_gid gid;
+ mlx4_make_default_gid(dev, &gid);
+ update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
struct net_device *dev;
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ int i;
- if (reset_gid_table(ibdev))
- return -1;
+ for (i = 1; i <= ibdev->num_ports; ++i)
+ if (reset_gid_table(ibdev, i))
+ return -1;
read_lock(&dev_base_lock);
+ spin_lock(&iboe->lock);
for_each_netdev(&init_net, dev) {
u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
mlx4_ib_get_dev_addr(dev, ibdev, port);
}
+ spin_unlock(&iboe->lock);
read_unlock(&dev_base_lock);
return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
spin_lock(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+ enum ib_port_state port_state = IB_PORT_NOP;
struct net_device *old_master = iboe->masters[port - 1];
+ struct net_device *curr_netdev;
struct net_device *curr_master;
+
iboe->netdevs[port - 1] =
mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+ if (iboe->netdevs[port - 1])
+ mlx4_ib_set_default_gid(ibdev,
+ iboe->netdevs[port - 1], port);
+ curr_netdev = iboe->netdevs[port - 1];
if (iboe->netdevs[port - 1] &&
netif_is_bond_slave(iboe->netdevs[port - 1])) {
- rtnl_lock();
iboe->masters[port - 1] = netdev_master_upper_dev_get(
iboe->netdevs[port - 1]);
- rtnl_unlock();
+ } else {
+ iboe->masters[port - 1] = NULL;
}
curr_master = iboe->masters[port - 1];
+ if (curr_netdev) {
+ port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ } else {
+ reset_gid_table(ibdev, port);
+ }
+ /* if using bonding/team and a slave port is down, we don't the bond IP
+ * based gids in the table since flows that select port by gid may get
+ * the down port.
+ */
+ if (curr_master && (port_state == IB_PORT_DOWN)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ }
/* if bonding is used it is possible that we add it to masters
- only after IP address is assigned to the net bonding
- interface */
- if (curr_master && (old_master != curr_master))
+ * only after IP address is assigned to the net bonding
+ * interface.
+ */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+ }
+
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
}
spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int i, j;
int err;
struct mlx4_ib_iboe *iboe;
+ int ib_num_ports = 0;
pr_info_once("%s", mlx4_ib_version);
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ ib_num_ports) {
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
}
#endif
+ for (i = 1 ; i <= ibdev->num_ports ; ++i)
+ reset_gid_table(ibdev, i);
+ rtnl_lock();
mlx4_ib_scan_netdevs(ibdev);
+ rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
- depends on NETDEVICES && ETHERNET && PCI && X86
+ depends on NETDEVICES && ETHERNET && PCI
select NET_VENDOR_MELLANOX
select MLX5_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d093f8cf..aa03e732b6a8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_RC_RNR_NAK_GEN;
flags = dev->mdev.caps.flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_alloc_ucontext_req req;
+ struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
int gross_uuars;
int num_uars;
+ int ver;
int uuarn;
int err;
int i;
+ int reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- err = ib_copy_from_udata(&req, udata, sizeof(req));
+ memset(&req, 0, sizeof(req));
+ reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ ver = 2;
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = ib_copy_from_udata(&req, udata, reqlen);
if (err)
return ERR_PTR(err);
+ if (req.flags || req.reserved)
+ return ERR_PTR(-EINVAL);
+
if (req.total_num_uuars > MLX5_MAX_UUARS)
return ERR_PTR(-ENOMEM);
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_uars;
+ uuari->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars;
uuari->num_uars = num_uars;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae37fb9bf262..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
break;
case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
- uuarn = alloc_med_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_med_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
- uuarn = alloc_high_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_high_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ if (init_attr->create_flags)
+ return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 32a2a5dfc523..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
__u32 num_low_latency_uuars;
};
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
- if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+ netdev = nes_netdev_init(nesdev, mmio_regs);
+ if (netdev == NULL) {
+ ret = -ENOMEM;
goto bail7;
+ }
/* Register network device */
ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 2ca86ca818bd..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
if (is_vlan)
- netdev = vlan_dev_real_dev(netdev);
+ netdev = rdma_vlan_dev_real_dev(netdev);
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index aa92f40c9d50..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
OCRDMA_QP_PARAMS_TCLASS_SHIFT;
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 7ecc6061f1f4..f8dfd76be89f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
{
enum usnic_transport_type trans_type = qp_flow->trans_type;
int err;
+ uint16_t port_num = 0;
switch (trans_type) {
case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
case USNIC_TRANSPORT_IPV4_UDP:
err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
NULL, NULL,
- (uint16_t *) id);
+ &port_num);
if (err)
return err;
+ /*
+ * Copy port_num to stack first and then to *id,
+ * so that the short to int cast works for little
+ * and big endian systems.
+ */
+ *id = port_num;
break;
default:
usnic_err("Unsupported transport %u\n", trans_type);
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
+ tx_desc = NULL;
}
atomic_dec(&ib_conn->post_send_buf_count);
- if (tx_desc->type == ISCSI_TX_CONTROL) {
+ if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+ ISER_CONN_TERMINATING)){
+ if (ib_conn->iser_conn)
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
/* Complete the termination process if no posts are pending */
if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2b161be3c1a3..d18d08a076e8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
if (ret) {
pr_err("Failed to create fastreg descriptor err=%d\n",
ret);
+ kfree(fr_desc);
goto err;
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5a490b..0e537d8d0e47 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RSP_SIZE) {
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_SRQ_SIZE) {
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
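The four srpt hunks above replace the long-deprecated strict_strtoul() with kstrtoul(); both take the same arguments and return 0 or a negative errno. A hedged, kernel-style sketch of the usual store-attribute shape (the helper and its parameters are hypothetical, and the fragment is not meant to build on its own):

#include <linux/kernel.h>
#include <linux/errno.h>

/* hypothetical helper: parse an unsigned value from a configfs page and cap it */
static ssize_t example_store_limit(const char *page, size_t count,
				   unsigned long max, unsigned long *out)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);	/* base 0 accepts "123", "0x7b" or "0173" */
	if (ret < 0)
		return ret;		/* already -EINVAL or -ERANGE */

	if (val > max)
		return -EINVAL;

	*out = val;
	return count;
}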
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 7a04f54ef961..ef2e281b0a43 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,7 +37,6 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
- struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex;
int ret;
if (!haptics->arizona->dapm) {
@@ -67,13 +66,10 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -81,21 +77,14 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
-
- mutex_unlock(dapm_mutex);
-
} else {
/* This disable sequence will be a noop if already enabled */
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -103,12 +92,9 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
- mutex_unlock(dapm_mutex);
-
ret = regmap_update_bits(arizona->regmap,
ARIZONA_HAPTICS_CONTROL_1,
ARIZONA_HAP_CTRL_MASK,
@@ -155,16 +141,11 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
- struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex;
cancel_work_sync(&haptics->work);
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
if (haptics->arizona->dapm)
snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
-
- mutex_unlock(dapm_mutex);
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8911850c9444..1d9ab39af29f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -79,7 +79,6 @@
#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-#define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t))
/* Stage-1 PTE */
#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
@@ -191,6 +190,9 @@
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
+#define CBAR_S1_BPSHCFG_SHIFT 8
+#define CBAR_S1_BPSHCFG_MASK 3
+#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
@@ -393,7 +395,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg root_cfg;
phys_addr_t output_mask;
- struct mutex lock;
+ spinlock_t lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -632,6 +634,28 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
return IRQ_HANDLED;
}
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+ size_t size)
+{
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb();
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
+}
+
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
u32 reg;
@@ -650,11 +674,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
if (smmu->version == 1)
reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
- /* Use the weakest memory type, so it is overridden by the pte */
- if (stage1)
- reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
- else
+ /*
+ * Use the weakest shareability/memory types, so they are
+ * overridden by the ttbcr/pte.
+ */
+ if (stage1) {
+ reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
+ (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+ } else {
reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
+ }
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
if (smmu->version > 1) {
@@ -715,6 +744,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
}
/* TTBR0 */
+ arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
+ PTRS_PER_PGD * sizeof(pgd_t));
reg = __pa(root_cfg->pgd);
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
@@ -901,7 +932,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
goto out_free_domain;
smmu_domain->root_cfg.pgd = pgd;
- mutex_init(&smmu_domain->lock);
+ spin_lock_init(&smmu_domain->lock);
domain->priv = smmu_domain;
return 0;
@@ -1128,6 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *device_smmu = dev->archdata.iommu;
struct arm_smmu_master *master;
+ unsigned long flags;
if (!device_smmu) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1138,7 +1170,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't currently support domains
* that cross between different SMMU chains.
*/
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, flags);
if (!smmu_domain->leaf_smmu) {
/* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, dev);
@@ -1153,7 +1185,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_name(device_smmu->dev));
goto err_unlock;
}
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
/* Looks ok, so add the device to the domain */
master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1163,7 +1195,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return arm_smmu_domain_add_master(smmu_domain, master);
err_unlock:
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
return ret;
}
@@ -1177,23 +1209,6 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_domain_remove_master(smmu_domain, master);
}
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
- size_t size)
-{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of recursion
- * here as the SMMU table walker will not be wired through another
- * SMMU.
- */
- if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
-}
-
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
unsigned long end)
{
@@ -1210,12 +1225,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
- pgtable_t table = alloc_page(PGALLOC_GFP);
+ pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
if (!table)
return -ENOMEM;
- arm_smmu_flush_pgtable(smmu, page_address(table),
- ARM_SMMU_PTE_HWTABLE_SIZE);
+ arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
if (!pgtable_page_ctor(table)) {
__free_page(table);
return -ENOMEM;
@@ -1317,9 +1331,15 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
#ifndef __PAGETABLE_PMD_FOLDED
if (pud_none(*pud)) {
- pmd = pmd_alloc_one(NULL, addr);
+ pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
+ pud_populate(NULL, pud, pmd);
+ arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+
+ pmd += pmd_index(addr);
} else
#endif
pmd = pmd_offset(pud, addr);
@@ -1328,8 +1348,6 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
next = pmd_addr_end(addr, end);
ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
flags, stage);
- pud_populate(NULL, pud, pmd);
- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
phys += next - addr;
} while (pmd++, addr = next, addr < end);
@@ -1346,9 +1364,15 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
#ifndef __PAGETABLE_PUD_FOLDED
if (pgd_none(*pgd)) {
- pud = pud_alloc_one(NULL, addr);
+ pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
+ pgd_populate(NULL, pgd, pud);
+ arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+
+ pud += pud_index(addr);
} else
#endif
pud = pud_offset(pgd, addr);
@@ -1357,8 +1381,6 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
next = pud_addr_end(addr, end);
ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
flags, stage);
- pgd_populate(NULL, pud, pgd);
- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
phys += next - addr;
} while (pud++, addr = next, addr < end);
@@ -1375,6 +1397,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
pgd_t *pgd = root_cfg->pgd;
struct arm_smmu_device *smmu = root_cfg->smmu;
+ unsigned long irqflags;
if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
stage = 2;
@@ -1397,7 +1420,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask)
return -ERANGE;
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, irqflags);
pgd += pgd_index(iova);
end = iova + size;
do {
@@ -1413,11 +1436,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end);
out_unlock:
- mutex_unlock(&smmu_domain->lock);
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- dsb();
+ spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
return ret;
}
@@ -1987,8 +2006,10 @@ static int __init arm_smmu_init(void)
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
if (!iommu_present(&amba_bustype))
bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
return 0;
}
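Taken together, the arm-smmu hunks above convert smmu_domain->lock from a mutex to a spinlock, move the page-table flushing under that lock, and switch table allocations to GFP_ATOMIC, because map/unmap can be called from atomic context where sleeping locks and sleeping allocations are not allowed. A hedged sketch of that conversion pattern in isolation (the structure and function names are invented, not taken from the driver):

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct example_domain {
	spinlock_t lock;		/* was: struct mutex lock */
	void *table;
};

static int example_map(struct example_domain *d)
{
	unsigned long flags;
	void *new_table;

	spin_lock_irqsave(&d->lock, flags);	/* was: mutex_lock(&d->lock) */
	if (!d->table) {
		/* GFP_KERNEL may sleep, which is illegal under a spinlock */
		new_table = kzalloc(64, GFP_ATOMIC);	/* size is illustrative */
		if (!new_table) {
			spin_unlock_irqrestore(&d->lock, flags);
			return -ENOMEM;
		}
		d->table = new_table;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	return 0;
}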
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index d97fbe4fb9b1..80fffba7f12d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -354,8 +354,8 @@ DEBUG_FOPS(mem);
return -ENOMEM; \
}
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
+#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
+#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
static int iommu_debug_register(struct device *dev, void *data)
{
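The one-character omap-iommu-debug fix above is the classic missing-leading-zero bug: 600 without the 0 prefix is a decimal literal, not the octal file mode the debugfs helpers expect. A quick user-space check (editorial aside, not from the patch):

#include <stdio.h>

int main(void)
{
	printf("600  as a mode: %04o\n", 600);	/* prints 1130 - not what was meant */
	printf("0600 as a mode: %04o\n", 0600);	/* prints 0600 - owner read/write */
	return 0;
}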
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 86b484cb3ec2..5194afb39e78 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 9300bc32784e..540956465ed2 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& PCI_MSI_DOORBELL_MASK;
- writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~msimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
for (msinr = PCI_MSI_DOORBELL_START;
@@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& IPI_DOORBELL_MASK;
- writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~ipimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
/* Handle all pending doorbells */
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 92c41ab4dbfd..2cb474ad8809 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index 8e94d7a3b20d..c16c186d97d3 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index e51d40031884..8e41be62812e 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -111,7 +111,8 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct irq_domain *d = irq_get_handler_data(irq);
- struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq);
+
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
gc->mask_cache;
@@ -123,6 +124,19 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
+/*
+ * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
+ * To avoid interrupt events on stale irqs, we clear them before unmask.
+ */
+static unsigned int orion_bridge_irq_startup(struct irq_data *d)
+{
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+ ct->chip.irq_ack(d);
+ ct->chip.irq_unmask(d);
+ return 0;
+}
+
static int __init orion_bridge_irq_init(struct device_node *np,
struct device_node *parent)
{
@@ -143,7 +157,7 @@ static int __init orion_bridge_irq_init(struct device_node *np,
}
ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
- handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
if (ret) {
pr_err("%s: unable to alloc irq domain gc\n", np->name);
return ret;
@@ -176,12 +190,14 @@ static int __init orion_bridge_irq_init(struct device_node *np,
gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
+ gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
- /* mask all interrupts */
+ /* mask and clear all interrupts */
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
+ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
irq_set_handler_data(irq, domain);
irq_set_chained_handler(irq, orion_bridge_irq_handler);
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 000000000000..8ed04c4a43ee
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,127 @@
+/*
+ * linux/drivers/irqchip/irq-zevio.c
+ *
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define IO_STATUS 0x000
+#define IO_RAW_STATUS 0x004
+#define IO_ENABLE 0x008
+#define IO_DISABLE 0x00C
+#define IO_CURRENT 0x020
+#define IO_RESET 0x028
+#define IO_MAX_PRIOTY 0x02C
+
+#define IO_IRQ_BASE 0x000
+#define IO_FIQ_BASE 0x100
+
+#define IO_INVERT_SEL 0x200
+#define IO_STICKY_SEL 0x204
+#define IO_PRIORITY_SEL 0x300
+
+#define MAX_INTRS 32
+#define FIQ_START MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+ struct irq_chip_regs *regs =
+ &container_of(irqd->chip, struct irq_chip_type, chip)->regs;
+
+ readl(gc->reg_base + regs->ack);
+}
+
+static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+ int irqnr;
+
+ while (readl(zevio_irq_io + IO_STATUS)) {
+ irqnr = readl(zevio_irq_io + IO_CURRENT);
+ irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ };
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+ /* Disable all interrupts */
+ writel(~0, base + IO_DISABLE);
+
+ /* Accept interrupts of all priorities */
+ writel(0xF, base + IO_MAX_PRIOTY);
+
+ /* Reset existing interrupts */
+ readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_chip_generic *gc;
+ int ret;
+
+ if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+ return -EBUSY;
+
+ zevio_irq_io = of_iomap(node, 0);
+ BUG_ON(!zevio_irq_io);
+
+ /* Do not invert interrupt status bits */
+ writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+ /* Disable sticky interrupts */
+ writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+ /* We don't use IRQ priorities. Set each IRQ to highest priority. */
+ memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+ /* Init IRQ and FIQ */
+ zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+ zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+ zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
+ BUG_ON(!zevio_irq_domain);
+
+ ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+ "zevio_intc", handle_level_irq,
+ clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ BUG_ON(ret);
+
+ gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+ gc->reg_base = zevio_irq_io;
+ gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
+ gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
+
+ set_handle_irq(zevio_handle_irq);
+
+ pr_info("TI-NSPIRE classic IRQ controller\n");
+ return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020a81f1..b420f8bd862e 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@ prfeatureind(char *dest, u_char *p)
dp += sprintf(dp, " octet 3 ");
dp += prbits(dp, *p, 8, 8);
*dp++ = '\n';
- if (!(*p++ & 80)) {
+ if (!(*p++ & 0x80)) {
dp += sprintf(dp, " octet 4 ");
dp += prbits(dp, *p++, 8, 8);
*dp++ = '\n';
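The q931.c change above fixes the same decimal-versus-hex slip: the code means to test the octet's high (extension) bit, which is 0x80, whereas decimal 80 is 0x50 and masks the wrong bits. A small demonstration (editorial aside):

#include <stdio.h>

int main(void)
{
	unsigned char octet = 0x80;	/* extension bit set, all others clear */

	printf("octet & 80   -> %d\n", !!(octet & 80));		/* 0: decimal 80 is 0x50 */
	printf("octet & 0x80 -> %d\n", !!(octet & 0x80));	/* 1: tests bit 7 as intended */
	return 0;
}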
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0c707e4f4eaf..a4c7306ff43d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_SECTORS_USED_SIZE 13
+#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4f6b5940e609..3f74b4b0747b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k);
- printk(KERN_ERR "block %u key %zi/%u: ", set,
+ printk(KERN_ERR "block %u key %li/%u: ", set,
(uint64_t *) k - i->d, i->keys);
if (b->ops->key_dump)
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
order);
if (!out) {
+ struct page *outp;
+
BUG_ON(order > state->page_order);
- out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+ outp = mempool_alloc(state->pool, GFP_NOIO);
+ out = page_address(outp);
used_mempool = true;
order = state->page_order;
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 98cc0a810a36..5f9c2a665ca5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
GC_SECTORS_USED(g) + KEY_SIZE(k),
- (1 << 14) - 1));
+ MAX_GC_SECTORS_USED));
BUG_ON(!GC_SECTORS_USED(g));
}
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
static size_t insert_u64s_remaining(struct btree *b)
{
- ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
+ long ret = bch_btree_keys_u64s_remaining(&b->keys);
/*
* Might land in the middle of an existing extent and have to split it
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c3ead586dc27..416d1a3e028e 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -194,7 +194,7 @@ err:
mutex_unlock(&b->c->bucket_lock);
bch_extent_to_text(buf, sizeof(buf), k);
btree_bug(b,
-"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 72cd213f213f..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (op->bypass)
- return bch_data_invalidate(cl);
-
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
wake_up_gc(op->c);
}
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c6ab69333a6d..d8458d477a12 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
return MAP_CONTINUE;
}
-int bch_bset_print_stats(struct cache_set *c, char *buf)
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
struct bset_stats_op op;
int ret;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ffd472e015ca..1af70145fab9 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -289,6 +289,7 @@ struct per_bio_data {
bool tick:1;
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
+ struct dm_hook_info hook_info;
/*
* writethrough fields. These MUST remain at the end of this
@@ -297,7 +298,6 @@ struct per_bio_data {
*/
struct cache *cache;
dm_cblock_t cblock;
- struct dm_hook_info hook_info;
struct dm_bio_details bio_details;
};
@@ -671,15 +671,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{
sector_t bi_sector = bio->bi_iter.bi_sector;
+ sector_t block = from_cblock(cblock);
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
bio->bi_iter.bi_sector =
- (from_cblock(cblock) * cache->sectors_per_block) +
+ (block * cache->sectors_per_block) +
sector_div(bi_sector, cache->sectors_per_block);
else
bio->bi_iter.bi_sector =
- (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (block << cache->sectors_per_block_shift) |
(bi_sector & (cache->sectors_per_block - 1));
}
@@ -1010,13 +1011,15 @@ static void overwrite_endio(struct bio *bio, int err)
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
unsigned long flags;
+ dm_unhook_bio(&pb->hook_info, bio);
+
if (err)
mg->err = true;
+ mg->requeue_holder = false;
+
spin_lock_irqsave(&cache->lock, flags);
list_add_tail(&mg->list, &cache->completed_migrations);
- dm_unhook_bio(&pb->hook_info, bio);
- mg->requeue_holder = false;
spin_unlock_irqrestore(&cache->lock, flags);
wake_worker(cache);
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b2b8a10e8427..3842ac738f98 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,29 +201,28 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
/*
* Functions for getting the pages from a bvec.
*/
-static void bio_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+static void bio_get_page(struct dpages *dp, struct page **p,
+ unsigned long *len, unsigned *offset)
{
- struct bio *bio = dp->context_ptr;
- struct bio_vec bvec = bio_iovec(bio);
- *p = bvec.bv_page;
- *len = bvec.bv_len;
- *offset = bvec.bv_offset;
+ struct bio_vec *bvec = dp->context_ptr;
+ *p = bvec->bv_page;
+ *len = bvec->bv_len - dp->context_u;
+ *offset = bvec->bv_offset + dp->context_u;
}
static void bio_next_page(struct dpages *dp)
{
- struct bio *bio = dp->context_ptr;
- struct bio_vec bvec = bio_iovec(bio);
-
- bio_advance(bio, bvec.bv_len);
+ struct bio_vec *bvec = dp->context_ptr;
+ dp->context_ptr = bvec + 1;
+ dp->context_u = 0;
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
dp->get_page = bio_get_page;
dp->next_page = bio_next_page;
- dp->context_ptr = bio;
+ dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ dp->context_u = bio->bi_iter.bi_bvec_done;
}
/*
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6eb9dc9ef8f3..422a9fdeb53e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1626,8 +1626,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
+ int err = scsi_verify_blk_ioctl(NULL, cmd);
+ if (err)
+ r = err;
+ }
if (r == -ENOTCONN && !fatal_signal_pending(current))
queue_work(kmultipathd, &m->process_queued_ios);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index f284e0bfb25f..7dfdb5c746d6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1244,6 +1244,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL;
+
+ atomic_inc(&bio->bi_remaining);
+
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 7da347665552..baa87ff12816 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -483,7 +483,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
- disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
@@ -651,7 +651,7 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
{
int r;
- pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE,
+ pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_METADATA_CACHE_SIZE,
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
@@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
return r;
}
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
+{
+ bool r = false;
+ struct dm_thin_device *td, *tmp;
+
+ down_read(&pmd->root_lock);
+ list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
+ if (td->changed) {
+ r = td->changed;
+ break;
+ }
+ }
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
bool r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 9a368567632f..82ea384d36ff 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -9,16 +9,14 @@
#include "persistent-data/dm-block-manager.h"
#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-metadata.h"
-#define THIN_METADATA_BLOCK_SIZE 4096
+#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
/*
* The metadata device is currently limited in size.
- *
- * We have one block of index, which can hold 255 index entries. Each
- * index entry contains allocation info about 16k metadata blocks.
*/
-#define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
/*
* A metadata device larger than 16GB triggers a warning.
@@ -161,6 +159,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
*/
bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
+
bool dm_thin_aborted_changes(struct dm_thin_device *td);
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index faaf944597ab..7e84baccf0ad 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1357,7 +1357,8 @@ static void process_deferred_bios(struct pool *pool)
bio_list_init(&pool->deferred_flush_bios);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
+ if (bio_list_empty(&bios) &&
+ !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
return;
if (commit(pool)) {
@@ -1999,16 +2000,27 @@ static void metadata_low_callback(void *context)
dm_table_event(pool->ti->table);
}
-static sector_t get_metadata_dev_size(struct block_device *bdev)
+static sector_t get_dev_size(struct block_device *bdev)
+{
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static void warn_if_metadata_device_too_big(struct block_device *bdev)
{
- sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t metadata_dev_size = get_dev_size(bdev);
char buffer[BDEVNAME_SIZE];
- if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
- metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
- }
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+ sector_t metadata_dev_size = get_dev_size(bdev);
+
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
+ metadata_dev_size = THIN_METADATA_MAX_SECTORS;
return metadata_dev_size;
}
@@ -2017,7 +2029,7 @@ static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
{
sector_t metadata_dev_size = get_metadata_dev_size(bdev);
- sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
return metadata_dev_size;
}
@@ -2095,12 +2107,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Error opening metadata block device";
goto out_unlock;
}
-
- /*
- * Run for the side-effect of possibly issuing a warning if the
- * device is too big.
- */
- (void) get_metadata_dev_size(metadata_dev->bdev);
+ warn_if_metadata_device_too_big(metadata_dev->bdev);
r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
if (r) {
@@ -2287,6 +2294,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
+ warn_if_metadata_device_too_big(pool->md_dev);
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
@@ -2894,6 +2902,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (get_pool_mode(tc->pool) == PM_FAIL) {
ti->error = "Couldn't open thin device, Pool is in fail mode";
+ r = -EINVAL;
goto bad_thin_open;
}
@@ -2905,7 +2914,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
if (r)
- goto bad_thin_open;
+ goto bad_target_max_io_len;
ti->num_flush_bios = 1;
ti->flush_supported = true;
@@ -2926,6 +2935,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
return 0;
+bad_target_max_io_len:
+ dm_pool_close_thin_device(tc->td);
bad_thin_open:
__pool_dec(tc->pool);
bad_pool_lookup:
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 536782e3bcb7..e9bdd462f4f5 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -680,6 +680,8 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
if (r)
return r;
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
r = sm_ll_extend(&smm->ll, nr_blocks);
if (r)
return r;
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h
index 39bba0801cf2..64df923974d8 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.h
+++ b/drivers/md/persistent-data/dm-space-map-metadata.h
@@ -9,6 +9,17 @@
#include "dm-transaction-manager.h"
+#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)
+
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about ~16k metadata blocks.
+ */
+#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64))
+#define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)
+
/*
* Unfortunately we have to use two-phase construction due to the cycle
* between the tm and sm.
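The dm-thin changes above are, at bottom, a unit conversion: THIN_METADATA_BLOCK_SIZE is now defined via DM_SM_METADATA_BLOCK_SIZE in 512-byte sectors rather than in bytes, which is why the superblock write drops its >> SECTOR_SHIFT and dm_block_manager_create() gains a << SECTOR_SHIFT. A sanity check of the arithmetic (editorial aside, user-space):

#include <stdio.h>

#define SECTOR_SHIFT			9			/* 512-byte sectors */
#define DM_SM_METADATA_BLOCK_SIZE	(4096 >> SECTOR_SHIFT)	/* 8 sectors */
#define DM_SM_METADATA_MAX_BLOCKS	(255 * ((1 << 14) - 64))
#define DM_SM_METADATA_MAX_SECTORS	(DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)

int main(void)
{
	printf("metadata block: %d sectors = %d bytes\n",
	       DM_SM_METADATA_BLOCK_SIZE,
	       DM_SM_METADATA_BLOCK_SIZE << SECTOR_SHIFT);
	printf("metadata device limit: %d sectors (about %d GiB)\n",
	       DM_SM_METADATA_MAX_SECTORS,
	       DM_SM_METADATA_MAX_SECTORS >> (30 - SECTOR_SHIFT));
	return 0;
}

The limit works out to just under the 16GB warning threshold mentioned in the header comment.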
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd3a2a14b587..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
+ int uptodate;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse */
+ /* fixup the bio for reuse, but preserve BIO_UPTODATE */
+ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
bio_reset(b);
+ if (!uptodate)
+ clear_bit(BIO_UPTODATE, &b->bi_flags);
b->bi_vcnt = vcnt;
b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
+ int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
if (sbio->bi_end_io != end_sync_read)
continue;
+ /* Now we can 'fixup' the BIO_UPTODATE flag */
+ set_bit(BIO_UPTODATE, &sbio->bi_flags);
- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ if (uptodate) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+ && uptodate)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feadeb7bb2..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 68f768a5422d..a6c3c9e2e897 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1176,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
switch (demod) {
case 0:
- dev_err(&state->priv->i2c->dev,
+ dev_err(&i2c->dev,
"%s: Error attaching frontend %d\n",
KBUILD_MODNAME, demod);
goto error1;
@@ -1200,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->demod = demod - 1;
state->priv = priv;
- /* test i2c bus for ack */
- if (demod == 0) {
- if (cx24117_readreg(state, 0x00) < 0)
- goto error3;
- }
-
dev_info(&state->priv->i2c->dev,
"%s: Attaching frontend %d\n",
KBUILD_MODNAME, state->demod);
@@ -1216,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
-error3:
- kfree(state);
error2:
cx24117_release_priv(priv);
error1:
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 4bf057544607..8a8e1ecb762d 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -2,7 +2,7 @@
* Support for NXT2002 and NXT2004 - VSB/QAM
*
* Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
- * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
+ * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
* based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
* and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
*
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index e04fe3f80383..88ce9dcb4971 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2645,7 +2645,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
(pdata->sdp_free_run_cbar_en << 1) |
(pdata->sdp_free_run_man_col_en << 2) |
- (pdata->sdp_free_run_force << 3));
+ (pdata->sdp_free_run_auto << 3));
/* TODO from platform data */
cp_write(sd, 0x69, 0x14); /* Enable CP CSC */
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 4b8381111cbd..77e10e0fd8d6 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -478,25 +478,33 @@ static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
u16 count, const u16 *seq)
{
struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
- __be16 buf[count + 1];
- int ret, n;
+ __be16 buf[65];
s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
if (state->error)
return;
+ v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
+ min(2 * count, 64), seq);
+
buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
- for (n = 1; n <= count; ++n)
- buf[n] = cpu_to_be16(*seq++);
- n *= 2;
- ret = i2c_master_send(c, (char *)buf, n);
- v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
- min(2 * count, 64), seq - count);
+ while (count > 0) {
+ int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
+ int ret, i;
- if (ret != n) {
- v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
- state->error = ret;
+ for (i = 1; i <= n; ++i)
+ buf[i] = cpu_to_be16(*seq++);
+
+ i *= 2;
+ ret = i2c_master_send(c, (char *)buf, i);
+ if (ret != i) {
+ v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
+ state->error = ret;
+ break;
+ }
+
+ count -= n;
}
}
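The s5k5baf rewrite above replaces a variable-length on-stack buffer (__be16 buf[count + 1], unbounded for large sequences) with a fixed 65-entry buffer and a loop that sends at most 64 values per i2c transfer. A minimal user-space sketch of that chunking shape (send_chunk() is a stand-in for i2c_master_send(), and CHUNK is shrunk for readability):

#include <stdio.h>

#define CHUNK	4	/* the driver uses 64 values per transfer */

/* stand-in for i2c_master_send(): consumes one bounded chunk */
static void send_chunk(const int *vals, int n)
{
	int i;

	for (i = 0; i < n; i++)
		printf("%d ", vals[i]);
	printf("| ");
}

int main(void)
{
	int seq[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
	int count = 10;
	const int *p = seq;

	/* same shape as the new driver loop: fixed buffer, iterate until done */
	while (count > 0) {
		int n = count < CHUNK ? count : CHUNK;

		send_chunk(p, n);
		p += n;
		count -= n;
	}
	printf("\n");
	return 0;
}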
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d85cb0ace4dc..6662b495b22c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
},
/* ---- card 0x87---------------------------------- */
[BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
- /* Michael Krufky <mkrufky@m1k.net> */
+ /* Michael Krufky <mkrufky@linuxtv.org> */
.name = "DViCO FusionHDTV 5 Lite",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
.tuner_addr = ADDR_UNSET,
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 922e8233fd0b..3f364b7062b9 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
err = device_register(&sub->dev);
if (0 != err) {
- kfree(sub);
+ put_device(&sub->dev);
return err;
}
pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index d45e7f6ff332..c9b2350e92c8 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
}},
},
[SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
- /* Michael Krufky <mkrufky@m1k.net>
+ /* Michael Krufky <mkrufky@linuxtv.org>
* Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
* AFAIK, there is no analog demod, thus,
* no support for analog television.
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index a7dfd07e8389..da2fc86cc524 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1027,7 +1027,8 @@ static int fimc_probe(struct platform_device *pdev)
return 0;
err_gclk:
- clk_disable(fimc->clock[CLK_GATE]);
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock[CLK_GATE]);
err_sd:
fimc_unregister_capture_subdev(fimc);
err_sclk:
@@ -1036,6 +1037,7 @@ err_sclk:
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_resume(struct device *dev)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
@@ -1068,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
return ret;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 2be4bb522cad..3ad660b55b6b 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1563,7 +1563,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
if (!pm_runtime_enabled(dev)) {
ret = clk_enable(fimc->clock);
if (ret < 0)
- goto err_clk_put;
+ goto err_sd;
}
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
@@ -1579,7 +1579,8 @@ static int fimc_lite_probe(struct platform_device *pdev)
return 0;
err_clk_dis:
- clk_disable(fimc->clock);
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock);
err_sd:
fimc_lite_unregister_capture_subdev(fimc);
err_clk_put:
@@ -1587,6 +1588,7 @@ err_clk_put:
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_lite_runtime_resume(struct device *dev)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1602,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
clk_disable(fimc->clock);
return 0;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_lite_resume(struct device *dev)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index da0ad886a5bf..8a18972012f7 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -175,7 +175,7 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
- .depth = 16,
+ .depth = 12,
.colplanes = 2,
.h_align = 1,
.v_align = 1,
@@ -188,10 +188,10 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
- .depth = 16,
- .colplanes = 4,
+ .depth = 12,
+ .colplanes = 2,
.h_align = 4,
- .v_align = 1,
+ .v_align = 4,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_S5P |
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index d83df4bb72d3..0a98d04c53e4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 3f3f8bfd190b..2d4530f5be54 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index e4121cb8f5ef..a619410adde4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 0220f54299a5..b85a5772d771 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 34434557ef65..a101d06eb143 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index a57a45ffb9e4..465762145ad2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index b741b3a7a325..f6b348024bec 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index f0756071d347..0643738de7de 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 17831b0fb9db..89bf115e927e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-reg.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 879c529640f7..a8d2c7053674 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 90f583e5d6a6..2046db22519e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
#else
static inline
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state
+ struct mxl111sf_state *mxl_state,
struct mxl111sf_tuner_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 08240e498451..c7304fa8ab73 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
ret = -EINVAL;
}
- pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
+ pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
fail:
return ret;
}
@@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = {
module_usb_driver(mxl111sf_usb_driver);
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 9816de86e48c..8516c011b7cc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 2f0c89cbac76..c5638964c3f2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev)
hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
print_buf);
- kfree(print_buf);
#endif
msleep(100);
@@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev)
retval = ret != 8;
unlock:
mutex_unlock(&dev->usbc_mutex);
+#ifdef HDPVR_DEBUG
+ kfree(print_buf);
+#endif
return retval;
}
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 41bf3f9b6ca6..f63a6bdc7d46 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -519,6 +519,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
aspect.denominator = 9;
}
image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
/* Horizontal */
if (default_gtf)
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 65411adcd0ea..7e6b209b7002 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
}
static const struct vm_operations_struct videobuf_vm_ops = {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 9db674ccdc68..828e7c10bd70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
+ videobuf_queue_unlock(q);
kfree(map);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 1365c651c177..2ff7fcc77b11 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 79eb9ba819dc..8e6695c9b0e2 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1827,6 +1827,11 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
return 0;
}
+ if (!q->num_buffers) {
+ dprintk(1, "streamon: no buffers have been allocated\n");
+ return -EINVAL;
+ }
+
/*
* If any buffers were queued before streamon,
* we can now pass them to driver for processing.
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188c2bd9..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
int ret;
- mutex_lock(&i2o_cfg_mutex);
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
+ mutex_lock(&i2o_cfg_mutex);
ret = i2o_cfg_passthru32(file, cmd, arg);
+ mutex_unlock(&i2o_cfg_mutex);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&i2o_cfg_mutex);
return ret;
}
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 13af7e50021e..8103e4362132 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -53,17 +53,25 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
return 0;
}
+/*
+ * DO NOT change the device Ids. The naming is intentionally specific as both
+ * the PMIC and CODEC parts of this chip are instantiated separately as I2C
+ * devices (both have configurable I2C addresses, and are to all intents and
+ * purposes separate). As a result there are specific DA9055 ids for PMIC
+ * and CODEC, which must be different to operate together.
+ */
static struct i2c_device_id da9055_i2c_id[] = {
- {"da9055", 0},
+ {"da9055-pmic", 0},
{ }
};
+MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
static struct i2c_driver da9055_i2c_driver = {
.probe = da9055_i2c_probe,
.remove = da9055_i2c_remove,
.id_table = da9055_i2c_id,
.driver = {
- .name = "da9055",
+ .name = "da9055-pmic",
.owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index ac514fb2b877..71aa14a6bfbb 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -173,6 +173,7 @@ static const struct i2c_device_id max14577_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
+#ifdef CONFIG_PM_SLEEP
static int max14577_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -208,6 +209,7 @@ static int max14577_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static struct of_device_id max14577_dt_match[] = {
{ .compatible = "maxim,max14577", },
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index be88a3bf7b85..5adede0fb04c 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -164,15 +164,15 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
return pd;
}
-static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8997_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 612ca404e150..5d5e186b5d8b 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -169,16 +169,16 @@ static struct max8998_platform_data *max8998_i2c_parse_dt_pdata(
return pd;
}
-static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8998_dt_match, i2c->dev.of_node);
- return (int)(long)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8998_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index a139798b8065..714e2135210e 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -315,6 +315,7 @@ static int sec_pmic_remove(struct i2c_client *i2c)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int sec_pmic_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -349,6 +350,7 @@ static int sec_pmic_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 966cf65c5c36..3cc4c7084b92 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,7 +158,7 @@ static int tps65217_probe(struct i2c_client *client,
{
struct tps65217 *tps;
unsigned int version;
- unsigned int chip_id = ids->driver_data;
+ unsigned long chip_id = ids->driver_data;
const struct of_device_id *match;
bool status_off = false;
int ret;
@@ -170,7 +170,7 @@ static int tps65217_probe(struct i2c_client *client,
"Failed to find matching dt id\n");
return -EINVAL;
}
- chip_id = (unsigned int)(unsigned long)match->data;
+ chip_id = (unsigned long)match->data;
status_off = of_property_read_bool(client->dev.of_node,
"ti,pmic-shutdown-controller");
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index ba04f1bc70eb..e6fab94e2c8a 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -636,7 +636,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
if (i2c->dev.of_node) {
of_id = of_match_device(wm8994_of_match, &i2c->dev);
if (of_id)
- wm8994->type = (int)of_id->data;
+ wm8994->type = (enum wm8994_type)of_id->data;
} else {
wm8994->type = id->driver_data;
}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 8f8a6b327cdb..2c2c9cc75231 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+ kfree(dma_map);
return rc;
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1ee2b9492a82..89a557972d1b 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -666,7 +666,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
goto err;
cb->fop_type = MEI_FOP_READ;
- cl->read_cb = cb;
if (dev->hbuf_is_ready) {
dev->hbuf_is_ready = false;
if (mei_hbm_cl_flow_control_req(dev, cl)) {
@@ -678,6 +677,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
+
+ cl->read_cb = cb;
+
return rets;
err:
mei_io_cb_free(cb);
@@ -908,7 +910,6 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
- cl->read_cb = NULL;
cl->timer_count = 0;
}
}
@@ -942,8 +943,16 @@ void mei_cl_all_wakeup(struct mei_device *dev)
void mei_cl_all_write_clear(struct mei_device *dev)
{
struct mei_cl_cb *cb, *next;
+ struct list_head *list;
+
+ list = &dev->write_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list = &dev->write_waiting_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 752ff873f891..7e1ef0ebbb80 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
static int _mic_virtio_copy(struct mic_vdev *mvdev,
struct mic_copy_desc *copy)
{
- int ret = 0, iovcnt = copy->iovcnt;
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
struct iovec iov;
struct iovec __user *u_iov = copy->iov;
void __user *ubuf = NULL;
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d17327..2bef3f76032a 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ubuf += sizeof(hdr);
ubufcch = ubuf;
- if (gru_user_copy_handle(&ubuf, cch))
- goto fail;
+ if (gru_user_copy_handle(&ubuf, cch)) {
+ if (cch_locked)
+ unlock_cch_handle(cch);
+ return -EFAULT;
+ }
if (cch_locked)
ubufcch->delresp = 0;
bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFAULT;
return ret ? ret : bytes;
-
-fail:
- unlock_cch_handle(cch);
- return -EFAULT;
}
int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc54fe4b..3e049c13429c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 59eba5d2c685..9715a7ba164a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1584,7 +1584,7 @@ read_retry:
}
if (mtd->ecc_stats.failed - ecc_failures) {
- if (retry_mode + 1 <= chip->read_retries) {
+ if (retry_mode + 1 < chip->read_retries) {
retry_mode++;
ret = nand_setup_read_retry(mtd,
retry_mode);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ef4190a02b7b..bf642ceef681 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1633,6 +1633,7 @@ static int omap_nand_probe(struct platform_device *pdev)
int i;
dma_cap_mask_t mask;
unsigned sig;
+ unsigned oob_index;
struct resource *res;
struct mtd_part_parser_data ppdata = {};
@@ -1826,11 +1827,14 @@ static int omap_nand_probe(struct platform_device *pdev)
(mtd->writesize /
nand_chip->ecc.size);
if (nand_chip->options & NAND_BUSWIDTH_16)
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ oob_index = BADBLOCK_MARKER_LENGTH;
else
- ecclayout->eccpos[0] = 1;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = 1;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* no reserved-marker in ecclayout for this ecc-scheme */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1847,9 +1851,15 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1883,9 +1893,12 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* This ECC scheme requires ELM H/W block */
if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
pr_err("nand: error: could not initialize ELM\n");
@@ -1913,9 +1926,15 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1956,9 +1975,12 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
#else
pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
@@ -1972,11 +1994,8 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- /* populate remaining ECC layout data */
- ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
- ecclayout->eccbytes);
- for (i = 1; i < ecclayout->eccbytes; i++)
- ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+ /* all OOB bytes from oobfree->offset till end of OOB are free */
+ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
/* check if NAND device's OOB is enough to store ECC signatures */
if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
pr_err("not enough OOB bytes required = %d, available=%d\n",
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index ead861307b3c..c5dad652614d 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -463,8 +463,8 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
}
if (found_orphan) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
@@ -846,16 +846,16 @@ fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
return ret;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f342278539d5..494b888a6568 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -139,7 +139,7 @@ config MACVTAP
This adds a specialized tap character device driver that is based
on the MAC-VLAN network interface, called macvtap. A macvtap device
can be added in the same way as a macvlan device, using 'type
- macvlan', and then be accessed through the tap user space interface.
+ macvtap', and then be accessed through the tap user space interface.
To compile this driver as a module, choose M here: the module
will be called macvtap.
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cce1f1bf90b4..6d20fbde8d43 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1796,8 +1796,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
BOND_AD_INFO(bond).agg_select_timer = timeout;
}
-static u16 aggregator_identifier;
-
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
@@ -1811,7 +1809,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
bond->dev->dev_addr)) {
- aggregator_identifier = 0;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1880,7 +1878,7 @@ void bond_3ad_bind_slave(struct slave *slave)
ad_initialize_agg(aggregator);
aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
- aggregator->aggregator_identifier = (++aggregator_identifier);
+ aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
aggregator->slave = slave;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 13dc9d3c5e34..f4dd9592ac62 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,6 +253,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ u16 aggregator_identifier;
};
struct ad_slave_info {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4c08018d7333..1c6104d3501d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1270,9 +1270,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
- bond_dev->name);
- bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ bond_dev->name);
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ bond_dev->name);
+ }
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
bond_dev->name);
@@ -1315,7 +1319,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/*
* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
@@ -1505,7 +1510,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- read_unlock(&bond->lock);
pr_info("Error, %s: master_dev is using netpoll, "
"but new slave device does not support netpoll.\n",
bond_dev->name);
@@ -1539,9 +1543,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
if (USES_PRIMARY(bond->params.mode)) {
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1567,10 +1573,12 @@ err_detach:
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
}
slave_disable_netpoll(new_slave);
@@ -1579,7 +1587,8 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
@@ -1672,7 +1681,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- if (!all && !bond->params.fail_over_mac) {
+ if (!all && (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -1769,7 +1779,8 @@ static int __bond_release_one(struct net_device *bond_dev,
/* close slave before restoring its mac address */
dev_close(slave_dev);
- if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+ if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
addr.sa_family = slave_dev->type;
@@ -2857,9 +2868,12 @@ static int bond_slave_netdev_event(unsigned long event,
pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
bond->dev->name, bond->primary_slave ? slave_dev->name :
"none");
+
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
@@ -3431,7 +3445,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
*/
- if (bond->params.fail_over_mac)
+ if (bond->params.fail_over_mac &&
+ bond->params.mode == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(sa->sa_data))
@@ -3692,7 +3707,7 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 11cb943222d5..c37878432717 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/if.h>
#include <linux/netdevice.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/ctype.h>
#include <linux/inet.h>
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d447b881bbde..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
+ depends on ARM || PPC
---help---
Say Y here if you want to support for Freescale FlexCAN.
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 13a909822e25..fc59bc6f040b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -323,19 +323,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
if (!priv->echo_skb[idx]) {
- struct sock *srcsk = skb->sk;
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else
- skb_orphan(skb);
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* make settings for echo to reduce code in irq context */
skb->protocol = htons(ETH_P_CAN);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97bee471..320bef2dba42 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -235,9 +235,12 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
};
/*
- * Abstract off the read/write for arm versus ppc.
+ * Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
*/
-#if defined(__BIG_ENDIAN)
+#if defined(CONFIG_PPC)
static inline u32 flexcan_read(void __iomem *addr)
{
return in_be32(addr);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index e24e6690d672..71594e5676fd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/can/error.h>
#include <linux/mfd/janz.h>
@@ -1133,20 +1134,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
*/
static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
{
- struct sock *srcsk = skb->sk;
-
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else {
- skb_orphan(skb);
- }
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
@@ -1322,7 +1312,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* process all communication messages */
while (true) {
- struct ican3_msg msg;
+ struct ican3_msg uninitialized_var(msg);
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 6c859bba8b65..e77d11049747 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
return err;
dev->nchannels = msg.u.cardinfo.nchannels;
+ if (dev->nchannels > MAX_NET_DEVICES)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee79a17..4e94057ef5cf 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>
@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cfd->len;
}
- kfree_skb(skb);
+ consume_skb(skb);
return NETDEV_TX_OK;
}
/* perform standard echo handling for CAN network interfaces */
if (loop) {
- struct sock *srcsk = skb->sk;
- skb = skb_share_check(skb, GFP_ATOMIC);
+ skb = can_create_echo_skb(skb);
if (!skb)
return NETDEV_TX_OK;
/* receive with packet counting */
- skb->sk = srcsk;
vcan_rx(skb, dev);
} else {
/* no looped packets => no counting */
- kfree_skb(skb);
+ consume_skb(skb);
}
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 0f4241c6e97e..238ccea965c8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3294,7 +3294,6 @@ static int __init vortex_init(void)
static void __exit vortex_eisa_cleanup(void)
{
- struct vortex_private *vp;
void __iomem *ioaddr;
#ifdef CONFIG_EISA
@@ -3303,7 +3302,6 @@ static void __exit vortex_eisa_cleanup(void)
#endif
if (compaq_net_device) {
- vp = netdev_priv(compaq_net_device);
ioaddr = ioport_map(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0cc21437478c..511f6eecd58b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -929,6 +929,9 @@ static int emac_resume(struct platform_device *dev)
}
static const struct of_device_id emac_of_match[] = {
+ {.compatible = "allwinner,sun4i-a10-emac",},
+
+ /* Deprecated */
{.compatible = "allwinner,sun4i-emac",},
{},
};
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e92ffd6e1c15..2e45f6ec1bf0 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1292,6 +1292,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
alx = netdev_priv(netdev);
spin_lock_init(&alx->hw.mdio_lock);
spin_lock_init(&alx->irq_lock);
+ spin_lock_init(&alx->stats_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9d2dedadf2df..cda25ac45b47 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -85,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;
-module_param(disable_msi, int, 0);
+module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9d7419e0390b..66c0df78c3ff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1873,7 +1873,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1895,7 +1895,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 17d1689aec6b..a89a40f88c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
@@ -936,7 +936,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_mode = L2GRE_TUNNEL;
start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
return bnx2x_func_state_change(bp, &func_params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9c445e7b4a5..7d4382286457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -95,29 +95,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
int bnx2x_num_queues;
-module_param_named(num_queues, bnx2x_num_queues, int, 0);
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
-module_param(int_mode, int, 0);
+module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index aec5ef2ed7ce..e42f48df6e94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1446,12 +1446,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vf->cfg_flags & VF_CFG_INT_SIMD)
val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
- val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
DP(BNX2X_MSG_IOV,
- "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
- vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e2ca03e23dc1..3167ed6593b0 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2609,13 +2609,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
- reg32 &= ~0x3000;
- tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
- } else if (!err)
- err = -EBUSY;
+ err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+ if (err)
+ return err;
- return err;
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ return 0;
}
static void tg3_carrier_off(struct tg3 *tp)
@@ -14113,12 +14114,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_stop(tp);
+ tg3_set_mtu(dev, tp, new_mtu);
+
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_set_mtu(dev, tp, new_mtu);
-
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index add05f14b38b..1642de78aac8 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
+ pci_disable_device(pdev);
/* pci_power_off (pdev, -1); */
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd149cf..55e0fa03dc90 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
#define ETH_HASH0 0x48
#define ETH_HASH1 0x4c
#define ETH_TXCTRL 0x50
+#define ETH_END 0x54
/* mode register */
#define MODER_RXEN (1 << 0) /* receive enable */
@@ -179,6 +181,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @membase: pointer to buffer memory region
* @dma_alloc: dma allocated buffer size
* @io_region_size: I/O memory region size
+ * @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -199,6 +202,7 @@ struct ethoc {
int dma_alloc;
resource_size_t io_region_size;
+ unsigned int num_bd;
unsigned int num_tx;
unsigned int cur_tx;
unsigned int dty_tx;
@@ -216,6 +220,7 @@ struct ethoc {
struct phy_device *phy;
struct mii_bus *mdio;
+ struct clk *clk;
s8 phy_id;
};
@@ -688,6 +693,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
}
priv->phy = phy;
+ phy->advertising &= ~(ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half);
+ phy->supported &= ~(SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half);
+
return 0;
}
@@ -890,6 +900,102 @@ out:
return NETDEV_TX_OK;
}
+static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static int ethoc_get_regs_len(struct net_device *netdev)
+{
+ return ETH_END;
+}
+
+static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 *regs_buff = p;
+ unsigned i;
+
+ regs->version = 0;
+ for (i = 0; i < ETH_END / sizeof(u32); ++i)
+ regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
+}
+
+static void ethoc_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = priv->num_bd - 1;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = priv->num_bd - 1;
+
+ ring->rx_pending = priv->num_rx;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = priv->num_tx;
+}
+
+static int ethoc_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
+ ring->tx_pending + ring->rx_pending > priv->num_bd)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ netif_tx_disable(dev);
+ ethoc_disable_rx_and_tx(priv);
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ synchronize_irq(dev->irq);
+ }
+
+ priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
+ priv->num_rx = ring->rx_pending;
+ ethoc_init_ring(priv, dev->mem_start);
+
+ if (netif_running(dev)) {
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ ethoc_enable_rx_and_tx(priv);
+ netif_wake_queue(dev);
+ }
+ return 0;
+}
+
+const struct ethtool_ops ethoc_ethtool_ops = {
+ .get_settings = ethoc_get_settings,
+ .set_settings = ethoc_set_settings,
+ .get_regs_len = ethoc_get_regs_len,
+ .get_regs = ethoc_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ethoc_get_ringparam,
+ .set_ringparam = ethoc_set_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@ static int ethoc_probe(struct platform_device *pdev)
int num_bd;
int ret = 0;
bool random_mac = false;
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
/* allocate networking device */
netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = -ENODEV;
goto error;
}
+ priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (dev_get_platdata(&pdev->dev)) {
- struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
@@ -1069,6 +1177,27 @@ static int ethoc_probe(struct platform_device *pdev)
if (random_mac)
netdev->addr_assign_type = NET_ADDR_RANDOM;
+ /* Allow the platform setup code to adjust MII management bus clock. */
+ if (!eth_clkfreq) {
+ struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (!IS_ERR(clk)) {
+ priv->clk = clk;
+ clk_prepare_enable(clk);
+ eth_clkfreq = clk_get_rate(clk);
+ }
+ }
+ if (eth_clkfreq) {
+ u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
+
+ if (!clkdiv)
+ clkdiv = 2;
+ dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
+ ethoc_write(priv, MIIMODER,
+ (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
+ clkdiv);
+ }
+
/* register MII bus */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
@@ -1111,6 +1240,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->netdev_ops = &ethoc_netdev_ops;
netdev->watchdog_timeo = ETHOC_TIMEOUT;
netdev->features |= 0;
+ netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@ free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
free_netdev(netdev);
out:
return ret;
@@ -1157,6 +1289,8 @@ static int ethoc_remove(struct platform_device *pdev)
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
unregister_netdev(netdev);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4782b42401b..903362a7b584 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1778,8 +1778,6 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
- napi_enable(&fep->napi);
-
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1794,6 +1792,8 @@ fec_enet_open(struct net_device *ndev)
fec_enet_free_buffers(ndev);
return ret;
}
+
+ napi_enable(&fep->napi);
phy_start(fep->phy_dev);
netif_start_queue(ndev);
fep->opened = 1;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba4442d4b..bf7a01ef9a57 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_disable_device(pdev);
+ pci_clear_master(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d4ada72dfd0..18076c4178b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
}
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
#ifdef IXGBE_FCOE
@@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
default:
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq + f->offset;
#else
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
#endif
}
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 8f9266c64c75..fd4b6aecf6ee 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev)
static u16
ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/* we are currently only using the first queue */
return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 6300fd27f2db..68e6a6613e9a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,12 +43,12 @@ config MVMDIO
This driver is used by the MV643XX_ETH and MVNETA drivers.
config MVNETA
- tristate "Marvell Armada 370/XP network interface support"
- depends on MACH_ARMADA_370_XP
+ tristate "Marvell Armada 370/38x/XP network interface support"
+ depends on PLAT_ORION
select MVMDIO
---help---
This driver supports the network interface units in the
- Marvell ARMADA XP and ARMADA 370 SoC family.
+ Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
Note that this driver is distinct from the mv643xx_eth
driver, which should be used for the older Marvell SoCs
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8e8a7eb43a2c..13457032d15f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
if (vlan_tx_tag_present(skb))
up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
- return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
+ return fallback(dev, skb) % rings_p_up + up * rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3af04c3f42ea..9ca223bc90fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8df2c3e..8ff57e8e3e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
config MLX5_CORE
tristate
- depends on PCI && X86
+ depends on PCI
default n
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded50ca1600..e46e8698e630 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -726,9 +726,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
int vpath_idx = 0;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath = NULL;
- struct __vxge_hw_device *hldev;
-
- hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -2443,9 +2440,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = pci_get_drvdata(vdev->pdev);
-
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev);
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb16965..75d11fa4eb0a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+#ifdef EFX_USE_PIO
finish_packet:
+#endif
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
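For context on the sfc change above: finish_packet is only the target of a goto in code compiled under EFX_USE_PIO, so an unguarded label triggers gcc's -Wunused-label in non-PIO builds; wrapping the label in the same #ifdef keeps both configurations warning-free. A stand-alone sketch of the pattern (names are hypothetical; with EXAMPLE_USE_PIO undefined, the goto and the label disappear together):

static void example_send(int try_pio)
{
#ifdef EXAMPLE_USE_PIO
	if (try_pio)
		goto finish_packet;	/* the only reference to the label */
#endif
	/* ...fall-back descriptor path... */
#ifdef EXAMPLE_USE_PIO
finish_packet:				/* guarded exactly like its user */
#endif
	/* common completion work for both paths */
	(void)try_pio;
	return;
}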
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index e2f202e3932f..f2d7c702c77f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -37,6 +37,17 @@ config DWMAC_SUNXI
stmmac device driver. This driver is used for A20/A31
GMAC ethernet controller.
+config DWMAC_STI
+ bool "STi GMAC support"
+ depends on STMMAC_PLATFORM && ARCH_STI
+ default y
+ ---help---
+ Support for the Ethernet controllers on STi SoCs.
+
+ This selects the STi SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC Ethernet
+ controller on STi series SoCs.
+
config STMMAC_PCI
bool "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ecadecea79b2..dcef28775dad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 000000000000..552bbc17863c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,330 @@
+/**
+ * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+/**
+ * STi GMAC glue logic.
+ * --------------------
+ *
+ * _
+ * | \
+ * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * phyclk | |___________________________________________
+ * | | | (phyclk-in)
+ * --------|1 / |
+ * int-clk |_ / |
+ * | _
+ * | | \
+ * |_______|1 \ ETH_SEL_TX_RETIME_CLK
+ * | |___________________________
+ * | | (tx-retime-clk)
+ * _______|0 /
+ * | |_ /
+ * _ |
+ * | \ |
+ * --------|0 \ |
+ * clk_125 | |__|
+ * | | ETH_SEL_TXCLK_NOT_CLK125
+ * --------|1 /
+ * txclk |_ /
+ *
+ *
+ * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can
+ * generate 50MHz clock or MAC can generate it.
+ * This bit is configured by "st,ext-phyclk" property.
+ *
+ * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz
+ * clock either comes from clk-125 pin or txclk pin. This configuration is
+ * totally driven by the board wiring. This bit is configured by
+ * "st,tx-retime-src" property.
+ *
+ * TXCLK configuration is different for different phy interface modes
+ * and changes according to link speed in modes like RGMII.
+ *
+ * Below table summarizes the clock requirement and clock sources for
+ * supported phy interface modes with link speeds.
+ * ________________________________________________
+ *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link |
+ * ------------------------------------------------
+ *| MII | n/a | 25Mhz |
+ *| | | txclk |
+ * ------------------------------------------------
+ *| GMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | txclk |
+ * ------------------------------------------------
+ *| RGMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | clkgen |
+ * ------------------------------------------------
+ *| RMII | n/a | 25Mhz |
+ *| | |clkgen/phyclk-in |
+ * ------------------------------------------------
+ *
+ * TX lines are always retimed with a clk, which can vary depending
+ * on the board configuration. Below is the table of these bits
+ * in eth configuration register depending on source of retime clk.
+ *
+ *---------------------------------------------------------------
+ * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125|
+ *---------------------------------------------------------------
+ * txclk | 0 | n/a | 1 |
+ *---------------------------------------------------------------
+ * ck_125| 0 | n/a | 0 |
+ *---------------------------------------------------------------
+ * phyclk| 1 | 0 | n/a |
+ *---------------------------------------------------------------
+ * clkgen| 1 | 1 | n/a |
+ *---------------------------------------------------------------
+ */
+
+ /* Register definition */
+
+ /* 3 bits [8:6]
+ * [6:6] ETH_SEL_TXCLK_NOT_CLK125
+ * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * [8:8] ETH_SEL_TX_RETIME_CLK
+ *
+ */
+
+#define TX_RETIME_SRC_MASK GENMASK(8, 6)
+#define ETH_SEL_TX_RETIME_CLK BIT(8)
+#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
+#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
+
+#define ENMII_MASK GENMASK(5, 5)
+#define ENMII BIT(5)
+
+/**
+ * 3 bits [4:2]
+ * 000-GMII/MII
+ * 001-RGMII
+ * 010-SGMII
+ * 100-RMII
+*/
+#define MII_PHY_SEL_MASK GENMASK(4, 2)
+#define ETH_PHY_SEL_RMII BIT(4)
+#define ETH_PHY_SEL_SGMII BIT(3)
+#define ETH_PHY_SEL_RGMII BIT(2)
+#define ETH_PHY_SEL_GMII 0x0
+#define ETH_PHY_SEL_MII 0x0
+
+#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
+ iface == PHY_INTERFACE_MODE_RGMII_ID || \
+ iface == PHY_INTERFACE_MODE_RGMII_RXID || \
+ iface == PHY_INTERFACE_MODE_RGMII_TXID)
+
+#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+ iface == PHY_INTERFACE_MODE_GMII)
+
+struct sti_dwmac {
+ int interface;
+ bool ext_phyclk;
+ bool is_tx_retime_src_clk_125;
+ struct clk *clk;
+ int reg;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static u32 phy_intf_sels[] = {
+ [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
+ [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
+ [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
+ [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+};
+
+enum {
+ TX_RETIME_SRC_NA = 0,
+ TX_RETIME_SRC_TXCLK = 1,
+ TX_RETIME_SRC_CLK_125,
+ TX_RETIME_SRC_PHYCLK,
+ TX_RETIME_SRC_CLKGEN,
+};
+
+static const char *const tx_retime_srcs[] = {
+ [TX_RETIME_SRC_NA] = "",
+ [TX_RETIME_SRC_TXCLK] = "txclk",
+ [TX_RETIME_SRC_CLK_125] = "clk_125",
+ [TX_RETIME_SRC_PHYCLK] = "phyclk",
+ [TX_RETIME_SRC_CLKGEN] = "clkgen",
+};
+
+static u32 tx_retime_val[] = {
+ [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
+ [TX_RETIME_SRC_CLK_125] = 0x0,
+ [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
+ [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
+ ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
+};
+
+static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
+{
+ u32 src = 0, freq = 0;
+
+ if (spd == SPEED_100) {
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
+ dwmac->interface == PHY_INTERFACE_MODE_GMII) {
+ src = TX_RETIME_SRC_TXCLK;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ if (dwmac->ext_phyclk) {
+ src = TX_RETIME_SRC_PHYCLK;
+ } else {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 50000000;
+ }
+
+ } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 25000000;
+ }
+
+ if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
+ clk_set_rate(dwmac->clk, freq);
+
+ } else if (spd == SPEED_1000) {
+ if (dwmac->is_tx_retime_src_clk_125)
+ src = TX_RETIME_SRC_CLK_125;
+ else
+ src = TX_RETIME_SRC_TXCLK;
+ }
+
+ regmap_update_bits(dwmac->regmap, dwmac->reg,
+ TX_RETIME_SRC_MASK, tx_retime_val[src]);
+}
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ if (dwmac->clk)
+ clk_disable_unprepare(dwmac->clk);
+}
+
+static void sti_fix_mac_speed(void *priv, unsigned int spd)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ setup_retime_src(dwmac, spd);
+
+ return;
+}
+
+static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ int err;
+
+ if (!np)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
+ if (!res)
+ return -ENODATA;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ dwmac->dev = dev;
+ dwmac->interface = of_get_phy_mode(np);
+ dwmac->regmap = regmap;
+ dwmac->reg = res->start;
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+ dwmac->is_tx_retime_src_clk_125 = false;
+
+ if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
+ const char *rs;
+
+ err = of_property_read_string(np, "st,tx-retime-src", &rs);
+ if (err < 0) {
+ dev_err(dev, "st,tx-retime-src not specified\n");
+ return err;
+ }
+
+ if (!strcasecmp(rs, "clk_125"))
+ dwmac->is_tx_retime_src_clk_125 = true;
+ }
+
+ dwmac->clk = devm_clk_get(dev, "sti-ethclk");
+
+ if (IS_ERR(dwmac->clk))
+ dwmac->clk = NULL;
+
+ return 0;
+}
+
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+ struct regmap *regmap = dwmac->regmap;
+ int iface = dwmac->interface;
+ u32 reg = dwmac->reg;
+ u32 val, spd;
+
+ if (dwmac->clk)
+ clk_prepare_enable(dwmac->clk);
+
+ regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+
+ val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+ regmap_update_bits(regmap, reg, ENMII_MASK, val);
+
+ if (IS_PHY_IF_MODE_GBIT(iface))
+ spd = SPEED_1000;
+ else
+ spd = SPEED_100;
+
+ setup_retime_src(dwmac, spd);
+
+ return 0;
+}
+
+static void *sti_dwmac_setup(struct platform_device *pdev)
+{
+ struct sti_dwmac *dwmac;
+ int ret;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sti_dwmac_parse_data(dwmac, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return ERR_PTR(ret);
+ }
+
+ return dwmac;
+}
+
+const struct stmmac_of_data sti_gmac_data = {
+ .fix_mac_speed = sti_fix_mac_speed,
+ .setup = sti_dwmac_setup,
+ .init = sti_dwmac_init,
+ .exit = sti_dwmac_exit,
+};
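One detail worth calling out in the new glue file: setup_retime_src() and sti_dwmac_init() both poke the same syscon register and rely on regmap_update_bits() performing a masked read-modify-write, so the retime-source field ([8:6]) and the PHY-interface field ([4:2]) can be reprogrammed independently. A simplified stand-alone illustration of that masking behaviour (plain C, not the regmap implementation itself):

/* mimic the read-modify-write that regmap_update_bits() provides */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
				unsigned int val)
{
	return (reg & ~mask) | (val & mask);
}

/* update_bits(reg, TX_RETIME_SRC_MASK, tx_retime_val[src]) rewrites only
 * bits [8:6]; MII_PHY_SEL_MASK ([4:2]) and ENMII (bit 5) stay untouched. */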
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d9af26ed58ee..f9e60d7918c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_DWMAC_SUNXI
extern const struct stmmac_of_data sun7i_gmac_data;
#endif
+#ifdef CONFIG_DWMAC_STI
+extern const struct stmmac_of_data sti_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5884a7d2063b..c61bc72b8e90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,11 @@ static const struct of_device_id stmmac_dt_ids[] = {
#ifdef CONFIG_DWMAC_SUNXI
{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
#endif
+#ifdef CONFIG_DWMAC_STI
+ { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data},
+#endif
/* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index bde63e3af96f..651087b5c8da 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
* common for both the interface as the interface shares
* the same hardware resource.
*/
- for (i = 0; i <= priv->data.slaves; i++)
+ for (i = 0; i < priv->data.slaves; i++)
if (priv->slaves[i].ndev->flags & IFF_PROMISC)
flag = true;
@@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
unsigned long timeout = jiffies + HZ;
/* Disable Learn for all ports */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i < priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 1);
cpsw_ale_control_set(ale, i,
@@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
/* Enable Learn for all ports */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i < priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 0);
cpsw_ale_control_set(ale, i,
@@ -1878,14 +1878,29 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+
+ if (strncmp(mdio->name, "gpio", 4) == 0) {
+ /* GPIO bitbang MDIO driver attached */
+ struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
+
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, bus->id, phyid);
+ } else {
+ /* davinci MDIO driver attached */
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
+ }
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
slave_data->phy_if = of_get_phy_mode(slave_node);
+ if (slave_data->phy_if < 0) {
+ pr_err("Missing or malformed slave[%d] phy-mode property\n",
+ i);
+ return slave_data->phy_if;
+ }
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 023237a65720..17503da9f7a5 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Return subqueue id on this core (one per core). */
static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return smp_processor_id();
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1ec65feebb9e..4bfdf8c7ada0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
+ ++lp->tx_bd_ci;
+ lp->tx_bd_ci %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
@@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
return NETDEV_TX_OK;
}
@@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->sw_id_offset = (u32) new_skb;
- lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
+ ++lp->rx_bd_ci;
+ lp->rx_bd_ci %= RX_BD_NUM;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
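The axienet hunks above all fix the same construct: a statement such as `lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;` writes the same object twice without an intervening sequence point, which is undefined behaviour in C and is what gcc's -Wsequence-point warns about; splitting it into an increment followed by the modulo has a single well-defined meaning. Minimal illustration with made-up names:

#define RING_SIZE 64

static unsigned int ring_advance(unsigned int idx)
{
	/* idx = ++idx % RING_SIZE;   <- two unsequenced writes to idx:
	 *                               undefined behaviour */
	++idx;			/* increment first ... */
	idx %= RING_SIZE;	/* ... then wrap, as two ordered statements */
	return idx;
}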
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7756118c2f0a..7141a1937360 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
int ret = 0;
+ netif_carrier_off(net);
+
/* Open up the device */
ret = rndis_filter_open(device_obj);
if (ret != 0) {
@@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net)
netif_start_queue(net);
+ nvdev = hv_get_drvdata(device_obj);
+ rdev = nvdev->extension;
+ if (!rdev->link_state)
+ netif_carrier_on(net);
+
return ret;
}
@@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct net_device *net;
struct net_device_context *ndev_ctx;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
net_device = hv_get_drvdata(device_obj);
+ rdev = net_device->extension;
+
+ rdev->link_state = status != 1;
+
net = net_device->ndev;
- if (!net) {
- netdev_err(net, "got link status but net device "
- "not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED)
return;
- }
+ ndev_ctx = netdev_priv(net);
if (status == 1) {
- netif_carrier_on(net);
- ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
- netif_carrier_off(net);
+ schedule_delayed_work(&ndev_ctx->dwork, 0);
}
}
@@ -388,17 +398,35 @@ static const struct net_device_ops device_ops = {
* current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
* another netif_notify_peers() into a delayed work, otherwise GARP packet
* will not be sent after quick migration, and cause network disconnection.
+ * Also, we update the carrier status here.
*/
-static void netvsc_send_garp(struct work_struct *w)
+static void netvsc_link_change(struct work_struct *w)
{
struct net_device_context *ndev_ctx;
struct net_device *net;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
+ bool notify;
+
+ rtnl_lock();
ndev_ctx = container_of(w, struct net_device_context, dwork.work);
net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+ rdev = net_device->extension;
net = net_device->ndev;
- netdev_notify_peers(net);
+
+ if (rdev->link_state) {
+ netif_carrier_off(net);
+ notify = false;
+ } else {
+ netif_carrier_on(net);
+ notify = true;
+ }
+
+ rtnl_unlock();
+
+ if (notify)
+ netdev_notify_peers(net);
}
@@ -414,13 +442,10 @@ static int netvsc_probe(struct hv_device *dev,
if (!net)
return -ENOMEM;
- /* Set initial state */
- netif_carrier_off(net);
-
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
hv_set_drvdata(dev, net);
- INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+ INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
net->netdev_ops = &device_ops;
@@ -443,8 +468,6 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- netif_carrier_on(net);
-
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2dc82f1d2e70..3da44d5d9149 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -210,13 +210,6 @@ config KINGSUN_DONGLE
To compile it as a module, choose M here: the module will be called
kingsun-sir.
-config EP7211_DONGLE
- tristate "Cirrus Logic clps711x I/R support"
- depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
- help
- Say Y here if you want to build support for the Cirrus logic
- EP7211 chipset's infrared module.
-
config KSDAZZLE_DONGLE
tristate "KingSun Dazzle IrDA-USB dongle"
depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc64537f62f..be8ab5b9a4a2 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
-obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4dd3369..000000000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * IR port driver for the Cirrus Logic CLPS711X processors
- *
- * Copyright 2001, Blue Mug Inc. All rights reserved.
- * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-
-#include "sir-dev.h"
-
-static int clps711x_dongle_open(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn on the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon |= SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static int clps711x_dongle_close(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn off the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon &= ~SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static struct dongle_driver clps711x_dongle = {
- .owner = THIS_MODULE,
- .driver_name = "EP7211 IR driver",
- .type = IRDA_EP7211_DONGLE,
- .open = clps711x_dongle_open,
- .close = clps711x_dongle_close,
-};
-
-static int clps711x_sir_probe(struct platform_device *pdev)
-{
- return irda_register_dongle(&clps711x_dongle);
-}
-
-static int clps711x_sir_remove(struct platform_device *pdev)
-{
- return irda_unregister_dongle(&clps711x_dongle);
-}
-
-static struct platform_driver clps711x_sir_driver = {
- .driver = {
- .name = "sir-clps711x",
- .owner = THIS_MODULE,
- },
- .probe = clps711x_sir_probe,
- .remove = clps711x_sir_remove,
-};
-module_platform_driver(clps711x_sir_driver);
-
-MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
-MODULE_DESCRIPTION("EP7211 IR dongle driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 177441afeb96..24b6dddd7f2f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty)
sirdev_put_instance(priv->dev);
/* Stop tty */
- irtty_stop_receiver(tty, TRUE);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (tty->ops->stop)
tty->ops->stop(tty);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8433de4509c7..a5d21893670d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -879,14 +879,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
if (err)
- goto destroy_port;
-
+ goto unregister_netdev;
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
+unregister_netdev:
+ unregister_netdevice(dev);
destroy_port:
port->count -= 1;
if (!port->count)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 547725fa8671..98e7cbf720a5 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
if (on) {
gpio_num = gpio_tab[EXTTS0_GPIO + index];
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
- evnt |= EVNT_RISE;
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ evnt |= EVNT_FALL;
+ else
+ evnt |= EVNT_RISE;
}
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
return 0;
@@ -1003,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev)
} else
list_add_tail(&dp83640->list, &clock->phylist);
- if (clock->chosen && !list_empty(&clock->phylist))
- recalibrate(clock);
- else
- enable_broadcast(dp83640->phydev, clock->page, 1);
-
dp83640_clock_put(clock);
return 0;
@@ -1058,6 +1056,21 @@ static void dp83640_remove(struct phy_device *phydev)
kfree(dp83640);
}
+static int dp83640_config_init(struct phy_device *phydev)
+{
+ struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_clock *clock = dp83640->clock;
+
+ if (clock->chosen && !list_empty(&clock->phylist))
+ recalibrate(clock);
+ else
+ enable_broadcast(phydev, clock->page, 1);
+
+ enable_status_frames(phydev, true);
+ ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+ return 0;
+}
+
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1208,6 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
- enable_status_frames(phydev, true);
- ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
- }
-
ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
@@ -1281,6 +1289,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
/* fall through */
case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_queue_tail(&dp83640->tx_queue, skb);
schedule_work(&dp83640->ts_work);
break;
@@ -1330,6 +1339,7 @@ static struct phy_driver dp83640_driver = {
.flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
+ .config_init = dp83640_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = dp83640_ack_interrupt,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index bb88bc7d81fb..9367acc84fbb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -170,6 +170,9 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_mdio_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-mdio" },
+
+ /* Deprecated */
{ .compatible = "allwinner,sun4i-mdio" },
{ }
};
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b03e63639b7..82514e72b3d8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -719,7 +719,7 @@ int phy_resume(struct phy_device *phydev)
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
- int oldadv, adv;
+ int oldadv, adv, bmsr;
int err, changed = 0;
/* Only allow advertising what this PHY supports */
@@ -744,26 +744,36 @@ static int genphy_config_advert(struct phy_device *phydev)
changed = 1;
}
+ bmsr = phy_read(phydev, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ /* Per 802.3-2008, Section 22.2.4.2.16 Extended status all
+ * 1000Mbits/sec capable PHYs shall have the BMSR_ESTATEN bit set to a
+ * logical 1.
+ */
+ if (!(bmsr & BMSR_ESTATEN))
+ return changed;
+
/* Configure gigabit if it's supported */
+ adv = phy_read(phydev, MII_CTRL1000);
+ if (adv < 0)
+ return adv;
+
+ oldadv = adv;
+ adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
if (phydev->supported & (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full)) {
- adv = phy_read(phydev, MII_CTRL1000);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-
- if (adv != oldadv) {
- err = phy_write(phydev, MII_CTRL1000, adv);
-
- if (err < 0)
- return err;
+ if (adv != oldadv)
changed = 1;
- }
}
+ err = phy_write(phydev, MII_CTRL1000, adv);
+ if (err < 0)
+ return err;
+
return changed;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 28407426fd6f..c8624a8235ab 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1648,7 +1648,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 44c4db8450f0..8fe9cb7d0f72 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
* hope the rxq no. may help here.
*/
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 6b638a066c1d..7e7269fd3707 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -292,6 +292,21 @@ config USB_NET_SR9700
This option adds support for CoreChip-sz SR9700 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SR9800
+ tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ ---help---
+ Say Y if you want to use one of the following 100Mbps USB Ethernet
+ devices based on the CoreChip-sz SR9800 chip.
+
+ This driver makes the adapter appear as a normal Ethernet interface,
+ typically on eth0, if it is the only ethernet device, or perhaps on
+ eth1, if you have a PCI or ISA ethernet card installed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sr9800.
+
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e88bbaf..433f0a00c683 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
+obj-$(CONFIG_USB_NET_SR9800) += sr9800.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9765a7d4766d..5d194093f3e1 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = {
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d6f64dad05bc..955df81a4358 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1118,6 +1118,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
skb_trim(skb, skb->len - 4);
memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
le32_to_cpus(&rx_hdr);
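The "/* This check is no longer done by usbnet */" hunks here and in the drivers that follow (gl620a, mcs7830, net1080, qmi_wwan, rndis_host, smsc75xx, smsc95xx, sr9800) add one and the same guard: the usbnet core no longer guarantees that rx_fixup() sees at least hard_header_len bytes, so each fixup has to reject runt buffers before parsing its framing. A sketch of the shared pattern (hypothetical function, assumes <linux/usb/usbnet.h>):

static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* runt URB: too short to even hold a link-layer header */
	if (skb->len < dev->net->hard_header_len)
		return 0;	/* returning 0 makes usbnet drop the skb */

	/* ...device-specific de-framing and usbnet_skb_return() calls... */
	return 1;		/* non-zero: buffer was valid */
}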
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index e4a8a93fbaf7..1cc24e6f23e2 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u32 size;
u32 count;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
header = (struct gl_header *) skb->data;
// get the packet count of the received skb
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1a482344b3f5..660bd5ea9fc0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1201,16 +1201,18 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
int status = urb->status;
+ D4("\n--- Got serial_read_bulk callback %02x ---", status);
+
/* sanity check */
if (!serial) {
D1("serial == NULL");
return;
- } else if (status) {
+ }
+ if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
}
- D4("\n--- Got serial_read_bulk callback %02x ---", status);
D1("Actual length = %d\n", urb->actual_length);
DUMP1(urb->transfer_buffer, urb->actual_length);
@@ -1218,25 +1220,13 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
if (serial->port.count == 0)
return;
- if (status == 0) {
- if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
- fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
- /* Valid data, handle RX data */
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
- put_rxbuf_data_and_resubmit_bulk_urb(serial);
- spin_unlock(&serial->serial_lock);
- } else if (status == -ENOENT || status == -ECONNRESET) {
- /* Unlinked - check for throttled port. */
- D2("Port %d, successfully unlinked urb", serial->minor);
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- hso_resubmit_rx_bulk_urb(serial, urb);
- spin_unlock(&serial->serial_lock);
- } else {
- D2("Port %d, status = %d for read urb", serial->minor, status);
- return;
- }
+ if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
+ /* Valid data, handle RX data */
+ spin_lock(&serial->serial_lock);
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+ put_rxbuf_data_and_resubmit_bulk_urb(serial);
+ spin_unlock(&serial->serial_lock);
}
/*
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a305a7b2dae6..82d844a8ebd0 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
- if (skb->len == 0) {
- dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len) {
+ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 0a85d9227775..4cbdb1307f3e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
struct nc_trailer *trailer;
u16 hdr_len, packet_len;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
if (!(skb->len & 0x01)) {
netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
skb->len, dev->net->hard_header_len, dev->hard_mtu,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b9274d..313cb6cd4848 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
__be16 proto;
- /* usbnet rx_complete guarantees that skb->len is at least
- * hard_header_len, so we can inspect the dest address without
- * checking skb->len
- */
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
@@ -712,6 +712,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
@@ -730,6 +732,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8fac732c6f1..d89dbe395ad2 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2273,22 +2273,21 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
+ netif_start_queue(netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
- return res;
}
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
- tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
- DUPLEX_FULL);
- tp->speed = 0;
- netif_carrier_off(netdev);
- netif_start_queue(netdev);
- set_bit(WORK_ENABLE, &tp->flags);
return res;
}
@@ -2298,8 +2297,8 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
tasklet_disable(&tp->tl);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a48bc0f20c1a..524a47a28120 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f17b9e02dd34..d9e7892262fa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8dd54a0f7b29..424db65e4396 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 header, align_count;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 000000000000..b94a0fbb8b3b
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,874 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on asix_common.c, asix_devices.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#include "sr9800.h"
+
+static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static void
+sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
+ size);
+}
+
+static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int offset = 0;
+
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *sr_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
+
+ offset += sizeof(u32);
+ /* get the packet length */
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "%s : Bad Header Length\n",
+ __func__);
+ return 0;
+ }
+
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+ (size + offset > skb->len)) {
+ netdev_err(dev->net, "%s : Bad RX Length %d\n",
+ __func__, size);
+ return 0;
+ }
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!sr_skb)
+ return 0;
+
+ skb_put(sr_skb, size);
+ memcpy(sr_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, sr_skb);
+
+ offset += (size + 1) & 0xfffe;
+ }
+
+ if (skb->len != offset) {
+ netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
+ skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ u32 padbytes = 0xffff0000;
+ u32 packet_len;
+ int padlen;
+
+ padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
+
+ if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
+ if ((headroom < 4) || (tailroom < padlen)) {
+ skb->data = memmove(skb->head + 4, skb->data,
+ skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+ skb2 = skb_copy_expand(skb, 4, padlen, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ skb_push(skb, 4);
+ packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+ }
+
+ return skb;
+}
+
+static void sr_status(struct usbnet *dev, struct urb *urb)
+{
+ struct sr9800_int_data *event;
+ int link;
+
+ if (urb->actual_length < 8)
+ return;
+
+ event = urb->transfer_buffer;
+ link = event->link & 0x01;
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+
+ return;
+}
+
+static inline int sr_set_sw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static inline int sr_set_hw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
+static inline int sr_get_phy_addr(struct usbnet *dev)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
+ __func__, ret);
+ goto out;
+ }
+ netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
+ *((__le16 *)buf));
+
+ ret = buf[1];
+
+out:
+ return ret;
+}
+
+static int sr_sw_reset(struct usbnet *dev, u8 flags)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to send software reset:%02x\n",
+ ret);
+
+ return ret;
+}
+
+static u16 sr_read_rx_ctl(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
+ ret);
+ goto out;
+ }
+
+ ret = le16_to_cpu(v);
+out:
+ return ret;
+}
+
+static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write RX_CTL mode to 0x%04x:%02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+static u16 sr_read_medium_status(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net,
+ "Error reading Medium Status register:%02x\n", ret);
+ return ret; /* TODO: callers not checking for error ret */
+ }
+
+ return le16_to_cpu(v);
+}
+
+static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write Medium Mode mode to 0x%04x:%02x\n",
+ mode, ret);
+ return ret;
+}
+
+static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
+ value, ret);
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+/* SR9800 have a 16-bit RX_CTL value */
+static void sr_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 rx_ctl = SR_DEFAULT_RX_CTL;
+
+ if (net->flags & IFF_PROMISC) {
+ rx_ctl |= SR_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > SR_MAX_MCAST) {
+ rx_ctl |= SR_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data
+ * for our 8 byte filter buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ struct netdev_hw_addr *ha;
+ u32 crc_bits;
+
+ memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
+
+ /* Build the multicast hash filter. */
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ data->multi_filter[crc_bits >> 3] |=
+ 1 << (crc_bits & 7);
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
+ SR_MCAST_FILTER_SIZE, data->multi_filter);
+
+ rx_ctl |= SR_RX_CTL_AM;
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
+
+static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res;
+
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+static void
+sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res = cpu_to_le16(val);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
+ phy_id, loc, val);
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
+static u32 sr_get_phyid(struct usbnet *dev)
+{
+ int phy_reg;
+ u32 phy_id;
+ int i;
+
+ /* Poll for the rare case the FW or phy isn't ready yet. */
+ for (i = 0; i < 100; i++) {
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+ if (phy_reg != 0 && phy_reg != 0xFFFF)
+ break;
+ mdelay(1);
+ }
+
+ if (phy_reg <= 0 || phy_reg == 0xFFFF)
+ return 0;
+
+ phy_id = (phy_reg & 0xffff) << 16;
+
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
+ if (phy_reg < 0)
+ return 0;
+
+ phy_id |= (phy_reg & 0xffff);
+
+ return phy_id;
+}
+
+static void
+sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & SR_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & SR_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= SR_MONITOR_LINK;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= SR_MONITOR_MAGIC;
+
+ if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
+ opt, 0, 0, NULL) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sr_get_eeprom_len(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ return data->eeprom_len;
+}
+
+static int sr_get_eeprom(struct net_device *net,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 *ebuf = (__le16 *)data;
+ int ret;
+ int i;
+
+ /* Crude hack to ensure that we don't overwrite memory
+ * if an odd length is supplied
+ */
+ if (eeprom->len % 2)
+ return -EINVAL;
+
+ eeprom->magic = SR_EEPROM_MAGIC;
+
+ /* sr9800 returns 2 bytes from eeprom on read */
+ for (i = 0; i < eeprom->len / 2; i++) {
+ ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
+ 0, 2, &ebuf[i]);
+ if (ret < 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void sr_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ info->eedump_len = data->eeprom_len;
+}
+
+static u32 sr_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return mii_link_ok(&dev->mii);
+}
+
+static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static int sr_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
+
+static const struct ethtool_ops sr9800_ethtool_ops = {
+ .get_drvinfo = sr_get_drvinfo,
+ .get_link = sr_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = sr_get_wol,
+ .set_wol = sr_set_wol,
+ .get_eeprom_len = sr_get_eeprom_len,
+ .get_eeprom = sr_get_eeprom,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static int sr9800_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ u16 mode;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+ mode = SR9800_MEDIUM_DEFAULT;
+
+ if (ethtool_cmd_speed(&ecmd) != SPEED_100)
+ mode &= ~SR_MEDIUM_PS;
+
+ if (ecmd.duplex != DUPLEX_FULL)
+ mode &= ~SR_MEDIUM_FD;
+
+ netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
+ __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
+
+ sr_write_medium_mode(dev, mode);
+
+ return 0;
+}
+
+
+static int sr9800_set_default_mode(struct usbnet *dev)
+{
+ u16 rx_ctl;
+ int ret;
+
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ mii_nway_restart(&dev->mii);
+
+ ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
+ if (ret < 0)
+ goto out;
+
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
+ SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
+ SR9800_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ rx_ctl = sr_read_medium_status(dev);
+ netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
+ rx_ctl);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int sr9800_reset(struct usbnet *dev)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ int ret, embd_phy;
+ u16 rx_ctl;
+
+ ret = sr_write_gpio(dev,
+ SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
+ if (ret < 0)
+ goto out;
+
+ embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ if (embd_phy) {
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
+ if (ret < 0)
+ goto out;
+ }
+
+ msleep(150);
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct net_device_ops sr9800_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = sr_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = sr_ioctl,
+ .ndo_set_rx_mode = sr_set_multicast,
+};
+
+static int sr9800_phy_powerup(struct usbnet *dev)
+{
+ int ret;
+
+ /* set the embedded Ethernet PHY in power-down state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(600);
+
+ /* set the embedded Ethernet PHY in reset state */
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 led01_mux, led23_mux;
+ int ret, embd_phy;
+ u32 phyid;
+ u16 rx_ctl;
+
+ data->eeprom_len = SR9800_EEPROM_LEN;
+
+ usbnet_get_endpoints(dev, intf);
+
+ /* LED Setting Rule :
+ * AABB:CCDD
+ * AA : MFA0(LED0)
+ * BB : MFA1(LED1)
+ * CC : MFA2(LED2), Reserved for SR9800
+ * DD : MFA3(LED3), Reserved for SR9800
+ */
+ led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
+ led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
+ ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
+ goto out;
+ }
+
+ /* Get the MAC address */
+ ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
+ dev->net->dev_addr);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
+ return ret;
+ }
+ netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = sr_mdio_read;
+ dev->mii.mdio_write = sr_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = sr_get_phy_addr(dev);
+
+ dev->net->netdev_ops = &sr9800_netdev_ops;
+ dev->net->ethtool_ops = &sr9800_ethtool_ops;
+
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+ /* Reset the PHY to normal operation mode */
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Init PHY routine */
+ ret = sr9800_phy_powerup(dev);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ /* Read PHYID register *AFTER* the PHY was reset properly */
+ phyid = sr_get_phyid(dev);
+ netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
+
+ /* medium mode setting */
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ if (dev->udev->speed == USB_SPEED_HIGH) {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
+ } else {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
+ }
+ netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__,
+ dev->rx_urb_size);
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct driver_info sr9800_driver_info = {
+ .description = "CoreChip SR9800 USB 2.0 Ethernet",
+ .bind = sr9800_bind,
+ .status = sr_status,
+ .link_reset = sr9800_link_reset,
+ .reset = sr9800_reset,
+ .flags = DRIVER_FLAG,
+ .rx_fixup = sr_rx_fixup,
+ .tx_fixup = sr_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(0x0fe6, 0x9800), /* SR9800 Device */
+ .driver_info = (unsigned long) &sr9800_driver_info,
+ },
+ {}, /* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr_driver = {
+ .name = DRIVER_NAME,
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disconnect = usbnet_disconnect,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(sr_driver);
+
+MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 000000000000..18f670251275
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _SR9800_H
+#define _SR9800_H
+
+/* SR9800 spec. command table on Linux Platform */
+
+/* command : Software Station Management Control Reg */
+#define SR_CMD_SET_SW_MII 0x06
+/* command : PHY Read Reg */
+#define SR_CMD_READ_MII_REG 0x07
+/* command : PHY Write Reg */
+#define SR_CMD_WRITE_MII_REG 0x08
+/* command : Hardware Station Management Control Reg */
+#define SR_CMD_SET_HW_MII 0x0a
+/* command : SROM Read Reg */
+#define SR_CMD_READ_EEPROM 0x0b
+/* command : SROM Write Reg */
+#define SR_CMD_WRITE_EEPROM 0x0c
+/* command : SROM Write Enable Reg */
+#define SR_CMD_WRITE_ENABLE 0x0d
+/* command : SROM Write Disable Reg */
+#define SR_CMD_WRITE_DISABLE 0x0e
+/* command : RX Control Read Reg */
+#define SR_CMD_READ_RX_CTL 0x0f
+#define SR_RX_CTL_PRO (1 << 0)
+#define SR_RX_CTL_AMALL (1 << 1)
+#define SR_RX_CTL_SEP (1 << 2)
+#define SR_RX_CTL_AB (1 << 3)
+#define SR_RX_CTL_AM (1 << 4)
+#define SR_RX_CTL_AP (1 << 5)
+#define SR_RX_CTL_ARP (1 << 6)
+#define SR_RX_CTL_SO (1 << 7)
+#define SR_RX_CTL_RH1M (1 << 8)
+#define SR_RX_CTL_RH2M (1 << 9)
+#define SR_RX_CTL_RH3M (1 << 10)
+/* command : RX Control Write Reg */
+#define SR_CMD_WRITE_RX_CTL 0x10
+/* command : IPG0/IPG1/IPG2 Control Read Reg */
+#define SR_CMD_READ_IPG012 0x11
+/* command : IPG0/IPG1/IPG2 Control Write Reg */
+#define SR_CMD_WRITE_IPG012 0x12
+/* command : Node ID Read Reg */
+#define SR_CMD_READ_NODE_ID 0x13
+/* command : Node ID Write Reg */
+#define SR_CMD_WRITE_NODE_ID 0x14
+/* command : Multicast Filter Array Read Reg */
+#define SR_CMD_READ_MULTI_FILTER 0x15
+/* command : Multicast Filter Array Write Reg */
+#define SR_CMD_WRITE_MULTI_FILTER 0x16
+/* command : Eth/HomePNA PHY Address Reg */
+#define SR_CMD_READ_PHY_ID 0x19
+/* command : Medium Status Read Reg */
+#define SR_CMD_READ_MEDIUM_STATUS 0x1a
+#define SR_MONITOR_LINK (1 << 1)
+#define SR_MONITOR_MAGIC (1 << 2)
+#define SR_MONITOR_HSFS (1 << 4)
+/* command : Medium Status Write Reg */
+#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
+#define SR_MEDIUM_GM (1 << 0)
+#define SR_MEDIUM_FD (1 << 1)
+#define SR_MEDIUM_AC (1 << 2)
+#define SR_MEDIUM_ENCK (1 << 3)
+#define SR_MEDIUM_RFC (1 << 4)
+#define SR_MEDIUM_TFC (1 << 5)
+#define SR_MEDIUM_JFE (1 << 6)
+#define SR_MEDIUM_PF (1 << 7)
+#define SR_MEDIUM_RE (1 << 8)
+#define SR_MEDIUM_PS (1 << 9)
+#define SR_MEDIUM_RSV (1 << 10)
+#define SR_MEDIUM_SBP (1 << 11)
+#define SR_MEDIUM_SM (1 << 12)
+/* command : Monitor Mode Status Read Reg */
+#define SR_CMD_READ_MONITOR_MODE 0x1c
+/* command : Monitor Mode Status Write Reg */
+#define SR_CMD_WRITE_MONITOR_MODE 0x1d
+/* command : GPIO Status Read Reg */
+#define SR_CMD_READ_GPIOS 0x1e
+#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
+#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
+#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
+#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
+#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
+#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
+#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
+#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
+/* command : GPIO Status Write Reg */
+#define SR_CMD_WRITE_GPIOS 0x1f
+/* command : Eth PHY Power and Reset Control Reg */
+#define SR_CMD_SW_RESET 0x20
+#define SR_SWRESET_CLEAR 0x00
+#define SR_SWRESET_RR (1 << 0)
+#define SR_SWRESET_RT (1 << 1)
+#define SR_SWRESET_PRTE (1 << 2)
+#define SR_SWRESET_PRL (1 << 3)
+#define SR_SWRESET_BZ (1 << 4)
+#define SR_SWRESET_IPRL (1 << 5)
+#define SR_SWRESET_IPPD (1 << 6)
+/* command : Software Interface Selection Status Read Reg */
+#define SR_CMD_SW_PHY_STATUS 0x21
+/* command : Software Interface Selection Status Write Reg */
+#define SR_CMD_SW_PHY_SELECT 0x22
+/* command : BULK in Buffer Size Reg */
+#define SR_CMD_BULKIN_SIZE 0x2A
+/* command : LED_MUX Control Reg */
+#define SR_CMD_LED_MUX 0x70
+#define SR_LED_MUX_TX_ACTIVE (1 << 0)
+#define SR_LED_MUX_RX_ACTIVE (1 << 1)
+#define SR_LED_MUX_COLLISION (1 << 2)
+#define SR_LED_MUX_DUP_COL (1 << 3)
+#define SR_LED_MUX_DUP (1 << 4)
+#define SR_LED_MUX_SPEED (1 << 5)
+#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
+#define SR_LED_MUX_LINK (1 << 7)
+
+/* Register Access Flags */
+#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* Multicast Filter Array size & Max Number */
+#define SR_MCAST_FILTER_SIZE 8
+#define SR_MAX_MCAST 64
+
+/* IPG0/1/2 Default Value */
+#define SR9800_IPG0_DEFAULT 0x15
+#define SR9800_IPG1_DEFAULT 0x0c
+#define SR9800_IPG2_DEFAULT 0x12
+
+/* Medium Status Default Mode */
+#define SR9800_MEDIUM_DEFAULT \
+ (SR_MEDIUM_FD | SR_MEDIUM_RFC | \
+ SR_MEDIUM_TFC | SR_MEDIUM_PS | \
+ SR_MEDIUM_AC | SR_MEDIUM_RE)
+
+/* RX Control Default Setting */
+#define SR_DEFAULT_RX_CTL \
+ (SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
+
+/* EEPROM Magic Number & EEPROM Size */
+#define SR_EEPROM_MAGIC 0xdeadbeef
+#define SR9800_EEPROM_LEN 0xff
+
+/* SR9800 Driver Version and Driver Name */
+#define DRIVER_VERSION "11-Nov-2013"
+#define DRIVER_NAME "CoreChips"
+#define DRIVER_FLAG \
+ (FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
+
+/* SR9800 BULKIN Buffer Size */
+#define SR9800_MAX_BULKIN_2K 0
+#define SR9800_MAX_BULKIN_4K 1
+#define SR9800_MAX_BULKIN_6K 2
+#define SR9800_MAX_BULKIN_8K 3
+#define SR9800_MAX_BULKIN_16K 4
+#define SR9800_MAX_BULKIN_20K 5
+#define SR9800_MAX_BULKIN_24K 6
+#define SR9800_MAX_BULKIN_32K 7
+
+struct {unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+ /* 2k */
+ {2048, 0x8000, 0x8001},
+ /* 4k */
+ {4096, 0x8100, 0x8147},
+ /* 6k */
+ {6144, 0x8200, 0x81EB},
+ /* 8k */
+ {8192, 0x8300, 0x83D7},
+ /* 16k */
+ {16384, 0x8400, 0x851E},
+ /* 20k */
+ {20480, 0x8500, 0x8666},
+ /* 24k */
+ {24576, 0x8600, 0x87AE},
+ /* 32k */
+ {32768, 0x8700, 0x8A3D},
+};
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct sr_data {
+ u8 multi_filter[SR_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 phymode;
+ u8 ledmode;
+ u8 eeprom_len;
+};
+
+struct sr9800_int_data {
+ __le16 res1;
+ u8 link;
+ __le16 res2;
+ u8 status;
+ __le16 res3;
+} __packed;
+
+#endif /* _SR9800_H */
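struct sr_data above is overlaid on usbnet's generic scratch area (the comment pins it to sizeof(unsigned long [5]), i.e. 20 bytes) instead of being allocated separately. A minimal compile-time guard for that assumption, not part of the patch and relying on the mainline usbnet layout where the scratch area is "unsigned long data[5]", could look like:

#include <linux/bug.h>
#include <linux/usb/usbnet.h>

#include "sr9800.h"

/* Hypothetical guard: struct sr_data must fit in usbnet's dev->data scratch
 * space, since the driver casts &dev->data straight to struct sr_data *. */
static inline void sr_data_fits_check(void)
{
	BUILD_BUG_ON(sizeof(struct sr_data) > sizeof(((struct usbnet *)0)->data));
}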
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 4671da755e7b..dd10d5817d2a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
}
// else network stack removes extra byte if we forced a short packet
- if (skb->len) {
- /* all data was already cloned from skb inside the driver */
- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- dev_kfree_skb_any(skb);
- else
- usbnet_skb_return(dev, skb);
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ goto done;
+
+ if (skb->len < ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+ } else {
+ usbnet_skb_return(dev, skb);
return;
}
- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
done:
skb_queue_tail(&dev->done, skb);
}
@@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb)
switch (urb_status) {
/* success */
case 0:
- if (skb->len < dev->net->hard_header_len) {
- state = rx_cleanup;
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- netif_dbg(dev, rx_err, dev->net,
- "rx length %d\n", skb->len);
- }
break;
/* stalls need manual reset. this is rare ... except that
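The two usbnet hunks above move the undersized-frame check from rx_complete() into rx_process() and apply it only after the FLAG_MULTI_PACKET early-out, since multi-packet drivers (such as the SR9800 with its rx_fixup) have already cloned the payload out of the URB. Reconstructed from the added lines, the tail of rx_process() then reads (a sketch, with unchanged context abbreviated):

	/* all data was already cloned from skb inside the driver */
	if (dev->driver_info->flags & FLAG_MULTI_PACKET)
		goto done;

	if (skb->len < ETH_HLEN) {
		dev->net->stats.rx_errors++;
		dev->net->stats.rx_length_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
	} else {
		usbnet_skb_return(dev, skb);
		return;
	}

done:
	skb_queue_tail(&dev->done, skb);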
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 026a313c2d2d..b0f705c2378f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -469,7 +469,6 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
-
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
struct vxlan_fdb *f;
@@ -596,10 +595,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
- goto found;
}
-found:
type = eh->h_proto;
rcu_read_lock();
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c7592efa0..19f7cb2cdef3 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned len)
{
struct frhdr hdr;
- struct dlci_local *dlp;
unsigned int hlen;
char *dest;
- dlp = netdev_priv(dev);
-
hdr.control = FRAD_I_UI;
switch (type)
{
@@ -107,11 +104,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
{
- struct dlci_local *dlp;
struct frhdr *hdr;
int process, header;
- dlp = netdev_priv(dev);
if (!pskb_may_pull(skb, sizeof(*hdr))) {
netdev_notice(dev, "invalid data no header\n");
dev->stats.rx_errors++;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 8aa20df55e50..507d9a9ee69a 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1764,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
- AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
+ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
(CyberTAN Technology) */
AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index d6bc7cb61bfb..1a2973b7acf2 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
if (ah->ah_version == AR5K_AR5210) {
- srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
+ srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
} else {
srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 25243cbc07f0..b8daff78b9d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5065,6 +5065,10 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
break;
}
}
+
+ if (is2GHz && !twiceMaxEdgePower)
+ twiceMaxEdgePower = 60;
+
return twiceMaxEdgePower;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 58da3468d1f0..99a203174f45 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@ enum tid_aggr_state {
struct ath9k_htc_sta {
u8 index;
enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+ struct work_struct rc_update_work;
+ struct ath9k_htc_priv *htc_priv;
};
#define ATH9K_HTC_RXBUF 256
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index f4e1de20d99c..c57d6b859c04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
#define CHAN2G(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 608d739d1378..c9254a61ca52 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1270,18 +1270,50 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
+static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
+{
+ struct ath9k_htc_sta *ista =
+ container_of(work, struct ath9k_htc_sta, rc_update_work);
+ struct ieee80211_sta *sta =
+ container_of((void *)ista, struct ieee80211_sta, drv_priv);
+ struct ath9k_htc_priv *priv = ista->htc_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ if (!ath9k_htc_send_rate_cmd(priv, &trate))
+ ath_dbg(common, CONFIG,
+ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+ else
+ ath_dbg(common, CONFIG,
+ "Unable to update supported rates for sta: %pM\n",
+ sta->addr);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
ret = ath9k_htc_add_station(priv, vif, sta);
- if (!ret)
+ if (!ret) {
+ INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
+ ista->htc_priv = priv;
ath9k_htc_init_rate(priv, sta);
+ }
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1293,12 +1325,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
- struct ath9k_htc_sta *ista;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
+ cancel_work_sync(&ista->rc_update_work);
+
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
- ista = (struct ath9k_htc_sta *) sta->drv_priv;
htc_sta_drain(priv->htc, ista->index);
ret = ath9k_htc_remove_station(priv, vif, sta);
ath9k_htc_ps_restore(priv);
@@ -1311,28 +1344,12 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u32 changed)
{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_htc_target_rate trate;
-
- mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
- ath9k_htc_setup_rate(priv, sta, &trate);
- if (!ath9k_htc_send_rate_cmd(priv, &trate))
- ath_dbg(common, CONFIG,
- "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
- sta->addr, be32_to_cpu(trate.capflags));
- else
- ath_dbg(common, CONFIG,
- "Unable to update supported rates for sta: %pM\n",
- sta->addr);
- }
+ if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
+ return;
- ath9k_htc_ps_restore(priv);
- mutex_unlock(&priv->mutex);
+ schedule_work(&ista->rc_update_work);
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fbf43c05713f..11eab9f01fd8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1316,7 +1316,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (AR_SREV_9300_20_OR_LATER(ah))
udelay(50);
else if (AR_SREV_9100(ah))
- udelay(10000);
+ mdelay(10);
else
udelay(100);
@@ -2051,9 +2051,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
-
if (AR_SREV_9100(ah))
- udelay(10000);
+ mdelay(10);
else
udelay(50);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c36de303c8f3..1fc2e5a26b52 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -903,13 +907,15 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index aa7ad3a7a69b..4e5c0f8c9496 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local)
void hostap_remove_proc(local_info_t *local)
{
- remove_proc_subtree(local->ddev->name, hostap_proc);
+ proc_remove(local->proc);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index c24d1d3d55f6..73086c1629ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* disabled by default */
+ return false;
+}
+
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ if (!iwl_enable_rx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Rx\n");
ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ if (!iwl_enable_tx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index c3728163be46..75103554cd63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1286,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
MODULE_PARM_DESC(11n_disable,
- "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
+ "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 0a84ade7edac..b29075c3da8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -79,9 +79,12 @@ enum iwl_power_level {
IWL_POWER_NUM
};
-#define IWL_DISABLE_HT_ALL BIT(0)
-#define IWL_DISABLE_HT_TXAGG BIT(1)
-#define IWL_DISABLE_HT_RXAGG BIT(2)
+enum iwl_disable_11n {
+ IWL_DISABLE_HT_ALL = BIT(0),
+ IWL_DISABLE_HT_TXAGG = BIT(1),
+ IWL_DISABLE_HT_RXAGG = BIT(2),
+ IWL_ENABLE_HT_TXAGG = BIT(3),
+};
/**
* struct iwl_mod_params
@@ -90,7 +93,7 @@ enum iwl_power_level {
*
* @sw_crypto: using hardware encryption, default = 0
* @disable_11n: disable 11n capabilities, default = 0,
- * use IWL_DISABLE_HT_* constants
+ * use IWL_[DIS,EN]ABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
* @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index f06f4cbe1317..725e954d8475 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+ if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ !data->sku_cap_band_52GHz_enable)
+ ch_flags &= ~NVM_CHANNEL_VALID;
+
if (!(ch_flags & NVM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 73cbba7424f2..9426905de6b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -504,6 +504,7 @@ struct iwl_scan_offload_profile {
* @match_notify: clients waiting for match found notification
* @pass_match: clients waiting for the results
* @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
*/
struct iwl_scan_offload_profile_cfg {
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@ struct iwl_scan_offload_profile_cfg {
u8 match_notify;
u8 pass_match;
u8 active_clients;
- u8 reserved[3];
+ u8 any_beacon_notify;
+ u8 reserved[2];
} __packed;
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c49b5073c251..c35b8661b395 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -246,7 +246,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -328,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
ieee80211_free_txskb(hw, skb);
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* enabled by default */
+ return true;
+}
+
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -347,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+ if (!iwl_enable_rx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
@@ -357,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
break;
case IEEE80211_AMPDU_TX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+ if (!iwl_enable_tx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 0e0007960612..742afc429c94 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -344,7 +344,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
- cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
cmd->tx_cmd.rate_n_flags =
@@ -807,6 +808,8 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
for (i = 0; i < req->n_match_sets; i++) {
profile = &profile_cfg->profiles[i];
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index ec1812133235..3397f59cd4e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -652,7 +652,7 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- static const u8 *baddr = _baddr;
+ const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 90378c217bc7..4df12fa9d336 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -659,8 +659,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ /*
+ * sta can't be NULL otherwise it'd mean that the sta has been freed in
+ * the firmware while we still have packets for it in the Tx queues.
+ */
+ if (WARN_ON_ONCE(!sta))
+ goto out;
- if (!IS_ERR_OR_NULL(sta)) {
+ if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (tid != IWL_TID_NON_QOS) {
@@ -675,7 +681,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
spin_unlock_bh(&mvmsta->lock);
}
} else {
- sta = NULL;
mvmsta = NULL;
}
@@ -683,42 +688,38 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
- atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
- if (mvmsta) {
- /*
- * If there are no pending frames for this STA, notify
- * mac80211 that this station can go to sleep in its
- * STA table.
- */
- if (mvmsta->vif->type == NL80211_IFTYPE_AP)
- ieee80211_sta_block_awake(mvm->hw, sta, false);
- /*
- * We might very well have taken mvmsta pointer while
- * the station was being removed. The remove flow might
- * have seen a pending_frame (because we didn't take
- * the lock) even if now the queues are drained. So make
- * really sure now that this the station is not being
- * removed. If it is, run the drain worker to remove it.
- */
- spin_lock_bh(&mvmsta->lock);
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (!sta || PTR_ERR(sta) == -EBUSY) {
- /*
- * Station disappeared in the meantime:
- * so we are draining.
- */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
- spin_unlock_bh(&mvmsta->lock);
- } else if (!mvmsta && PTR_ERR(sta) == -EBUSY) {
- /* Tx response without STA, so we are draining */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
+ if (txq_id >= mvm->first_agg_queue)
+ goto out;
+
+ /* We can't free more than one frame at once on a shared queue */
+ WARN_ON(skb_freed > 1);
+
+ /* If we still have frames from this STA, there is nothing to do here */
+ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
+ goto out;
+
+ if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * If there are no pending frames for this STA, notify
+ * mac80211 that this station can go to sleep in its
+ * STA table.
+ * If mvmsta is not NULL, sta is valid.
+ */
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
+ }
+
+ if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
+ /*
+ * We are draining and this was the last packet - pre_rcu_remove
+ * has been called already. We might be after the
+ * synchronize_net already.
+ * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
+ */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
}
+out:
rcu_read_unlock();
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a4a5e25623c3..86989df69356 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.data3,
table.blink1, table.blink2, table.ilink1,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3040924f5f3c..f47bcbe2945a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,20 +359,25 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 4d79761b9c87..9d3d2758ec35 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index abc5f56f29fe..2f1cd929c6f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1877,6 +1877,11 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
EEPROM_MAC_ADDR_0));
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9f16824cd1bc..d849d590de25 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1706,6 +1706,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK;
+ /*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b8f5b06006c4..7f8b5d156c8c 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -7458,10 +7458,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u32 reg;
/*
- * Disable powersaving as default on PCI devices.
+ * Disable powersaving as default.
*/
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 8ec17aad0e52..3867d1470b36 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -107,6 +107,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
unsigned int count = 32;
u8 signal, agc, sq;
+ dma_addr_t mapping;
while (count--) {
struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -128,6 +129,17 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;
+ mapping = pci_map_single(priv->pdev,
+ skb_tail_pointer(new_skb),
+ MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(new_skb);
+ dev_err(&priv->pdev->dev, "RX DMA map error\n");
+
+ goto done;
+ }
+
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -158,9 +170,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
skb = new_skb;
priv->rx_buf[priv->rx_idx] = skb;
- *((dma_addr_t *) skb->cb) =
- pci_map_single(priv->pdev, skb_tail_pointer(skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ *((dma_addr_t *) skb->cb) = mapping;
}
done:
@@ -266,6 +276,13 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
mapping = pci_map_single(priv->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+ }
+
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
RTL818X_TX_DESC_FLAG_LS |
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 56aee067f324..a6ad79f61bf9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
#ifndef RTL8187_H
#define RTL8187_H
+#include <linux/cache.h>
+
#include "rtl818x.h"
#include "leds.h"
@@ -139,7 +141,10 @@ struct rtl8187_priv {
u8 aifsn[4];
u8 rfkill_mask;
struct {
- __le64 buf;
+ union {
+ __le64 buf;
+ u8 dummy1[L1_CACHE_BYTES];
+ } ____cacheline_aligned;
struct sk_buff_head queue;
} b_tx_status; /* This queue is used by both -b and non-b devices */
struct mutex io_mutex;
@@ -147,7 +152,8 @@ struct rtl8187_priv {
u8 bits8;
__le16 bits16;
__le32 bits32;
- } *io_dmabuf;
+ u8 dummy2[L1_CACHE_BYTES];
+ } *io_dmabuf ____cacheline_aligned;
bool rfkill_off;
u16 seqno;
};
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index deedae3c5449..d1c0191a195b 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
- return 1;
+ return false;
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
/*<3> Enable Interrupt */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a82b30a1996c..2eb0b38384dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
bool is92c;
int err;
u8 tmp_u1b;
+ unsigned long flags;
rtlpci->being_init_adapter = true;
+
+ /* Since this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe to do so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
err = rtl92c_download_fw(hw);
@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- return err;
+ goto exit;
}
rtlhal->last_hmeboxnum = 0;
@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl92c_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpci->being_init_adapter = false;
return err;
}
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4c76bcb9a879..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
- bool rx_queue_stopped;
- /* Set when the RX interrupt is triggered by the frontend.
- * The worker thread may need to wake the queue.
- */
- bool rx_event;
+ RING_IDX rx_last_skb_slots;
/* This array is allocated seperately as it is large */
struct gnttab_copy *grant_copy_op;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b9de31ea7fc4..7669d49a67e2 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -100,7 +100,6 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- vif->rx_event = true;
xenvif_kick_thread(vif);
return IRQ_HANDLED;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3eb8e18..e5284bca2d90 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -476,7 +476,6 @@ static void xenvif_rx_action(struct xenvif *vif)
unsigned long offset;
struct skb_cb_overlay *sco;
bool need_to_notify = false;
- bool ring_full = false;
struct netrx_pending_operations npo = {
.copy = vif->grant_copy_op,
@@ -486,7 +485,7 @@ static void xenvif_rx_action(struct xenvif *vif)
skb_queue_head_init(&rxq);
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
- int max_slots_needed;
+ RING_IDX max_slots_needed;
int i;
/* We need a cheap worse case estimate for the number of
@@ -509,9 +508,10 @@ static void xenvif_rx_action(struct xenvif *vif)
if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
skb_queue_head(&vif->rx_queue, skb);
need_to_notify = true;
- ring_full = true;
+ vif->rx_last_skb_slots = max_slots_needed;
break;
- }
+ } else
+ vif->rx_last_skb_slots = 0;
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +522,6 @@ static void xenvif_rx_action(struct xenvif *vif)
BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
- vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
if (!npo.copy_prod)
goto done;
@@ -1473,8 +1471,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
static inline int rx_work_todo(struct xenvif *vif)
{
- return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
- vif->rx_event;
+ return !skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}
static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1558,6 @@ int xenvif_kthread(void *data)
if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
- vif->rx_event = false;
-
if (skb_queue_empty(&vif->rx_queue) &&
netif_queue_stopped(vif->dev))
xenvif_start_queue(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff04d4f95baa..f9daa9e183f2 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1832,7 +1832,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
@@ -1847,6 +1846,10 @@ static void netback_changed(struct xenbus_device *dev,
netdev_notify_peers(netdev);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d3dd41c840f1..1a54f1ffaadb 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
static int of_bus_pci_match(struct device_node *np)
{
/*
+ * "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
*/
- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
- !strcmp(np->type, "ht");
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
+ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}
static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ff85450d5683..89e888a78899 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -342,27 +342,72 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
}
EXPORT_SYMBOL(of_get_cpu_node);
-/** Checks if the given "compat" string matches one of the strings in
- * the device's "compatible" property
+/**
+ * __of_device_is_compatible() - Check if the node matches given constraints
+ * @device: pointer to node
+ * @compat: required compatible string, NULL or "" for any match
+ * @type: required device_type value, NULL or "" for any match
+ * @name: required node name, NULL or "" for any match
+ *
+ * Checks if the given @compat, @type and @name strings match the
+ * properties of the given @device. A constraint can be skipped by
+ * passing NULL or an empty string as the constraint.
+ *
+ * Returns 0 for no match, and a positive integer on match. The return
+ * value is a relative score with larger values indicating better
+ * matches. The score is weighted for the most specific compatible value
+ * to get the highest score. Matching type is next, followed by matching
+ * name. Practically speaking, this results in the following priority
+ * order for matches:
+ *
+ * 1. specific compatible && type && name
+ * 2. specific compatible && type
+ * 3. specific compatible && name
+ * 4. specific compatible
+ * 5. general compatible && type && name
+ * 6. general compatible && type
+ * 7. general compatible && name
+ * 8. general compatible
+ * 9. type && name
+ * 10. type
+ * 11. name
*/
static int __of_device_is_compatible(const struct device_node *device,
- const char *compat)
+ const char *compat, const char *type, const char *name)
{
- const char* cp;
- int cplen, l;
+ struct property *prop;
+ const char *cp;
+ int index = 0, score = 0;
+
+ /* Compatible match has highest priority */
+ if (compat && compat[0]) {
+ prop = __of_find_property(device, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp), index++) {
+ if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
+ score = INT_MAX/2 - (index << 2);
+ break;
+ }
+ }
+ if (!score)
+ return 0;
+ }
- cp = __of_get_property(device, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
- if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
- return 1;
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
+ /* Matching type is better than matching name */
+ if (type && type[0]) {
+ if (!device->type || of_node_cmp(type, device->type))
+ return 0;
+ score += 2;
}
- return 0;
+ /* Matching name is a bit better than not */
+ if (name && name[0]) {
+ if (!device->name || of_node_cmp(name, device->name))
+ return 0;
+ score++;
+ }
+
+ return score;
}
/** Checks if the given "compat" string matches one of the strings in
@@ -375,7 +420,7 @@ int of_device_is_compatible(const struct device_node *device,
int res;
raw_spin_lock_irqsave(&devtree_lock, flags);
- res = __of_device_is_compatible(device, compat);
+ res = __of_device_is_compatible(device, compat, NULL, NULL);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return res;
}
@@ -681,10 +726,7 @@ struct device_node *of_find_compatible_node(struct device_node *from,
raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext) {
- if (type
- && !(np->type && (of_node_cmp(np->type, type) == 0)))
- continue;
- if (__of_device_is_compatible(np, compatible) &&
+ if (__of_device_is_compatible(np, compatible, type, NULL) &&
of_node_get(np))
break;
}
@@ -734,43 +776,22 @@ static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
- const char *cp;
- int cplen, l;
+ const struct of_device_id *best_match = NULL;
+ int score, best_score = 0;
if (!matches)
return NULL;
- cp = __of_get_property(node, "compatible", &cplen);
- do {
- const struct of_device_id *m = matches;
-
- /* Check against matches with current compatible string */
- while (m->name[0] || m->type[0] || m->compatible[0]) {
- int match = 1;
- if (m->name[0])
- match &= node->name
- && !strcmp(m->name, node->name);
- if (m->type[0])
- match &= node->type
- && !strcmp(m->type, node->type);
- if (m->compatible[0])
- match &= cp
- && !of_compat_cmp(m->compatible, cp,
- strlen(m->compatible));
- if (match)
- return m;
- m++;
- }
-
- /* Get node's next compatible string */
- if (cp) {
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
+ for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
+ score = __of_device_is_compatible(node, matches->compatible,
+ matches->type, matches->name);
+ if (score > best_score) {
+ best_match = matches;
+ best_score = score;
}
- } while (cp && (cplen > 0));
+ }
- return NULL;
+ return best_match;
}
/**
@@ -778,10 +799,7 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches,
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
*
- * Low level utility function used by device matching. Matching order
- * is to compare each of the node's compatibles with all given matches
- * first. This implies node's compatible is sorted from specific to
- * generic while matches can be in any order.
+ * Low level utility function used by device matching.
*/
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
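The 11-step priority list documented in the new __of_device_is_compatible() comment falls directly out of how the three partial scores combine. A minimal standalone sketch of that arithmetic (not part of the patch; "compat_match_idx" is the position of the matching compatible string, 0 being the most specific, or -1 for no compatible constraint):

#include <limits.h>

/* Illustrative reimplementation of the scoring used by the new matcher. */
static int of_match_score_sketch(int compat_match_idx, int type_matches,
				 int name_matches)
{
	int score = 0;

	if (compat_match_idx >= 0)	/* any compatible match dominates */
		score = INT_MAX / 2 - (compat_match_idx << 2);
	if (type_matches)		/* type beats name */
		score += 2;
	if (name_matches)
		score += 1;
	return score;
}

With these weights, any compatible match outranks every type/name-only match, and among compatible matches an earlier (more specific) string in the node's compatible list always wins, since the 4-point penalty per index step exceeds the at most 3 points that type and name can add.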
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 875b7b6f0d2a..5b3c24f3cde5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -24,7 +24,11 @@ MODULE_LICENSE("GPL");
static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- phydev->supported |= PHY_DEFAULT_FEATURES;
+ /* The default values for phydev->supported are provided by the PHY
+ * driver "features" member, we want to reset to sane defaults fist
+ * before supporting higher speeds.
+ */
+ phydev->supported &= PHY_DEFAULT_FEATURES;
switch (max_speed) {
default:
@@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
{
struct phy_device *phy;
bool is_c45;
- int rc, prev_irq;
+ int rc;
u32 max_speed = 0;
is_c45 = of_device_is_compatible(child,
@@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
if (!phy || IS_ERR(phy))
return 1;
- if (mdio->irq) {
- prev_irq = mdio->irq[addr];
- mdio->irq[addr] =
- irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = prev_irq;
+ rc = irq_of_parse_and_map(child, 0);
+ if (rc > 0) {
+ phy->irq = rc;
+ if (mdio->irq)
+ mdio->irq[addr] = rc;
+ } else {
+ if (mdio->irq)
+ phy->irq = mdio->irq[addr];
}
/* Associate the OF node with the device structure so it
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index e21012bde639..6643d1920985 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -300,6 +300,72 @@ static void __init of_selftest_parse_interrupts_extended(void)
of_node_put(np);
}
+static struct of_device_id match_node_table[] = {
+ { .data = "A", .name = "name0", }, /* Name alone is lowest priority */
+ { .data = "B", .type = "type1", }, /* followed by type alone */
+
+ { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */
+ { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */
+ { .data = "Cc", .name = "name2", .type = "type2", },
+
+ { .data = "E", .compatible = "compat3" },
+ { .data = "G", .compatible = "compat2", },
+ { .data = "H", .compatible = "compat2", .name = "name5", },
+ { .data = "I", .compatible = "compat2", .type = "type1", },
+ { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", },
+ { .data = "K", .compatible = "compat2", .name = "name9", },
+ {}
+};
+
+static struct {
+ const char *path;
+ const char *data;
+} match_node_tests[] = {
+ { .path = "/testcase-data/match-node/name0", .data = "A", },
+ { .path = "/testcase-data/match-node/name1", .data = "B", },
+ { .path = "/testcase-data/match-node/a/name2", .data = "Ca", },
+ { .path = "/testcase-data/match-node/b/name2", .data = "Cb", },
+ { .path = "/testcase-data/match-node/c/name2", .data = "Cc", },
+ { .path = "/testcase-data/match-node/name3", .data = "E", },
+ { .path = "/testcase-data/match-node/name4", .data = "G", },
+ { .path = "/testcase-data/match-node/name5", .data = "H", },
+ { .path = "/testcase-data/match-node/name6", .data = "G", },
+ { .path = "/testcase-data/match-node/name7", .data = "I", },
+ { .path = "/testcase-data/match-node/name8", .data = "J", },
+ { .path = "/testcase-data/match-node/name9", .data = "K", },
+};
+
+static void __init of_selftest_match_node(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) {
+ np = of_find_node_by_path(match_node_tests[i].path);
+ if (!np) {
+ selftest(0, "missing testcase node %s\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ match = of_match_node(match_node_table, np);
+ if (!match) {
+ selftest(0, "%s didn't match anything\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ if (strcmp(match->data, match_node_tests[i].data) != 0) {
+ selftest(0, "%s got wrong match. expected %s, got %s\n",
+ match_node_tests[i].path, match_node_tests[i].data,
+ (const char *)match->data);
+ continue;
+ }
+ selftest(1, "passed");
+ }
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -316,6 +382,7 @@ static int __init of_selftest(void)
of_selftest_property_match_string();
of_selftest_parse_interrupts();
of_selftest_parse_interrupts_extended();
+ of_selftest_match_node();
pr_info("end of selftest - %i passed, %i failed\n",
selftest_results.passed, selftest_results.failed);
return 0;
diff --git a/drivers/of/testcase-data/testcases.dtsi b/drivers/of/testcase-data/testcases.dtsi
new file mode 100644
index 000000000000..3a5b75a8e4d7
--- /dev/null
+++ b/drivers/of/testcase-data/testcases.dtsi
@@ -0,0 +1,3 @@
+#include "tests-phandle.dtsi"
+#include "tests-interrupts.dtsi"
+#include "tests-match.dtsi"
diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
new file mode 100644
index 000000000000..c843720bd3e5
--- /dev/null
+++ b/drivers/of/testcase-data/tests-interrupts.dtsi
@@ -0,0 +1,58 @@
+
+/ {
+ testcase-data {
+ interrupts {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ test_intc0: intc0 {
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ test_intc1: intc1 {
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ };
+
+ test_intc2: intc2 {
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ test_intmap0: intmap0 {
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ interrupt-map = <1 &test_intc0 9>,
+ <2 &test_intc1 10 11 12>,
+ <3 &test_intc2 13 14>,
+ <4 &test_intc2 15 16>;
+ };
+
+ test_intmap1: intmap1 {
+ #interrupt-cells = <2>;
+ interrupt-map = <0x5000 1 2 &test_intc0 15>;
+ };
+
+ interrupts0 {
+ interrupt-parent = <&test_intc0>;
+ interrupts = <1>, <2>, <3>, <4>;
+ };
+
+ interrupts1 {
+ interrupt-parent = <&test_intmap0>;
+ interrupts = <1>, <2>, <3>, <4>;
+ };
+
+ interrupts-extended0 {
+ reg = <0x5000 0x100>;
+ interrupts-extended = <&test_intc0 1>,
+ <&test_intc1 2 3 4>,
+ <&test_intc2 5 6>,
+ <&test_intmap0 1>,
+ <&test_intmap0 2>,
+ <&test_intmap0 3>,
+ <&test_intmap1 1 2>;
+ };
+ };
+ };
+};
diff --git a/drivers/of/testcase-data/tests-match.dtsi b/drivers/of/testcase-data/tests-match.dtsi
new file mode 100644
index 000000000000..c9e541129534
--- /dev/null
+++ b/drivers/of/testcase-data/tests-match.dtsi
@@ -0,0 +1,19 @@
+
+/ {
+ testcase-data {
+ match-node {
+ name0 { };
+ name1 { device_type = "type1"; };
+ a { name2 { device_type = "type1"; }; };
+ b { name2 { }; };
+ c { name2 { device_type = "type2"; }; };
+ name3 { compatible = "compat3"; };
+ name4 { compatible = "compat2", "compat3"; };
+ name5 { compatible = "compat2", "compat3"; };
+ name6 { compatible = "compat1", "compat2", "compat3"; };
+ name7 { compatible = "compat2"; device_type = "type1"; };
+ name8 { compatible = "compat2"; device_type = "type1"; };
+ name9 { compatible = "compat2"; };
+ };
+ };
+};
diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
new file mode 100644
index 000000000000..0007d3cd7dc2
--- /dev/null
+++ b/drivers/of/testcase-data/tests-phandle.dtsi
@@ -0,0 +1,39 @@
+
+/ {
+ testcase-data {
+ phandle-tests {
+ provider0: provider0 {
+ #phandle-cells = <0>;
+ };
+
+ provider1: provider1 {
+ #phandle-cells = <1>;
+ };
+
+ provider2: provider2 {
+ #phandle-cells = <2>;
+ };
+
+ provider3: provider3 {
+ #phandle-cells = <3>;
+ };
+
+ consumer-a {
+ phandle-list = <&provider1 1>,
+ <&provider2 2 0>,
+ <0>,
+ <&provider3 4 4 3>,
+ <&provider2 5 100>,
+ <&provider0>,
+ <&provider1 7>;
+ phandle-list-names = "first", "second", "third";
+
+ phandle-list-bad-phandle = <12345678 0 0>;
+ phandle-list-bad-args = <&provider2 1 0>,
+ <&provider3 0>;
+ empty-property;
+ unterminated-string = [40 41 42 43];
+ };
+ };
+ };
+};
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13478ecd4113..0e79665afd44 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -60,14 +60,6 @@
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
-/*
- * This product ID is registered by Marvell, and used when the Marvell
- * SoC is not the root complex, but an endpoint on the PCIe bus. It is
- * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
- * bridge.
- */
-#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
-
/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
u16 vendor;
@@ -388,7 +380,8 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
bridge->class = PCI_CLASS_BRIDGE_PCI;
bridge->vendor = PCI_VENDOR_ID_MARVELL;
- bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID;
+ bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->cache_line_size = 0x10;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cd929aed3613..7c7a388c85ab 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -210,10 +210,29 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
}
}
+static void dock_event(acpi_handle handle, u32 type, void *data)
+{
+ struct acpiphp_context *context;
+
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_get_context(handle);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away) {
+ mutex_unlock(&acpiphp_context_lock);
+ return;
+ }
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ mutex_unlock(&acpiphp_context_lock);
+
+ hotplug_event(handle, type, data);
+
+ put_bridge(context->func.parent);
+}
static const struct acpi_dock_ops acpiphp_dock_ops = {
.fixup = post_dock_fixups,
- .handler = hotplug_event,
+ .handler = dock_event,
};
/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -441,7 +460,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
list_del(&bridge->list);
mutex_unlock(&bridge_mutex);
+ mutex_lock(&acpiphp_context_lock);
bridge->is_going_away = true;
+ mutex_unlock(&acpiphp_context_lock);
}
/**
@@ -709,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
return (unsigned int)sta;
}
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
+
/**
* trim_stale_devices - remove PCI devices that are not responding.
* @dev: PCI device to start walking the hierarchy from.
@@ -724,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
|| acpiphp_no_hotplug(handle);
}
if (!alive) {
@@ -742,7 +774,7 @@ static void trim_stale_devices(struct pci_dev *dev)
/* The device is a bridge. so check the bus below it. */
pm_runtime_get_sync(&dev->dev);
- list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+ list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
trim_stale_devices(child);
pm_runtime_put(&dev->dev);
@@ -771,10 +803,10 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
mutex_lock(&slot->crit_sect);
if (slot_no_hotplug(slot)) {
; /* do nothing */
- } else if (get_slot_status(slot) == ACPI_STA_ALL) {
+ } else if (device_status_valid(get_slot_status(slot))) {
/* remove stale devices if any */
- list_for_each_entry_safe(dev, tmp, &bus->devices,
- bus_list)
+ list_for_each_entry_safe_reverse(dev, tmp,
+ &bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->device)
trim_stale_devices(dev);
@@ -805,7 +837,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
@@ -829,7 +861,11 @@ void acpiphp_check_host_bridge(acpi_handle handle)
bridge = acpiphp_handle_to_bridge(handle);
if (bridge) {
+ pci_lock_rescan_remove();
+
acpiphp_check_bridge(bridge);
+
+ pci_unlock_rescan_remove();
put_bridge(bridge);
}
}
@@ -852,6 +888,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_unlock(&acpiphp_context_lock);
+ pci_lock_rescan_remove();
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
@@ -905,6 +942,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
break;
}
+ pci_unlock_rescan_remove();
if (bridge)
put_bridge(bridge);
}
@@ -915,11 +953,9 @@ static void hotplug_event_work(void *data, u32 type)
acpi_handle handle = context->handle;
acpi_scan_lock_acquire();
- pci_lock_rescan_remove();
hotplug_event(handle, type, context);
- pci_unlock_rescan_remove();
acpi_scan_lock_release();
acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
put_bridge(context->func.parent);
@@ -937,6 +973,7 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
{
struct acpiphp_context *context;
u32 ost_code = ACPI_OST_SC_SUCCESS;
+ acpi_status status;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -972,13 +1009,20 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_lock(&acpiphp_context_lock);
context = acpiphp_get_context(handle);
- if (context && !WARN_ON(context->handle != handle)) {
- get_bridge(context->func.parent);
- acpiphp_put_context(context);
- acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away)
+ goto err_out;
+
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ status = acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (ACPI_SUCCESS(status)) {
mutex_unlock(&acpiphp_context_lock);
return;
}
+ put_bridge(context->func.parent);
+
+ err_out:
mutex_unlock(&acpiphp_context_lock);
ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a0fec6ce571..955ab7990c5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -545,9 +545,15 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
return -ENOMEM;
list_for_each_entry(entry, &pdev->msi_list, list) {
char *name = kmalloc(20, GFP_KERNEL);
+ if (!name)
+ goto error_attrs;
+
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
+ if (!msi_dev_attr) {
+ kfree(name);
goto error_attrs;
+ }
+
sprintf(name, "%d", entry->irq);
sysfs_attr_init(&msi_dev_attr->attr);
msi_dev_attr->attr.name = name;
@@ -589,6 +595,7 @@ error_attrs:
++count;
msi_attr = msi_attrs[count];
}
+ kfree(msi_attrs);
return ret;
}
@@ -959,7 +966,6 @@ EXPORT_SYMBOL(pci_disable_msi);
/**
* pci_msix_vec_count - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
-
 * This function returns the number of the device's MSI-X table entries and
 * therefore the number of MSI-X vectors the device is capable of sending.
* It returns a negative errno if the device is not capable of sending MSI-X
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1febe90831b4..6b05f6134b68 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1181,6 +1181,8 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
+ u16 cmd;
+ u8 pin;
err = pci_set_power_state(dev, PCI_D0);
if (err < 0 && err != -EIO)
@@ -1190,6 +1192,14 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
return err;
pci_fixup_device(pci_fixup_enable, dev);
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_INTX_DISABLE);
+ }
+
return 0;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index afa2354f6600..c7a551c2d5f1 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -5,7 +5,7 @@
menu "PHY Subsystem"
config GENERIC_PHY
- tristate "PHY Core"
+ bool "PHY Core"
help
Generic PHY support.
@@ -61,6 +61,7 @@ config PHY_EXYNOS_DP_VIDEO
config BCM_KONA_USB2_PHY
tristate "Broadcom Kona USB2 PHY Driver"
depends on GENERIC_PHY
+ depends on HAS_IOMEM
help
Enable this to support the Broadcom Kona USB 2.0 PHY.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 645c867c1257..6c738376daff 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -162,6 +162,9 @@ int phy_init(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -173,6 +176,8 @@ int phy_init(struct phy *phy)
dev_err(&phy->dev, "phy init failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->init_count;
@@ -187,6 +192,9 @@ int phy_exit(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -212,6 +220,9 @@ int phy_power_on(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -223,6 +234,8 @@ int phy_power_on(struct phy *phy)
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->power_count;
mutex_unlock(&phy->mutex);
@@ -240,6 +253,9 @@ int phy_power_off(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
mutex_lock(&phy->mutex);
if (phy->power_count == 1 && phy->ops->power_off) {
ret = phy->ops->power_off(phy);
@@ -308,7 +324,7 @@ err0:
*/
void phy_put(struct phy *phy)
{
- if (IS_ERR(phy))
+ if (!phy || IS_ERR(phy))
return;
module_put(phy->ops->owner);
@@ -328,6 +344,9 @@ void devm_phy_put(struct device *dev, struct phy *phy)
{
int r;
+ if (!phy)
+ return;
+
r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
@@ -389,17 +408,11 @@ struct phy *phy_get(struct device *dev, const char *string)
index = of_property_match_string(dev->of_node, "phy-names",
string);
phy = of_phy_get(dev, index);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
} else {
phy = phy_lookup(dev, string);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
}
+ if (IS_ERR(phy))
+ return phy;
if (!try_module_get(phy->ops->owner))
return ERR_PTR(-EPROBE_DEFER);
@@ -411,6 +424,27 @@ struct phy *phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(phy_get);
/**
+ * phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or the name of the controller
+ * port for non-dt case
+ *
+ * Returns the phy, after getting a refcount to it; or
+ * NULL if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(phy_optional_get);
+
+/**
* devm_phy_get() - lookup and obtain a reference to a phy.
* @dev: device that requests this phy
* @string: the phy name as given in the dt data or phy device name
@@ -441,6 +475,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(devm_phy_get);
/**
+ * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or phy device name
+ * for non-dt case
+ *
+ * Gets the phy using phy_get(), and associates a device with it using
+ * devres. On driver detach, the release function is invoked on the devres
+ * data and then the devres data is freed. This differs from devm_phy_get()
+ * in that a missing phy is not treated as an error: -ENODEV is not
+ * returned; instead a NULL phy is returned, which can be passed to all
+ * other phy consumer calls.
+ */
+struct phy *devm_phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = devm_phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_optional_get);
+
+/**
* phy_create() - create a new phy
* @dev: device that is creating the new phy
* @ops: function pointers for performing phy operations
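A minimal consumer sketch of the new optional-PHY helpers, assuming a hypothetical driver and a PHY named "sata-phy": because the phy_* entry points above now accept a NULL phy, a PHY that simply is not described needs no special-casing in the caller.

#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* NULL (not an error) when no "sata-phy" is provided */
	phy = devm_phy_optional_get(&pdev->dev, "sata-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* real failures, e.g. -EPROBE_DEFER */

	ret = phy_init(phy);		/* returns 0 when phy == NULL */
	if (ret)
		return ret;

	return phy_power_on(phy);	/* likewise a no-op for a NULL phy */
}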
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 1dbe6ce7b2ce..0786fef842e7 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -76,10 +76,6 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
@@ -87,6 +83,10 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
}
phy_set_drvdata(phy, state);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 0c5efab11af1..7f139326a642 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -134,11 +134,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, state);
spin_lock_init(&state->slock);
- phy_provider = devm_of_phy_provider_register(dev,
- exynos_mipi_video_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev,
&exynos_mipi_video_phy_ops, NULL);
@@ -152,6 +147,11 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy, &state->phys[i]);
}
+ phy_provider = devm_of_phy_provider_register(dev,
+ exynos_mipi_video_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index d43786f62437..d70ecd6a1b3f 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -99,17 +99,17 @@ static int phy_mvebu_sata_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- phy_provider = devm_of_phy_provider_register(&pdev->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
if (IS_ERR(phy))
return PTR_ERR(phy);
phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ /* The boot loader may have left it on. Turn it off. */
phy_mvebu_sata_power_off(phy);
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index bfc5c337f99a..7699752fba11 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -177,11 +177,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
- phy_provider = devm_of_phy_provider_register(phy->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
dev_err(&pdev->dev, "Failed to get control device phandle\n");
@@ -214,6 +209,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(generic_phy, phy);
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index daf65e68aaab..c3ace1db8136 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -695,11 +695,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
otg->set_host = twl4030_set_host;
otg->set_peripheral = twl4030_set_peripheral;
- phy_provider = devm_of_phy_provider_register(twl->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(twl->dev, &ops, init_data);
if (IS_ERR(phy)) {
dev_dbg(&pdev->dev, "Failed to create PHY\n");
@@ -708,6 +703,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
phy_set_drvdata(phy, twl);
+ phy_provider = devm_of_phy_provider_register(twl->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5ee61a470016..c0fe6091566a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -851,7 +851,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
kref_init(&p->users);
/* Add the pinctrl handle to the global list */
+ mutex_lock(&pinctrl_list_mutex);
list_add_tail(&p->node, &pinctrl_list);
+ mutex_unlock(&pinctrl_list_mutex);
return p;
}
@@ -1642,8 +1644,10 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
device_root, pctldev, &pinctrl_groups_ops);
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
- pinmux_init_device_debugfs(device_root, pctldev);
- pinconf_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->pmxops)
+ pinmux_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->confops)
+ pinconf_init_device_debugfs(device_root, pctldev);
}
static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 38c6f8b9790e..d990e33d8aa7 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1286,22 +1286,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
case IRQ_TYPE_EDGE_FALLING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_LOW:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_HIGH:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
@@ -1310,7 +1310,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
* disable additional interrupt modes:
* fall back to default behavior
*/
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_AIMDR);
return 0;
case IRQ_TYPE_NONE:
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index 17aecde1b51d..815384b377b5 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -45,7 +45,7 @@ struct imx1_pinctrl {
#define MX1_DDIR 0x00
#define MX1_OCR 0x04
#define MX1_ICONFA 0x0c
-#define MX1_ICONFB 0x10
+#define MX1_ICONFB 0x14
#define MX1_GIUS 0x20
#define MX1_GPR 0x38
#define MX1_PUEN 0x40
@@ -97,13 +97,13 @@ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 old_val;
u32 new_val;
- dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
- reg, offset, value);
-
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
reg += 0x04;
+ dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+ reg, offset, value);
+
/* Get current state of pins */
old_val = readl(reg);
old_val &= mask;
@@ -139,7 +139,7 @@ static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 reg_offset)
{
void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
- int offset = pin_id % 16;
+ int offset = (pin_id % 16) * 2;
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index a2e93a2b5ff4..e767355ab0ad 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -645,7 +645,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
- return -ENODEV;
+ return -ENOMEM;
}
for (i = 0; i < pmx->nbanks; i++) {
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 37b42651d76a..dde0285544d6 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -413,7 +413,7 @@ static const struct sirfsoc_padmux ac97_padmux = {
.funcval = 0,
};
-static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
+static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask spi1_muxmask[] = {
{
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index b28d1af9c232..9802b67040cc 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
if (!configs)
return -ENOMEM;
- configs[0] = pull;
+ switch (pull) {
+ case 0:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ break;
+ case 1:
+ configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ configs[0] = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
+ }
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = data->groups[group];
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 563174891c90..041f9b638d28 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
/*
* Voltage is measured in units of 1.22mV. The voltage is stored as
- * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ * a 12-bit number plus sign, in the upper bits of a 16-bit register
*/
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 80edb7d8cb54..0b4cf9d63291 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev)
ret = PTR_ERR(isp->phy);
goto fail0;
}
- if (!isp->phy)
- goto fail0;
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d67f158..0fbac861080d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (chip->pdata->battery_online)
+ if (chip->pdata && chip->pdata->battery_online)
chip->online = chip->pdata->battery_online();
else
chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ if (!chip->pdata || !chip->pdata->charger_online
+ || !chip->pdata->charger_enable) {
chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
return;
}
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 8a843a04c224..a40b9c34e9ff 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -52,8 +52,10 @@ lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
offset = pwm_map->output[i];
/* Return an error if the pin is already assigned */
- if (test_and_set_bit(offset, &lp3943->pin_used))
+ if (test_and_set_bit(offset, &lp3943->pin_used)) {
+ kfree(pwm_map);
return ERR_PTR(-EBUSY);
+ }
}
return pwm_map;
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 77b46d0b37a6..e10febe9ec34 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
struct ab3100_platform_data *plfdata,
struct regulator_init_data *init_data,
struct device_node *np,
- int id)
+ unsigned long id)
{
struct regulator_desc *desc;
struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
err = ab3100_regulator_register(
pdev, NULL, ab3100_regulator_matches[i].init_data,
ab3100_regulator_matches[i].of_node,
- (int) ab3100_regulator_matches[i].driver_data);
+ (unsigned long)ab3100_regulator_matches[i].driver_data);
if (err) {
ab3100_regulators_remove(pdev);
return err;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b38a6b669e8c..d1ac4caaf1b0 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1272,6 +1272,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (r->dev.parent &&
node == r->dev.of_node)
return r;
+ *ret = -EPROBE_DEFER;
+ return NULL;
} else {
/*
* If we couldn't even get the node then it's
@@ -1312,7 +1314,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret = -EPROBE_DEFER;
+ int ret;
if (id == NULL) {
pr_err("get() with no identifier\n");
@@ -1322,6 +1324,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (dev)
devname = dev_name(dev);
+ if (have_full_constraints())
+ ret = -ENODEV;
+ else
+ ret = -EPROBE_DEFER;
+
mutex_lock(&regulator_list_mutex);
rdev = regulator_dev_lookup(dev, id, &ret);
@@ -1352,7 +1359,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
goto found;
/* Don't log an error when called from regulator_get_optional() */
} else if (!have_full_constraints() || exclusive) {
- dev_err(dev, "dummy supplies not allowed\n");
+ dev_warn(dev, "dummy supplies not allowed\n");
}
mutex_unlock(&regulator_list_mutex);
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f340206d329..b14ebdad5dd2 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev)
/* Only LDO 5 and 6 has got the over current interrupt */
if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
irq = platform_get_irq_byname(pdev, "REGULATOR");
- irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
da9055_ldo5_6_oc_irq,
IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 56727eb745df..91e99a2c8dc1 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -1,3 +1,4 @@
+
/*
* Regulator driver for DA9063 PMIC series
*
@@ -60,7 +61,8 @@ struct da9063_regulator_info {
.desc.ops = &da9063_ldo_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
- .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \
+ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
+ + (DA9063_V##regl_name##_BIAS)), \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
.desc.enable_mask = DA9063_LDO_EN, \
.desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index b1078ba3f393..e0619526708c 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -166,12 +166,14 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
MAX14577_REG_MAX);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
- }
+ else
+ ret = 0;
- return 0;
+ of_node_put(np);
+
+ return ret;
}
static inline struct regulator_init_data *match_init_data(int index)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d9e557990577..cd0b9e35a56d 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -441,6 +441,7 @@ common_reg:
for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
if (!reg_np) {
config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].reg_node;
} else {
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index d7164bb75d3e..d958dfa05125 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -535,7 +535,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
return -ENODEV;
}
- regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -591,6 +591,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rmode++;
}
+ of_node_put(regulators_np);
+
if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) {
pdata->buck2_gpiodvs = true;
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index f6b9188c5af5..9f0ea6cb6922 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -610,6 +610,7 @@ void chsc_chp_online(struct chp_id chpid)
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
+ css_schedule_reprobe();
}
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d85d205..8ee88c4ebd83 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -342,8 +342,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
*/
int cio_commit_config(struct subchannel *sch)
{
- struct schib schib;
int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
@@ -367,7 +368,10 @@ int cio_commit_config(struct subchannel *sch)
ret = -EAGAIN;
break;
case 1: /* status pending */
- return -EBUSY;
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
@@ -403,7 +407,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +421,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
- for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
ret = cio_commit_config(sch);
- if (ret == -EIO) {
- /*
- * Got a program check in msch. Try without
- * the concurrent sense bit the next time.
- */
- sch->config.csense = 0;
- } else if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
}
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
@@ -444,7 +441,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@ int cio_disable_subchannel(struct subchannel *sch)
return -ENODEV;
sch->config.ena = 0;
+ ret = cio_commit_config(sch);
- for (retry = 0; retry < 3; retry++) {
- ret = cio_commit_config(sch);
- if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
- }
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae18bd11..a563e4c00590 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
#define need_siga_sync_out_after_pci(q) \
(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
-#define for_each_input_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->input_qs[0]; \
- i < irq_ptr->nr_input_qs; \
- q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->output_qs[0]; \
- i < irq_ptr->nr_output_qs; \
- q = irq_ptr->output_qs[++i])
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
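The rewritten queue iterators above matter because the old form read irq_ptr->input_qs[++i] once more after the last valid queue, before the loop condition could stop it; the new form checks the bound before touching the array. A usage sketch follows; the handler name is hypothetical.

static void demo_scan_inbound(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	/* Safe even when irq_ptr->nr_input_qs == 0: the bound check fails
	 * before input_qs[i] is read.
	 */
	for_each_input_queue(irq_ptr, q, i)
		demo_handle_inbound(q);
}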
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c883a085c059..77466c4faabb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!pci_out_supported(q))
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
for_each_output_queue(irq_ptr, q, i) {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index dc542e0a3055..0bc91e46395a 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -311,7 +311,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
} __packed * msg = ap_msg->message;
int rcblen = CEIL4(xcRB->request_control_blk_length);
- int replylen;
+ int replylen, req_sumlen, resp_sumlen;
char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
char *function_code;
@@ -321,12 +321,34 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
xcRB->request_data_length;
if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+
+ /* Overflow check:
+ the sum must be greater than (or equal to) the largest operand */
+ req_sumlen = CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if ((CEIL4(xcRB->request_control_blk_length) <=
+ xcRB->request_data_length) ?
+ (req_sumlen < xcRB->request_data_length) :
+ (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+ return -EINVAL;
+ }
+
replylen = sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length;
if (replylen > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+ /* Overflow check:
+ the sum must be greater than (or equal to) the largest operand */
+ resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
+ (resp_sumlen < xcRB->reply_data_length) :
+ (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+ return -EINVAL;
+ }
+
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
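A standalone restatement of the overflow check added above, as a hedged sketch using unsigned arithmetic (where wrap-around is well defined): after a wrapping addition the sum is smaller than the larger operand, so comparing the sum against max(a, b) detects the wrap.

#include <stdbool.h>

static bool add_wraps(unsigned int a, unsigned int b)
{
	unsigned int sum = a + b;	/* wraps modulo UINT_MAX + 1 on overflow */

	return sum < (a > b ? a : b);	/* wrapped iff sum < max(a, b) */
}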
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 6b4678a7900a..4ccb5d869389 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -507,7 +507,6 @@ static int jsflash_init(void)
}
/* Let us be really paranoid for modifications to probing code. */
- /* extern enum sparc_cpu sparc_cpu_model; */ /* in <asm/system.h> */
if (sparc_cpu_model != sun4m) {
/* We must be on sun4m because we use MMU Bypass ASI. */
return -ENXIO;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 9e80d61e5a3a..0cb73074c199 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -790,17 +790,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
}
/* Called by tcm_qla2xxx configfs code */
-void qlt_stop_phase1(struct qla_tgt *tgt)
+int qlt_stop_phase1(struct qla_tgt *tgt)
{
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = tgt->ha;
unsigned long flags;
+ mutex_lock(&qla_tgt_mutex);
+ if (!vha->fc_vport) {
+ struct Scsi_Host *sh = vha->host;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+ bool npiv_vports;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ npiv_vports = (fc_host->npiv_vports_inuse);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ if (npiv_vports) {
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
+ }
+ }
if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n");
- dump_stack();
- return;
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
}
ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
@@ -815,6 +830,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
qlt_clear_tgt_db(tgt, true);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&qla_tgt_mutex);
flush_delayed_work(&tgt->sess_del_work);
@@ -841,6 +857,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
/* Wait for sessions to clear out (just in case) */
wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+ return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -2595,8 +2612,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- INIT_LIST_HEAD(&cmd->cmd_list);
-
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -3187,7 +3202,8 @@ restart:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
"SRR cmd %p (se_cmd %p, tag %d, op %x), "
"sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
- se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+ se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->sg_cnt, cmd->offset);
qlt_handle_srr(vha, sctio, imm);
@@ -4183,6 +4199,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+ if (base_vha->fc_vport)
+ return 0;
+
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
@@ -4196,6 +4215,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
if (!vha->vha_tgt.qla_tgt)
return 0;
+ if (vha->fc_vport) {
+ qlt_release(vha->vha_tgt.qla_tgt);
+ return 0;
+ }
mutex_lock(&qla_tgt_mutex);
list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex);
@@ -4267,6 +4290,12 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
spin_unlock_irqrestore(&ha->hardware_lock, flags);
continue;
}
+ if (tgt->tgt_stop) {
+ pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!scsi_host_get(host)) {
@@ -4281,12 +4310,11 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
scsi_host_put(host);
continue;
}
- mutex_unlock(&qla_tgt_mutex);
-
rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
if (rc != 0)
scsi_host_put(host);
+ mutex_unlock(&qla_tgt_mutex);
return rc;
}
mutex_unlock(&qla_tgt_mutex);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 1d10eecad499..ce33d8c26406 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@ struct qla_tgt_cmd {
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
- struct list_head cmd_list;
struct atio_from_isp atio;
};
@@ -1002,7 +1001,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *,
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
-extern void qlt_stop_phase1(struct qla_tgt *);
+extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 75a141bbe74d..788c4fe2b0c9 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -182,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
return 0;
}
-static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
- u64 wwpn, u64 wwnn)
-{
- u8 b[8], b2[8];
-
- put_unaligned_be64(wwpn, b);
- put_unaligned_be64(wwnn, b2);
- return snprintf(buf, len,
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
- b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
- b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
-}
-
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
return "qla2xxx_npiv";
@@ -227,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
return lport->lport_naa_name;
}
-static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
-
- return &lport->lport_npiv_name[0];
-}
-
static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -941,15 +918,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable(
atomic_read(&tpg->lport_tpg_enabled));
}
+static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(base_vha);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
+static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 0);
+ configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
static ssize_t tcm_qla2xxx_tpg_store_enable(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
- struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
- struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
- struct tcm_qla2xxx_lport, lport_wwn);
- struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
@@ -964,19 +967,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
pr_err("Illegal value for tpg_enable: %lu\n", op);
return -EINVAL;
}
-
if (op) {
- atomic_set(&tpg->lport_tpg_enabled, 1);
- qlt_enable_vha(vha);
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
} else {
- if (!vha->vha_tgt.qla_tgt) {
- pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
- return -ENODEV;
- }
- atomic_set(&tpg->lport_tpg_enabled, 0);
- qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
}
+ init_completion(&tpg->tpg_base_comp);
+ schedule_work(&tpg->tpg_base_work);
+ wait_for_completion(&tpg->tpg_base_comp);
+ if (op) {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return -ENODEV;
+ } else {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EPERM;
+ }
return count;
}
@@ -1053,11 +1065,64 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
/*
* Clear local TPG=1 pointer for non NPIV mode.
*/
- lport->tpg_1 = NULL;
-
+ lport->tpg_1 = NULL;
kfree(tpg);
}
+static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+ if (op) {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+ &tcm_qla2xxx_npiv_tpg_enable.attr,
+ NULL,
+};
+
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
struct se_wwn *wwn,
struct config_group *group,
@@ -1650,6 +1715,9 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
struct scsi_qla_host *npiv_vha;
struct tcm_qla2xxx_lport *lport =
(struct tcm_qla2xxx_lport *)target_lport_ptr;
+ struct tcm_qla2xxx_lport *base_lport =
+ (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+ struct tcm_qla2xxx_tpg *base_tpg;
struct fc_vport_identifiers vport_id;
if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1657,6 +1725,13 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
return -EPERM;
}
+ if (!base_lport || !base_lport->tpg_1 ||
+ !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+ pr_err("qla2xxx base_lport or tpg_1 not available\n");
+ return -EPERM;
+ }
+ base_tpg = base_lport->tpg_1;
+
memset(&vport_id, 0, sizeof(vport_id));
vport_id.port_name = npiv_wwpn;
vport_id.node_name = npiv_wwnn;
@@ -1675,7 +1750,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
npiv_vha = (struct scsi_qla_host *)vport->dd_data;
npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
lport->qla_vha = npiv_vha;
-
scsi_host_get(npiv_vha->host);
return 0;
}
@@ -1714,8 +1788,6 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
}
lport->lport_npiv_wwpn = npiv_wwpn;
lport->lport_npiv_wwnn = npiv_wwnn;
- tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
- TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
ret = tcm_qla2xxx_init_lport(lport);
@@ -1824,7 +1896,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
- .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
@@ -1935,7 +2007,7 @@ static int tcm_qla2xxx_register_configfs(void)
*/
npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
- tcm_qla2xxx_tpg_attrs;
+ tcm_qla2xxx_npiv_tpg_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 275d8b9a7a34..33aaac8c7d59 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,8 +4,6 @@
#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
-/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */
-#define TCM_QLA2XXX_NPIV_NAMELEN 66
#include "qla_target.h"
@@ -43,6 +41,9 @@ struct tcm_qla2xxx_tpg {
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg;
+ /* Items for dealing with configfs_depend_item */
+ struct completion tpg_base_comp;
+ struct work_struct tpg_base_work;
};
struct tcm_qla2xxx_fc_loopid {
@@ -62,8 +63,6 @@ struct tcm_qla2xxx_lport {
char lport_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted naa WWPN for VPD page 83 etc */
char lport_naa_name[TCM_QLA2XXX_NAMELEN];
- /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
- char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */
struct btree_head32 lport_fcport_map;
/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..62ec84b42e31 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
host_dev = scsi_get_device(shost);
if (host_dev && host_dev->dma_mask)
- bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+ bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
return bounce_limit;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ba9310bc9acb..581ee2a8856b 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -376,10 +376,10 @@ config SPI_PXA2XX_PCI
def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
- tristate "Renesas RSPI controller"
+ tristate "Renesas RSPI/QSPI controller"
depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
help
- SPI driver for Renesas RSPI blocks.
+ SPI driver for Renesas RSPI and QSPI blocks.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 50406306bc20..bae97ffec4b9 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -361,6 +361,8 @@ static int nuc900_spi_probe(struct platform_device *pdev)
init_completion(&hw->done);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0f9036..d0b28bba38be 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -755,9 +755,7 @@ static void spi_pump_messages(struct kthread_work *work)
ret = master->transfer_one_message(master, master->cur_msg);
if (ret) {
dev_err(&master->dev,
- "failed to transfer one message from queue: %d\n", ret);
- master->cur_msg->status = ret;
- spi_finalize_current_message(master);
+ "failed to transfer one message from queue\n");
return;
}
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -498,6 +506,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
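
The set_name() rework above swaps a fixed-length copy_from_user() for strncpy_from_user() plus explicit NUL termination, so an unterminated user string can no longer spill past the local buffer. A rough userspace analog of the bounded-copy-and-terminate pattern (illustration only; strncpy_from_user itself exists only in the kernel):

/* userspace sketch of the same idea, not the kernel helper */
#include <stdio.h>
#include <string.h>

#define NAME_LEN 16

static void set_name_bounded(char *dst, const char *src)
{
        strncpy(dst, src, NAME_LEN);   /* copy at most NAME_LEN bytes */
        dst[NAME_LEN - 1] = '\0';      /* force termination, as the patch does */
}

int main(void)
{
        char name[NAME_LEN];

        set_name_bounded(name, "a-rather-long-client-supplied-name");
        printf("stored: \"%s\"\n", name);
        return 0;
}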
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index eaec1dab7fe4..1432d956769c 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2904,7 +2904,7 @@ static int binder_node_release(struct binder_node *node, int refs)
refs++;
if (!ref->death)
- goto out;
+ continue;
death++;
@@ -2917,7 +2917,6 @@ static int binder_node_release(struct binder_node *node, int refs)
BUG();
}
-out:
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index af6cd370b30f..ee3a7380e53b 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -35,9 +35,14 @@ struct compat_ion_custom_data {
compat_ulong_t arg;
};
+struct compat_ion_handle_data {
+ compat_int_t handle;
+};
+
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
+ struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data(
return err;
}
+static int compat_get_ion_handle_data(
+ struct compat_ion_handle_data __user *data32,
+ struct ion_handle_data __user *data)
+{
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
static int compat_put_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
case COMPAT_ION_IOC_FREE:
{
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
+ struct compat_ion_handle_data __user *data32;
+ struct ion_handle_data __user *data;
int err;
data32 = compat_ptr(arg);
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (data == NULL)
return -EFAULT;
- err = compat_get_ion_allocation_data(data32, data);
+ err = compat_get_ion_handle_data(data32, data);
if (err)
return err;
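
Compat ioctl paths conventionally mirror the native structures with fixed-width compat_* types, because _IOWR() folds sizeof() of the argument into the command number and the handler copies exactly that many bytes from 32-bit userspace. A generic sketch of the kind of layout difference this guards against (hypothetical structs, not the actual ion definitions):

/* sketch: a long-sized field gives different sizes on 32- and 64-bit ABIs */
#include <stdint.h>
#include <stdio.h>

struct native_handle_data {
        long handle;            /* 4 bytes on 32-bit, 8 bytes on 64-bit */
};

struct compat_handle_data {
        int32_t handle;         /* fixed width on every ABI */
};

int main(void)
{
        printf("native: %zu bytes, compat: %zu bytes\n",
               sizeof(struct native_handle_data),
               sizeof(struct compat_handle_data));
        return 0;
}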
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 55b2002753f2..01cdc8aee898 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -17,9 +17,11 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
+#include <linux/io.h>
#include "ion.h"
#include "ion_priv.h"
@@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = {
};
struct ion_platform_data dummy_ion_pdata = {
- .nr = 4,
+ .nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void)
heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
GFP_KERNEL);
if (!heaps)
- return PTR_ERR(heaps);
+ return -ENOMEM;
/* Allocate a dummy carveout heap */
@@ -128,6 +130,7 @@ err:
}
return err;
}
+device_initcall(ion_dummy_init);
static void __exit ion_dummy_exit(void)
{
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void)
return;
}
-
-module_init(ion_dummy_init);
-module_exit(ion_dummy_exit);
-
+__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 296c74f98dc0..37e64d51394c 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
- sched_setscheduler(heap->task, SCHED_IDLE, &param);
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
return PTR_RET(heap->task);
}
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
return 0;
}
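
The ion_heap_init_deferred_free() hunk only reorders two calls: kthread_run() can return an ERR_PTR(), so sched_setscheduler() must not touch heap->task until IS_ERR() has been checked. A rough userspace analog using pthreads (an assumption for illustration, not the kernel API):

/* sketch: verify creation succeeded before configuring the thread */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg) { (void)arg; return NULL; }

int main(void)
{
        pthread_t task;
        struct sched_param param = { .sched_priority = 0 };

        if (pthread_create(&task, NULL, worker, NULL) != 0) {
                fprintf(stderr, "creating worker thread failed\n");
                return 1;                       /* bail out before using 'task' */
        }
        pthread_setschedparam(task, SCHED_OTHER, &param); /* safe only now */
        pthread_join(task, NULL);
        return 0;
}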
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index d98673981cc4..fc2e4fccf69d 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,6 +17,7 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7f0729130d65..9849f3963e75 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
info->page = page;
info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
return info;
}
kfree(info);
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
struct list_head pages;
struct page_info *info, *tmp_info;
int i = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
if (align > PAGE_SIZE)
return -EINVAL;
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
info = alloc_largest_available(sys_heap, buffer, size_remaining,
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..5aaf71d6974b 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -35,10 +35,27 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* __KERNEL __ */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b5ed9b..3d05f662110b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 8dfdd2732bdc..95a2358267ba 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev)
}
static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return ClassifyPacket(netdev_priv(dev), skb);
}
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 246080316c90..5b15033a94bf 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device,
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
- if (ret < 0)
- comedi_device_detach(dev);
mutex_unlock(&dev->mutex);
if (ret < 0) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 593676cf706a..d9ad2c0fdda2 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, chan, range, ofs;
chan = CR_CHAN(insn->chanspec);
@@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
ofs = PCI171x_DA1;
}
+ val = devpriv->ao_data[chan];
- for (n = 0; n < insn->n; n++)
- outw(data[n], dev->iobase + ofs);
+ for (n = 0; n < insn->n; n++) {
+ val = data[n];
+ outw(val, dev->iobase + ofs);
+ }
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, rangereg, chan;
chan = CR_CHAN(insn->chanspec);
@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
outb(rangereg, dev->iobase + PCI1720_RANGE);
devpriv->da_ranges = rangereg;
}
+ val = devpriv->ao_data[chan];
for (n = 0; n < insn->n; n++) {
- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
+ val = data[n];
+ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
}
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
}
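
Both AO hunks above fix the same off-by-one: after "for (n = 0; n < insn->n; n++)" the index equals insn->n, so the old "devpriv->ao_data[chan] = data[n]" read one element past the end of the caller's buffer. A generic sketch of keeping the last written value inside the loop:

/* sketch: data[n] after the loop is out of bounds; remember the value instead */
#include <stdio.h>

int main(void)
{
        unsigned int data[3] = { 10, 20, 30 };
        unsigned int last = 0;
        int n;

        for (n = 0; n < 3; n++)
                last = data[n];         /* track the value actually written */

        /* here n == 3, so data[n] would be one past the end */
        printf("last value written: %u\n", last);
        return 0;
}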
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3beeb1254152..88c60b6020c4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -48,6 +48,7 @@
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
+#include <asm/unaligned.h>
#include "comedi_fc.h"
#include "../comedidev.h"
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
+ val = be32_to_cpu(get_unaligned((uint32_t
+ *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return ret;
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
+ val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
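
insn_buf + 1 is an odd offset, so the plain 32-bit dereference replaced above is undefined on strict-alignment architectures; get_unaligned() plus be32_to_cpu() reads it safely. A userspace sketch (illustration only) of pulling a big-endian 32-bit value out of an arbitrarily aligned buffer:

/* sketch: assemble the value byte by byte instead of casting the pointer */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
        uint8_t buf[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 }; /* value starts at buf + 1 */

        printf("0x%08x\n", read_be32(buf + 1));    /* prints 0x12345678 */
        return 0;
}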
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 1f61b89eca44..33ac7fb88cbd 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,177 +2232,6 @@ done:
return rtn;
}
-/*
- * Common Packet Handling code
- */
-
-static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
- long dlen, long plen, int n1, u8 *dbuf)
-{
- char *error;
- long n;
- long remain;
- u8 *buf;
- u8 *b;
-
- remain = nd->nd_remain;
- nd->nd_tx_work = 1;
-
- /*
- * Otherwise data should appear only when we are
- * in the CS_READY state.
- */
-
- if (ch->ch_state < CS_READY) {
- error = "Data received before RWIN established";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * Assure that the data received is within the
- * allowable window.
- */
-
- n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
-
- if (dlen > n) {
- error = "Receive data overrun";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * If we received 3 or less characters,
- * assume it is a human typing, and set RTIME
- * to 10 milliseconds.
- *
- * If we receive 10 or more characters,
- * assume its not a human typing, and set RTIME
- * to 100 milliseconds.
- */
-
- if (ch->ch_edelay != DGRP_RTIME) {
- if (ch->ch_rtime != ch->ch_edelay) {
- ch->ch_rtime = ch->ch_edelay;
- ch->ch_flag |= CH_PARAM;
- }
- } else if (dlen <= 3) {
- if (ch->ch_rtime != 10) {
- ch->ch_rtime = 10;
- ch->ch_flag |= CH_PARAM;
- }
- } else {
- if (ch->ch_rtime != DGRP_RTIME) {
- ch->ch_rtime = DGRP_RTIME;
- ch->ch_flag |= CH_PARAM;
- }
- }
-
- /*
- * If a portion of the packet is outside the
- * buffer, shorten the effective length of the
- * data packet to be the amount of data received.
- */
-
- if (remain < plen)
- dlen -= plen - remain;
-
- /*
- * Detect if receive flush is now complete.
- */
-
- if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
- ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
- ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
- ch->ch_flag &= ~CH_RX_FLUSH;
- }
-
- /*
- * If we are ready to receive, move the data into
- * the receive buffer.
- */
-
- ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
-
- if (ch->ch_state == CS_READY &&
- (ch->ch_tun.un_open_count != 0) &&
- (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
- (ch->ch_cflag & CF_CREAD) != 0 &&
- (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
- (ch->ch_send & RR_RX_FLUSH) == 0) {
-
- if (ch->ch_rin + dlen >= RBUF_MAX) {
- n = RBUF_MAX - ch->ch_rin;
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
-
- ch->ch_rin = 0;
- dbuf += n;
- dlen -= n;
- }
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
-
- ch->ch_rin += dlen;
-
-
- /*
- * If we are not in fastcook mode, or
- * if there is a fastcook thread
- * waiting for data, send the data to
- * the line discipline.
- */
-
- if ((ch->ch_flag & CH_FAST_READ) == 0 ||
- ch->ch_inwait != 0) {
- dgrp_input(ch);
- }
-
- /*
- * If there is a read thread waiting
- * in select, and we are in fastcook
- * mode, wake him up.
- */
-
- if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
- (ch->ch_flag & CH_FAST_READ) != 0)
- wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
-
- /*
- * Wake any thread waiting in the
- * fastcook loop.
- */
-
- if ((ch->ch_flag & CH_INPUT) != 0) {
- ch->ch_flag &= ~CH_INPUT;
- wake_up_interruptible(&ch->ch_flag_wait);
- }
- }
-
- /*
- * Fabricate and insert a data packet header to
- * preced the remaining data when it comes in.
- */
-
- if (remain < plen) {
- dlen = plen - remain;
- b = buf;
-
- b[0] = 0x90 + n1;
- put_unaligned_be16(dlen, b + 1);
-
- remain = 3;
- if (remain > 0 && b != buf)
- memcpy(buf, b, remain);
-
- nd->nd_remain = remain;
- return;
- }
-}
-
/**
* dgrp_receive() -- decode data packets received from the remote PortServer.
* @nd: pointer to a node structure
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 1;
dbuf = b + 1;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 2-byte header data packet.
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 2;
dbuf = b + 2;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 3-byte header data packet.
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd)
dbuf = b + 3;
+ /*
+ * Common packet handling code.
+ */
+
+data:
+ nd->nd_tx_work = 1;
+
+ /*
+ * Otherwise data should appear only when we are
+ * in the CS_READY state.
+ */
+
+ if (ch->ch_state < CS_READY) {
+ error = "Data received before RWIN established";
+ goto prot_error;
+ }
+
+ /*
+ * Assure that the data received is within the
+ * allowable window.
+ */
+
+ n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
+
+ if (dlen > n) {
+ error = "Receive data overrun";
+ goto prot_error;
+ }
+
+ /*
+ * If we received 3 or less characters,
+ * assume it is a human typing, and set RTIME
+ * to 10 milliseconds.
+ *
+ * If we receive 10 or more characters,
+ * assume it's not a human typing, and set RTIME
+ * to 100 milliseconds.
+ */
+
+ if (ch->ch_edelay != DGRP_RTIME) {
+ if (ch->ch_rtime != ch->ch_edelay) {
+ ch->ch_rtime = ch->ch_edelay;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else if (dlen <= 3) {
+ if (ch->ch_rtime != 10) {
+ ch->ch_rtime = 10;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else {
+ if (ch->ch_rtime != DGRP_RTIME) {
+ ch->ch_rtime = DGRP_RTIME;
+ ch->ch_flag |= CH_PARAM;
+ }
+ }
+
+ /*
+ * If a portion of the packet is outside the
+ * buffer, shorten the effective length of the
+ * data packet to be the amount of data received.
+ */
+
+ if (remain < plen)
+ dlen -= plen - remain;
+
+ /*
+ * Detect if receive flush is now complete.
+ */
+
+ if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
+ ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
+ ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
+ ch->ch_flag &= ~CH_RX_FLUSH;
+ }
+
+ /*
+ * If we are ready to receive, move the data into
+ * the receive buffer.
+ */
+
+ ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
+
+ if (ch->ch_state == CS_READY &&
+ (ch->ch_tun.un_open_count != 0) &&
+ (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
+ (ch->ch_cflag & CF_CREAD) != 0 &&
+ (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
+ (ch->ch_send & RR_RX_FLUSH) == 0) {
+
+ if (ch->ch_rin + dlen >= RBUF_MAX) {
+ n = RBUF_MAX - ch->ch_rin;
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
+
+ ch->ch_rin = 0;
+ dbuf += n;
+ dlen -= n;
+ }
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
+
+ ch->ch_rin += dlen;
+
+
+ /*
+ * If we are not in fastcook mode, or
+ * if there is a fastcook thread
+ * waiting for data, send the data to
+ * the line discipline.
+ */
+
+ if ((ch->ch_flag & CH_FAST_READ) == 0 ||
+ ch->ch_inwait != 0) {
+ dgrp_input(ch);
+ }
+
+ /*
+ * If there is a read thread waiting
+ * in select, and we are in fastcook
+ * mode, wake him up.
+ */
+
+ if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
+ (ch->ch_flag & CH_FAST_READ) != 0)
+ wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
+
+ /*
+ * Wake any thread waiting in the
+ * fastcook loop.
+ */
+
+ if ((ch->ch_flag & CH_INPUT) != 0) {
+ ch->ch_flag &= ~CH_INPUT;
+
+ wake_up_interruptible(&ch->ch_flag_wait);
+ }
+ }
+
+ /*
+ * Fabricate and insert a data packet header to
+ * precede the remaining data when it comes in.
+ */
+
+ if (remain < plen) {
+ dlen = plen - remain;
+ b = buf;
+
+ b[0] = 0x90 + n1;
+ put_unaligned_be16(dlen, b + 1);
+
+ remain = 3;
+ goto done;
+ }
break;
/*
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index f8788bf0a7d3..cdeffe75496b 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf,
#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
ret = register_wimax_device(phy_dev, &intf->dev);
+ if (ret)
+ release_usb(udev);
out:
if (ret) {
kfree(phy_dev);
kfree(udev);
+ usb_put_dev(usbdev);
} else {
usb_set_intfdata(intf, phy_dev);
}
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d60faf6..c9fedb79e3a2 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@ struct iio_channel_info {
uint64_t mask;
unsigned be;
unsigned is_signed;
- unsigned enabled;
unsigned location;
};
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir,
while (ent = readdir(dp), ent != NULL) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
+ int current_enabled = 0;
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir,
ret = -errno;
goto error_cleanup_array;
}
- fscanf(sysfsfp, "%u", &current->enabled);
+ fscanf(sysfsfp, "%u", &current_enabled);
fclose(sysfsfp);
- if (!current->enabled) {
+ if (!current_enabled) {
free(filename);
count--;
continue;
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5ea36410f716..5708ffc62aec 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = {
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
- .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ .shift = 12 - (_realbits), \
+ .endianness = IIO_BE, \
+ }, \
.event_spec = _ev_spec, \
.num_event_specs = _num_ev_spec, \
}
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(client->irq, indio_dev);
+ if (client->irq > 0)
+ free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index df71669bb60e..514844efac75 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -757,6 +757,7 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
}
/* if it is released, wait for the next touch via IRQ */
+ lradc->cur_plate = LRADC_TOUCH;
mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
}
@@ -1035,8 +1036,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4);
SHOW_SCALE_AVAILABLE_ATTR(5);
SHOW_SCALE_AVAILABLE_ATTR(6);
SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(8);
-SHOW_SCALE_AVAILABLE_ATTR(9);
SHOW_SCALE_AVAILABLE_ATTR(10);
SHOW_SCALE_AVAILABLE_ATTR(11);
SHOW_SCALE_AVAILABLE_ATTR(12);
@@ -1053,8 +1052,6 @@ static struct attribute *mxs_lradc_attributes[] = {
&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
@@ -1613,7 +1610,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
* of the array.
*/
scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
- (iio->channels[i].scan_type.realbits - s);
+ (LRADC_RESOLUTION - s);
lradc->scale_avail[i][s].nano =
do_div(scale_uv, 100000000) * 10;
lradc->scale_avail[i][s].integer = scale_uv;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b744e6..2b96665da8a2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate(indio_dev);
- if (buffer)
+ if (!buffer)
return -ENOMEM;
iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 09ef5fb8bae6..236ed66f116a 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
imx_drm_device_put();
- drm_vblank_cleanup(imxdrm->drm);
- drm_kms_helper_poll_fini(imxdrm->drm);
- drm_mode_config_cleanup(imxdrm->drm);
+ drm_vblank_cleanup(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
return 0;
}
@@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
- return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
@@ -370,29 +370,6 @@ static void imx_drm_connector_unregister(
}
/*
- * register a crtc to the drm core
- */
-static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- int ret;
-
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- return ret;
-
- drm_crtc_helper_add(imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
-
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
-
-/*
* Called by the CRTC driver when all CRTCs are registered. This
* puts all the pieces together and initializes the driver.
* Once this is called no more CRTCs can be registered since
@@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
mutex_lock(&imxdrm->mutex);
- drm_kms_helper_poll_init(imxdrm->drm);
+ drm_kms_helper_poll_init(drm);
/* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(imxdrm->drm,
- &imxdrm->drm->primary->mode_group);
+ ret = drm_mode_group_init_legacy_group(drm,
+ &drm->primary->mode_group);
if (ret)
goto err_kms;
- ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
+ ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
goto err_kms;
@@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- imxdrm->drm->vblank_disable_allowed = true;
+ drm->vblank_disable_allowed = true;
if (!imx_drm_device_get()) {
ret = -EINVAL;
@@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = imx_drm_crtc_register(imx_drm_crtc);
+ ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
goto err_register;
+ drm_crtc_helper_add(crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+
+ drm_crtc_init(imxdrm->drm, crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
+ drm_mode_group_reinit(imxdrm->drm);
+
imx_drm_update_possible_crtcs();
mutex_unlock(&imxdrm->mutex);
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index f3a1f5e2e492..62ce0e86f14b 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/hdmi.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -52,11 +53,6 @@ enum hdmi_datamap {
YCbCr422_12B = 0x12,
};
-enum hdmi_colorimetry {
- ITU601,
- ITU709,
-};
-
enum imx_hdmi_devtype {
IMX6Q_HDMI,
IMX6DL_HDMI,
@@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
if (is_color_space_conversion(hdmi)) {
if (hdmi->hdmi_data.enc_out_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_out_eitu601;
else
csc_coeff = &csc_coeff_rgb_out_eitu709;
} else if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_in_eitu601;
else
csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi)
/* Set up colorimetry */
if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
} else if (hdmi->hdmi_data.enc_out_format != RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
} else { /* Carries no data */
@@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
(hdmi->vic == 21) || (hdmi->vic == 22) ||
(hdmi->vic == 2) || (hdmi->vic == 3) ||
(hdmi->vic == 17) || (hdmi->vic == 18))
- hdmi->hdmi_data.colorimetry = ITU601;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
else
- hdmi->hdmi_data.colorimetry = ITU709;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
(hdmi->vic == 12) || (hdmi->vic == 13) ||
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6d62a8..0a2b6cb3775e 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
* Other minor misc cleanups...
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
-hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
+<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
+Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
+would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15fc8996..037ae8a6d531 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@ struct kuc_hdr {
__u16 kuc_msglen; /* Including header */
} __attribute__((aligned(sizeof(__u64))));
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+
#define KUC_MAGIC 0x191C /*Lustre9etLinC */
#define KUC_FL_BLOCK 0x01 /* Wait for send */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942ced01a..dddccca120c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@ do { \
do { \
LASSERT(!in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & GFP_ATOMIC)) != 0); \
+ ((mask) & __GFP_WAIT) == 0)); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 93648632ba26..6f58ead20393 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (is_vmalloc_addr(vaddr)) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52ec998..b7b53b579c85 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- int mpflag = 0;
+ int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
- if (lntmsg->msg_vmflush)
+ if (!mpflag)
cfs_memory_pressure_restore(mpflag);
+
if (rc == 0)
return (0);
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6b6c0240e824..7893d83e131f 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+ sizeof(struct changelog_ext_rec))
struct changelog_rec {
__u16 cr_namelen;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 22d0acc95bc5..52b7731bcc38 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
break;
case Q_GETQUOTA:
if (((type == USRQUOTA &&
- uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
(!cfs_capable(CFS_CAP_SYS_ADMIN) ||
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index d1ad91c34ddc..83013927e131 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
{
struct kuc_hdr *lh = (struct kuc_hdr *)buf;
- LASSERT(len <= CR_MAXSIZE);
+ LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
lh->kuc_magic = KUC_MAGIC;
lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata)
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
- OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
@@ -1540,7 +1540,7 @@ out:
if (ctxt)
llog_ctxt_put(ctxt);
if (cs->cs_buf)
- OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
OBD_FREE_PTR(cs);
return rc;
}
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index 10bb41c2fb6d..eecb1f2a5574 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -59,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
if (usbdev->descriptor.bNumConfigurations != 1) {
dev_err(&interface->dev, "can't handle multiple config\n");
- return -ENODEV;
+ goto failed2;
}
vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -108,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
return 0;
failed2:
+ usb_put_dev(usbdev);
dev_err(&interface->dev, "probe failed\n");
return -ENODEV;
}
@@ -115,6 +116,7 @@ failed2:
static void go7007_loader_disconnect(struct usb_interface *interface)
{
dev_info(&interface->dev, "disconnect\n");
+ usb_put_dev(interface_to_usbdev(interface));
usb_set_intfdata(interface, NULL);
}
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index eedffed17e39..31b269a5fff7 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
}
static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return (u16)smp_processor_id();
}
@@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
priv->mii_bus->write = xlr_mii_write;
priv->mii_bus->parent = &pdev->dev;
priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (priv->mii_bus->irq == NULL) {
+ pr_err("irq alloc failed\n");
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
/* Scan only the enabled address */
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 47e0a91238a1..5a001d9b4252 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags {
*/
#define MAX_TRANSFER_PACKETS ((1<<10)-1)
-enum {
- USB_CLOCK_TYPE_REF_12,
- USB_CLOCK_TYPE_REF_24,
- USB_CLOCK_TYPE_REF_48,
- USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
/**
* Logical transactions may take numerous low level
* transactions, especially when splits are concerned. This
@@ -471,19 +464,6 @@ struct octeon_hcd {
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
-static int octeon_usb_get_clock_type(void)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_BBGW_REF:
- case CVMX_BOARD_TYPE_LANAI2_A:
- case CVMX_BOARD_TYPE_LANAI2_U:
- case CVMX_BOARD_TYPE_LANAI2_G:
- case CVMX_BOARD_TYPE_UBNT_E100:
- return USB_CLOCK_TYPE_CRYSTAL_12;
- }
- return USB_CLOCK_TYPE_REF_48;
-}
-
/**
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
return 0; /* Data0 */
}
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * struct cvmx_usb_state.
- *
- * Returns: Number of port, zero if usb isn't supported
- */
-static int cvmx_usb_get_num_ports(void)
-{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
-}
-
/**
* Initialize a USB port for use. This must be called before any
* other access to the Octeon USB port is made. The port starts
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void)
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
- int usb_port_number)
+ int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
- enum cvmx_usb_initialize_flags flags = 0;
int i;
/* At first allow 0-1 for the usb port number */
if ((usb_port_number < 0) || (usb_port_number > 1))
return -EINVAL;
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- return -EINVAL;
- /* Try to determine clock type automatically */
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
- /* Only 12 MHZ crystals are supported */
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- } else {
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- return -EINVAL;
- break;
- }
- }
memset(usb, 0, sizeof(*usb));
usb->init_flags = flags;
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return 0;
}
-
static const struct hc_driver octeon_hc_driver = {
.description = "Octeon USB",
.product_desc = "Octeon Host Controller",
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = {
.hub_control = octeon_usb_hub_control,
};
-
-static int octeon_usb_driver_probe(struct device *dev)
+static int octeon_usb_probe(struct platform_device *pdev)
{
int status;
- int usb_num = to_platform_device(dev)->id;
- int irq = platform_get_irq(to_platform_device(dev), 0);
+ int initialize_flags;
+ int usb_num;
+ struct resource *res_mem;
+ struct device_node *usbn_node;
+ int irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
struct octeon_hcd *priv;
struct usb_hcd *hcd;
unsigned long flags;
+ u32 clock_rate = 48000000;
+ bool is_crystal_clock = false;
+ const char *clock_type;
+ int i;
+
+ if (dev->of_node == NULL) {
+ dev_err(dev, "Error: empty of_node\n");
+ return -ENXIO;
+ }
+ usbn_node = dev->of_node->parent;
+
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No USBN \"refclk-frequency\"\n");
+ return -ENXIO;
+ }
+ switch (clock_rate) {
+ case 12000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case 24000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case 48000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate);
+ return -ENXIO;
+
+ }
+
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+
+ if (is_crystal_clock)
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ else
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(dev, "found no memory resource\n");
+ return -ENXIO;
+ }
+ usb_num = (res_mem->start >> 44) & 1;
+
+ if (irq < 0) {
+ /* Defective device tree, but we know how to fix it. */
+ irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+ irq = irq_create_mapping(NULL, hwirq);
+ }
/*
* Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev)
dev->coherent_dma_mask = ~0;
dev->dma_mask = &dev->coherent_dma_mask;
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_dbg(dev, "Failed to allocate memory for HCD\n");
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev)
tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
- status = cvmx_usb_initialize(&priv->usb, usb_num);
+ status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
@@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev)
cvmx_usb_poll(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
- status = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ status = usb_add_hcd(hcd, irq, 0);
if (status) {
dev_dbg(dev, "USB add HCD failed with %d\n", status);
kfree(hcd);
@@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev)
}
device_wakeup_enable(hcd->self.controller);
- dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+ dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
return 0;
}
-static int octeon_usb_driver_remove(struct device *dev)
+static int octeon_usb_remove(struct platform_device *pdev)
{
int status;
+ struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev)
return 0;
}
-static struct device_driver octeon_usb_driver = {
- .name = "OcteonUSB",
- .bus = &platform_bus_type,
- .probe = octeon_usb_driver_probe,
- .remove = octeon_usb_driver_remove,
+static struct of_device_id octeon_usb_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-usbc",
+ },
+ {},
};
+static struct platform_driver octeon_usb_driver = {
+ .driver = {
+ .name = "OcteonUSB",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_usb_match,
+ },
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
+};
-#define MAX_USB_PORTS 10
-static struct platform_device *pdev_glob[MAX_USB_PORTS];
-static int octeon_usb_registered;
-static int __init octeon_usb_module_init(void)
+static int __init octeon_usb_driver_init(void)
{
- int num_devices = cvmx_usb_get_num_ports();
- int device;
-
- if (usb_disabled() || num_devices == 0)
- return -ENODEV;
-
- if (driver_register(&octeon_usb_driver))
- return -ENOMEM;
-
- octeon_usb_registered = 1;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- for (device = 0; device < num_devices; device++) {
- struct resource irq_resource;
- struct platform_device *pdev;
- memset(&irq_resource, 0, sizeof(irq_resource));
- irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
- irq_resource.end = irq_resource.start;
- irq_resource.flags = IORESOURCE_IRQ;
- pdev = platform_device_register_simple((char *)octeon_usb_driver. name, device, &irq_resource, 1);
- if (IS_ERR(pdev)) {
- driver_unregister(&octeon_usb_driver);
- octeon_usb_registered = 0;
- return PTR_ERR(pdev);
- }
- if (device < MAX_USB_PORTS)
- pdev_glob[device] = pdev;
+ if (usb_disabled())
+ return 0;
- }
- return 0;
+ return platform_driver_register(&octeon_usb_driver);
}
+module_init(octeon_usb_driver_init);
-static void __exit octeon_usb_module_cleanup(void)
+static void __exit octeon_usb_driver_exit(void)
{
- int i;
+ if (usb_disabled())
+ return;
- for (i = 0; i < MAX_USB_PORTS; i++)
- if (pdev_glob[i]) {
- platform_device_unregister(pdev_glob[i]);
- pdev_glob[i] = NULL;
- }
- if (octeon_usb_registered)
- driver_unregister(&octeon_usb_driver);
+ platform_driver_unregister(&octeon_usb_driver);
}
+module_exit(octeon_usb_driver_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
-module_init(octeon_usb_module_init);
-module_exit(octeon_usb_module_cleanup);
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index cb060364dfe7..5d965cf06d59 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev)
if (binding) {
binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
binding->ptype.func = oz_pkt_recv;
- memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
+ memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
oz_dbg(ON, "Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev)
}
} else {
oz_dbg(ON, "Binding to all netcards\n");
+ memset(binding->name, 0, OZ_MAX_BINDING_LEN);
binding->ptype.dev = NULL;
}
if (binding) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..96df62f95b6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
- DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec992569476..4ad80ae1067f 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
poidparam->subcode, poidparam->len, len));
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
ret = -EINVAL;
goto _rtw_mp_ioctl_hdl_exit;
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[17 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+ u8 go_devadd_str[17 + 12] = {};
/* Commented by Albert 20121209 */
/* The input data is the GO's interface address which the application wants to know its device address. */
@@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
- sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
else
- sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
- if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
return -EFAULT;
return ret;
}
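
The go_devadd_str change above replaces sprintf() with snprintf() bounded by the buffer and copies sizeof(go_devadd_str) bytes to userspace rather than a hand-counted constant. A small sketch of the bounded formatting, with hypothetical address bytes:

/* sketch: snprintf() cannot overrun the destination array */
#include <stdio.h>

int main(void)
{
        unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        char buf[17 + 12] = { 0 };

        snprintf(buf, sizeof(buf), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
        printf("%s\n", buf);
        return 0;
}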
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 68f98fa114d2..7c9ee58f47bb 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 0a341d6ec51f..2f40ff5901d6 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -53,8 +53,9 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
/*=== Customer ID ===*/
/****** 8188EUS ********/
- {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
index 2aa5dac2f1df..abccc9dabd65 100644
--- a/drivers/staging/rtl8821ae/Kconfig
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -1,6 +1,6 @@
config R8821AE
tristate "RealTek RTL8821AE Wireless LAN NIC driver"
- depends on PCI && WLAN
+ depends on PCI && WLAN && MAC80211
depends on m
select WIRELESS_EXT
select WEXT_PRIV
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
index cfe88a1efd55..76bef93ad70a 100644
--- a/drivers/staging/rtl8821ae/wifi.h
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -1414,7 +1414,7 @@ struct rtl_dm {
/*88e tx power tracking*/
- u8 bb_swing_idx_ofdm[2];
+ u8 bb_swing_idx_ofdm[MAX_RF_PATH];
u8 bb_swing_idx_ofdm_current;
u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
bool bb_swing_flag_Ofdm;
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b771e0..81ff8522405c 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size)
struct pool *p;
p = calloc(1, sizeof(struct pool));
- if (!p) {
- free(p);
+ if (!p)
return NULL;
- }
p->mem = calloc(1, size);
- if (!p->mem)
+ if (!p->mem) {
+ free(p);
return NULL;
+ }
p->next = pool_head;
pool_head = p;
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586d11d9..0141bc34d5cc 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_WIRELESS:
break;
default:
- pr_err("speed %d\n", speed);
+ pr_err("Failed attach request for unsupported USB speed: %s\n",
+ usb_speed_string(speed));
return -EINVAL;
}
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4a1ddaf5e00f..187fc060de26 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
goto out;
}
- if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) {
+ if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d6439f4a..cda4d80cfaef 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack(
u32 last_statsn;
int found_cmd;
- if (conn->exp_statsn > begrun) {
+ if (!begrun) {
+ begrun = conn->exp_statsn;
+ } else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 12da9b386169..c3d9df6aaf5f 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -500,7 +500,7 @@ static inline int core_alua_state_lba_dependent(
if (segment_mult) {
u64 tmp = lba;
- start_lba = sector_div(tmp, segment_size * segment_mult);
+ start_lba = do_div(tmp, segment_size * segment_mult);
last_lba = first_lba + segment_size - 1;
if (start_lba >= first_lba &&
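do_div() divides a 64-bit dividend in place and returns the 32-bit remainder, which is the property the lba math above relies on now that tmp is explicitly u64. A minimal sketch of the calling convention:

	u64 tmp = lba;		/* 64-bit dividend, replaced by the quotient */
	u32 rem;

	rem = do_div(tmp, segment_size * segment_mult);
	/* tmp == lba / (segment_size * segment_mult), rem == the start_lba used above */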
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2f5d77932c80..3013287a2aaa 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
- int pr_holder = 0;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
+ type = pr_reg->pr_res_type;
spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Release the calling I_T Nexus registration now..
*/
__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+ pr_reg = NULL;
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* RESERVATIONS RELEASED.
*/
if (pr_holder &&
- (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
- pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
list_for_each_entry(pr_reg_p,
&pr_tmpl->registration_list,
pr_reg_list) {
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
out:
- core_scsi3_put_pr_reg(pr_reg);
+ if (pr_reg)
+ core_scsi3_put_pr_reg(pr_reg);
return ret;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index fa3cae393e13..42f18fc1067b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1074,14 +1074,20 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
struct scatterlist *psg;
void *paddr, *addr;
unsigned int i, len, left;
+ unsigned int offset = sg_off;
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
len = min(psg->length, left);
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- addr = kmap_atomic(sg_page(sg)) + sg_off;
+ addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
if (read)
memcpy(paddr, addr, len);
@@ -1089,6 +1095,7 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
memcpy(addr, paddr, len);
left -= len;
+ offset += len;
kunmap_atomic(paddr);
kunmap_atomic(addr);
}
@@ -1155,7 +1162,7 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg;
+ struct scatterlist *dsg, *psg = sg;
sector_t sector = start;
void *daddr, *paddr;
int i, j, offset = sg_off;
@@ -1163,14 +1170,14 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
- paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + sg->offset;
for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
- if (offset >= sg->length) {
+ if (offset >= psg->length) {
kunmap_atomic(paddr);
- sg = sg_next(sg);
- paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
offset = 0;
}
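Both hunks in this file fix the same class of bug: the protection-data copy and verify paths have to advance through a scatterlist at a running byte offset rather than mapping the first entry (or a fixed sg_off) every time. An illustrative helper, not part of the patch, showing the access pattern the open-coded loops implement:

	/* map the byte 'offset' bytes into an sg chain; caller kunmap_atomic()s */
	static void *map_sg_offset(struct scatterlist *sg, unsigned int offset)
	{
		while (offset >= sg->length) {	/* skip fully consumed entries */
			offset -= sg->length;
			sg = sg_next(sg);
		}
		return kmap_atomic(sg_page(sg)) + sg->offset + offset;
	}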
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 43c5ca9878bc..3bebc71ea033 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -440,8 +440,8 @@ check_scsi_name:
padding = ((-scsi_target_len) & 3);
if (padding)
scsi_target_len += padding;
- if (scsi_name_len > 256)
- scsi_name_len = 256;
+ if (scsi_target_len > 256)
+ scsi_target_len = 256;
buf[off-1] = scsi_target_len;
off += scsi_target_len;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c50fd9f11aab..2956250b7225 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -669,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
return;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
-
/*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
@@ -681,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp);
return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
+ } else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -1604,6 +1601,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872e2e47..b01659bd4f7c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void)
/* Register as a vio device to receive callbacks */
return platform_driver_register(&hvc_opal_driver);
}
-module_init(hvc_opal_init);
-
-static void __exit hvc_opal_exit(void)
-{
- platform_driver_unregister(&hvc_opal_driver);
-}
-module_exit(hvc_opal_exit);
+device_initcall(hvc_opal_init);
static void udbg_opal_putc(char c)
{
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb86ba49..08c87920b74a 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void)
return 0;
}
-module_init(hvc_rtas_init);
-
-/* This will tear down the tty portion of the driver */
-static void __exit hvc_rtas_exit(void)
-{
- /* Really the fun isn't over until the worker thread breaks down and
- * the tty cleans up */
- if (hvc_rtas_dev)
- hvc_remove(hvc_rtas_dev);
-}
-module_exit(hvc_rtas_exit);
+device_initcall(hvc_rtas_init);
/* This will happen prior to module init. There is no tty at this time? */
static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 72228276fe31..9cf573d06a29 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void)
return 0;
}
-module_init(hvc_udbg_init);
-
-static void __exit hvc_udbg_exit(void)
-{
- if (hvc_udbg_dev)
- hvc_remove(hvc_udbg_dev);
-}
-module_exit(hvc_udbg_exit);
+device_initcall(hvc_udbg_init);
static int __init hvc_udbg_console_init(void)
{
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9baad7a5..2dc2831840ca 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void)
#endif
return r;
}
-
-static void __exit xen_hvc_fini(void)
-{
- struct xencons_info *entry, *next;
-
- if (list_empty(&xenconsoles))
- return;
-
- list_for_each_entry_safe(entry, next, &xenconsoles, list) {
- xen_console_remove(entry);
- }
-}
+device_initcall(xen_hvc_init);
static int xen_cons_init(void)
{
@@ -598,10 +587,6 @@ static int xen_cons_init(void)
hvc_instantiate(HVC_COOKIE, 0, ops);
return 0;
}
-
-
-module_init(xen_hvc_init);
-module_exit(xen_hvc_fini);
console_initcall(xen_cons_init);
#ifdef CONFIG_EARLY_PRINTK
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f34461c5f14e..2ebe47b78a3e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
+ unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
u8 *dp = data;
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
+ if (len > 0) {
+ while (gsm_read_ea(&brk, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ modem <<= 7;
+ modem |= (brk & 0x7f);
+ }
tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
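The extra octet consumed above is the optional break signal that can follow the modem-status value in a 07.10 MSC message; like the preceding fields it is EA-encoded, so gsm_read_ea() shifts seven value bits per octet into the accumulator and returns non-zero on the octet whose EA bit (bit 0) is set. A small worked example, assuming the gsm_read_ea() helper defined earlier in this file:

	unsigned int brk = 0;
	int done;

	/* 0x05 = 0b00000101: EA bit set (last octet), value bits 7..1 give 2 */
	done = gsm_read_ea(&brk, 0x05);
	/* done == 1, brk == 0x02; the hunk above then ORs (brk & 0x7f) into 'modem' */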
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cb8017aa4434..d15624c1b751 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
size_t echoed;
- if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
- ldata->echo_mark == ldata->echo_tail)
+ if (ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
@@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
if (L_ECHO(tty)) {
echo_char(c, tty);
commit_echoes(tty);
- }
+ } else
+ process_echoes(tty);
isig(signal, tty);
return;
}
@@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
if (I_IXON(tty)) {
if (c == START_CHAR(tty)) {
start_tty(tty);
- commit_echoes(tty);
+ process_echoes(tty);
return 0;
}
if (c == STOP_CHAR(tty)) {
@@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* has been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
start_tty(tty);
+ process_echoes(tty);
+ }
/* The termios change make the tty ready for I/O */
if (waitqueue_active(&tty->write_wait))
@@ -1896,7 +1898,7 @@ err:
static inline int input_available_p(struct tty_struct *tty, int poll)
{
struct n_tty_data *ldata = tty->disc_data;
- int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1;
+ int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty)) {
if (ldata->canon_head != ldata->read_tail)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61ecd709a722..69932b7556cf 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_dl_write(up, quot);
/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD)
+ *
+ * We need to recalculate all of the registers, because DLM and DLL
+ * are already rounded to a whole integer.
+ *
+ * When recalculating we use a 32x clock instead of a 16x clock to
+ * allow 1-bit for rounding in the fractional part.
+ */
+ if (up->port.type == PORT_XR17V35X) {
+ unsigned int baud_x32 = (port->uartclk * 2) / baud;
+ u16 quot = baud_x32 / 32;
+ u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
+
+ serial_dl_write(up, quot);
+ serial_port_out(port, 0x2, quot_frac & 0xf);
+ }
+
+ /*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
* is written without DLAB set, this mode will be disabled.
*/
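The DLD arithmetic above is easier to follow with numbers. With a hypothetical 125 MHz uartclk and a 921600 baud target (illustrative values only): baud_x32 = 250000000 / 921600 = 271, so quot = 271 / 32 = 8 and quot_frac = DIV_ROUND_CLOSEST(271 % 32, 2) = 8, i.e. an effective 16x divisor of 8 + 8/16 = 8.5, giving roughly 919.1 kbaud (about 0.27% error, where a whole-integer divisor would be far coarser). The same computation as a sketch:

	unsigned int uartclk = 125000000, baud = 921600;	/* illustrative values */
	unsigned int baud_x32 = (uartclk * 2) / baud;		/* 271 */
	u16 quot = baud_x32 / 32;				/* 8, goes to DLL/DLM */
	u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);	/* 8, goes to DLD[3:0] */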
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index faa64e646100..ed3113576740 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int dw8250_runtime_suspend(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 50228eed3b6f..0ff3e3624d4c 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv,
{
unsigned int bar;
- if ((priv->dev->subsystem_device & 0xff00) == 0x3000) {
+ if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) &&
+ (priv->dev->subsystem_device & 0xff00) == 0x3000) {
/* netmos apparently orders BARs by datasheet layout, so serial
* ports get BARs 0 and 3 (or 1 and 4 for memmapped)
*/
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa511ebab67c..77f035158d6c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port)
return retval;
}
disable_irq(up->wakeirq);
- } else {
- dev_info(up->port.dev, "no wakeirq for uart%d\n",
- up->port.line);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
flags & SER_RS485_RTS_AFTER_SEND);
if (ret < 0)
return ret;
- } else
+ } else if (up->rts_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else {
up->rts_gpio = -EINVAL;
+ }
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
@@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
up->wakeirq = wakeirq;
+ if (!up->wakeirq)
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
up->port.regshift = 2;
up->port.fifosize = 64;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 49a2ffd101a7..b7bfe24d4ebc 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -542,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
- sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ spin_lock(&port->lock);
+ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ spin_unlock(&port->lock);
if (sirfport->rx_io_count == 4) {
spin_lock_irqsave(&sirfport->rx_lock, flags);
sirfport->rx_io_count = 0;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 61b1137d7e56..23b5d32954bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
/* fall through */
case 2: /* erase whole display */
count = vc->vc_cols * vc->vc_rows;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 80de2f88ed2c..4ab2cb62dfce 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -105,7 +105,7 @@ static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
do {
/* flush any pending transfer */
- hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
cpu_relax();
} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
@@ -205,7 +205,7 @@ static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
- hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
cpu_relax();
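Both writes above change the mask argument of hw_write() from BIT(n) to ~0. ENDPTFLUSH and ENDPTPRIME are write-one-to-trigger registers whose readback reports which endpoints still have a flush or prime in flight, so the old read-modify-write could write those bits back and retrigger unrelated endpoints. Passing a full mask makes hw_write() store the value without reading first; a simplified model of the masking behaviour assumed here:

	/* simplified model of chipidea's hw_write(ci, reg, mask, data) */
	static inline void hw_write_model(void __iomem *reg, u32 mask, u32 data)
	{
		if (~mask)		/* partial mask: read-modify-write */
			data = (readl(reg) & ~mask) | (data & mask);
		writel(data, reg);	/* mask == ~0: plain write, nothing read back */
	}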
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5d01558cef66..ab90a0156828 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
if (fields > 2 && bInterfaceClass) {
- if (bInterfaceClass > 255)
- return -EINVAL;
+ if (bInterfaceClass > 255) {
+ retval = -EINVAL;
+ goto fail;
+ }
dynid->id.bInterfaceClass = (u8)bInterfaceClass;
dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
@@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (fields > 4) {
const struct usb_device_id *id = id_table;
- if (!id)
- return -ENODEV;
+ if (!id) {
+ retval = -ENODEV;
+ goto fail;
+ }
for (; id->match_flags; id++)
if (id->idVendor == refVendor && id->idProduct == refProduct)
break;
- if (id->match_flags)
+ if (id->match_flags) {
dynid->id.driver_info = id->driver_info;
- else
- return -ENODEV;
+ } else {
+ retval = -ENODEV;
+ goto fail;
+ }
}
spin_lock(&dynids->lock);
@@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (retval)
return retval;
return count;
+
+fail:
+ kfree(dynid);
+ return retval;
}
EXPORT_SYMBOL_GPL(usb_store_new_id);
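The new fail label matters because dynid has already been allocated by the time these checks run; the old early returns leaked it on every error path after the allocation. The general shape of the fix, sketched with hypothetical names:

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	if (!input_is_valid) {		/* any failure after the allocation ... */
		retval = -EINVAL;
		goto fail;		/* ... must take the cleanup path */
	}

	return count;

fail:
	kfree(obj);			/* undo the allocation exactly once */
	return retval;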
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 199aaea6bfe0..2518c3250750 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return retval;
}
- usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
}
retval = usb_new_device (usb_dev);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index babba885978d..64ea21971be2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-int usb_device_supports_lpm(struct usb_device *udev)
+static int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
@@ -149,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev)
"Power management will be impacted.\n");
return 0;
}
-
- /* udev is root hub */
- if (!udev->parent)
- return 1;
-
if (udev->parent->lpm_capable)
return 1;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c49383669cd8..823857767a16 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
-extern int usb_device_supports_lpm(struct usb_device *udev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8565d87f94b4..1d129884cc39 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
int retval = 0;
if (!select_phy)
- return -ENODEV;
+ return 0;
usbcfg = readl(hsotg->regs + GUSBCFG);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index f59484d43b35..4d918ed8d343 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int is_control = usb_endpoint_xfer_control(&ep->desc);
- int is_out = usb_endpoint_dir_out(&ep->desc);
- int epnum = usb_endpoint_num(&ep->desc);
- struct usb_device *udev;
unsigned long flags;
dev_dbg(hsotg->dev,
"DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
ep->desc.bEndpointAddress);
- udev = to_usb_device(hsotg->dev);
-
spin_lock_irqsave(&hsotg->lock, flags);
-
- usb_settoggle(udev, epnum, is_out, 0);
- if (is_control)
- usb_settoggle(udev, epnum, !is_out, 0);
dwc2_hcd_endpoint_reset(hsotg, ep);
-
spin_unlock_irqrestore(&hsotg->lock, flags);
}
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index d01d0d3f2cf0..eaba547ce26b 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
int retval;
int irq;
+ if (usb_disabled())
+ return -ENODEV;
+
match = of_match_device(dwc2_of_match_table, &dev->dev);
if (match && match->data) {
params = match->data;
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index 888fbb43b338..e969eb809a85 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -360,24 +360,30 @@ static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
bcm_writel(val, udc->iudma_regs + off);
}
-static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
@@ -638,7 +644,7 @@ static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
} while (!last_bd);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
- ENETDMAC_CHANCFG_REG(iudma->ch_idx));
+ ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
/**
@@ -694,9 +700,9 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
/* stop DMA, then wait for the hardware to wrap up */
- usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
+ usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
- while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
+ while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
ENETDMAC_CHANCFG_EN_MASK) {
udelay(1);
@@ -713,10 +719,10 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
ch_idx);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
- ENETDMAC_CHANCFG_REG(ch_idx));
+ ENETDMAC_CHANCFG_REG, ch_idx);
}
}
- usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
+ usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
/* don't leave "live" HW-owned entries for the next guy to step on */
for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
@@ -728,11 +734,11 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
/* set up IRQs, UBUS burst size, and BD base for this channel */
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IRMASK_REG(ch_idx));
- usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
+ ENETDMAC_IRMASK_REG, ch_idx);
+ usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
- usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
- usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
+ usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
+ usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
/**
@@ -2035,7 +2041,7 @@ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
spin_lock(&udc->lock);
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IR_REG(iudma->ch_idx));
+ ENETDMAC_IR_REG, iudma->ch_idx);
bep = iudma->bep;
rc = iudma_read(udc, iudma);
@@ -2175,18 +2181,18 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
seq_printf(s, " [ep%d]:\n",
max_t(int, iudma_defaults[ch_idx].ep_num, 0));
seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
- usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
+ usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
- sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
- sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
+ sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
+ sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
- usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
+ usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
sram2 >> 16, sram2 & 0xffff,
sram3 >> 16, sram3 & 0xffff,
- usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
+ usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
iudma->n_bds);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 306a2b52125c..2b4334394076 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -585,7 +585,6 @@ static ssize_t ffs_epfile_io(struct file *file,
char __user *buf, size_t len, int read)
{
struct ffs_epfile *epfile = file->private_data;
- struct usb_gadget *gadget = epfile->ffs->gadget;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len;
@@ -622,6 +621,12 @@ static ssize_t ffs_epfile_io(struct file *file,
/* Allocate & copy */
if (!halt) {
/*
+ * if we _do_ wait above, the epfile->ffs->gadget might be NULL
+ * before the waiting completes, so do not assign to 'gadget' earlier
+ */
+ struct usb_gadget *gadget = epfile->ffs->gadget;
+
+ /*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index bf7a56b6d48a..69b76efd11e9 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1157,7 +1157,7 @@ static int __init printer_bind_config(struct usb_configuration *c)
usb_gadget_set_selfpowered(gadget);
- if (gadget->is_otg) {
+ if (gadget_is_otg(gadget)) {
otg_descriptor.bmAttributes |= USB_OTG_HNP;
printer_cfg_driver.descriptors = otg_desc;
printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index f04b2c3154de..dd9678f85c58 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1629,7 +1629,7 @@ static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
ep->ep.desc = NULL;
ep->halted = 0;
INIT_LIST_HEAD(&ep->queue);
- usb_ep_set_maxpacket_limit(&ep->ep, &ep->ep.maxpacket);
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
}
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 471142725ffe..81cda09b47e3 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -685,8 +685,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, masked_status, pcd_status = 0, cmd;
int bh;
+ unsigned long flags;
- spin_lock (&ehci->lock);
+ /*
+ * For threadirqs option we use spin_lock_irqsave() variant to prevent
+ * deadlock with ehci hrtimer callback, because hrtimer callbacks run
+ * in interrupt context even when threadirqs is specified. We can go
+ * back to spin_lock() variant when hrtimer callbacks become threaded.
+ */
+ spin_lock_irqsave(&ehci->lock, flags);
status = ehci_readl(ehci, &ehci->regs->status);
@@ -704,7 +711,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
- spin_unlock(&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return IRQ_NONE;
}
@@ -815,7 +822,7 @@ dead:
if (bh)
ehci_work (ehci);
- spin_unlock (&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 47b858fc50b2..7ae0c4d51741 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -238,6 +238,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
int port;
int mask;
int changed;
+ bool fs_idle_delay;
ehci_dbg(ehci, "suspend root hub\n");
@@ -272,6 +273,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
changed = 0;
+ fs_idle_delay = false;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
@@ -300,16 +302,32 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
if (t1 != t2) {
+ /*
+ * On some controllers, Wake-On-Disconnect will
+ * generate false wakeup signals until the bus
+ * switches over to full-speed idle. For their
+ * sake, add a delay if we need one.
+ */
+ if ((t2 & PORT_WKDISC_E) &&
+ ehci_port_speed(ehci, t2) ==
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
ehci_writel(ehci, t2, reg);
changed = 1;
}
}
+ spin_unlock_irq(&ehci->lock);
+
+ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
+ /*
+ * Wait for HCD to enter low-power mode or for the bus
+ * to switch to full-speed idle.
+ */
+ usleep_range(5000, 5500);
+ }
if (changed && ehci->has_tdi_phy_lpm) {
- spin_unlock_irq(&ehci->lock);
- msleep(5); /* 5 ms for HCD to enter low-power mode */
spin_lock_irq(&ehci->lock);
-
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
@@ -322,8 +340,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
+ spin_unlock_irq(&ehci->lock);
}
- spin_unlock_irq(&ehci->lock);
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
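Moving the delay after spin_unlock_irq() is required, not just tidier: usleep_range() sleeps, and sleeping while holding a spinlock (with interrupts disabled, no less) is not allowed. The rework also folds the old 5 ms wait for PHY low-power entry and the new full-speed-idle settling time into a single sleep. A compressed view of the ordering assumed here (the real code re-takes the lock only in the has_tdi_phy_lpm branch, and need_delay is a hypothetical stand-in for the two conditions):

	spin_unlock_irq(&ehci->lock);		/* never sleep under a spinlock */
	if (need_delay)
		usleep_range(5000, 5500);	/* sleeping is fine here */
	spin_lock_irq(&ehci->lock);		/* re-take before touching hostpc */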
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index b016d38199f2..eb009a457fb5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
addr, (unsigned int)temp);
addr = &ir_set->erst_base;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
addr, temp_64);
addr = &ir_set->erst_dequeue;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
addr, temp_64);
}
@@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
{
u64 val;
- val = readq(&xhci->op_regs->cmd_ring);
+ val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
lower_32_bits(val));
xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 873c272b3ef5..bce4391a0e7d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1958,7 +1958,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = readq(&xhci->ir_set->erst_dequeue);
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
@@ -1967,7 +1967,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, "
"preserving EHB bit");
- writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
@@ -2269,7 +2269,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- writeq(dma, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
@@ -2312,13 +2312,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%x", val);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
@@ -2396,10 +2396,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = readq(&xhci->ir_set->erst_base);
+ val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- writeq(val_64, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 3c898c12a06b..04f986d9234f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xc0cd)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a0b248c34526..0ed64eb68e48 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
return 0;
}
- temp_64 = readq(&xhci->op_regs->cmd_ring);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (!(temp_64 & CMD_RING_RUNNING)) {
xhci_dbg(xhci, "Command ring had been stopped\n");
return 0;
}
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
- writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should
* time the completion of all xHCI commands, including
@@ -2864,8 +2865,9 @@ hw_died:
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
- writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64 | ERST_EHB,
+ &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
return IRQ_HANDLED;
@@ -2877,7 +2879,7 @@ hw_died:
*/
while (xhci_handle_event(xhci) > 0) {}
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != xhci->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -2892,7 +2894,7 @@ hw_died:
/* Clear the event handler busy flag (RW1C); event ring is empty. */
temp_64 |= ERST_EHB;
- writeq(temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
@@ -2965,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
while (1) {
- if (room_on_ring(xhci, ep_ring, num_trbs)) {
- union xhci_trb *trb = ep_ring->enqueue;
- unsigned int usable = ep_ring->enq_seg->trbs +
- TRBS_PER_SEGMENT - 1 - trb;
- u32 nop_cmd;
-
- /*
- * Section 4.11.7.1 TD Fragments states that a link
- * TRB must only occur at the boundary between
- * data bursts (eg 512 bytes for 480M).
- * While it is possible to split a large fragment
- * we don't know the size yet.
- * Simplest solution is to fill the trb before the
- * LINK with nop commands.
- */
- if (num_trbs == 1 || num_trbs <= usable || usable == 0)
- break;
-
- if (ep_ring->type != TYPE_BULK)
- /*
- * While isoc transfers might have a buffer that
- * crosses a 64k boundary it is unlikely.
- * Since we can't add NOPs without generating
- * gaps in the traffic just hope it never
- * happens at the end of the ring.
- * This could be fixed by writing a LINK TRB
- * instead of the first NOP - however the
- * TRB_TYPE_LINK_LE32() calls would all need
- * changing to check the ring length.
- */
- break;
-
- if (num_trbs >= TRBS_PER_SEGMENT) {
- xhci_err(xhci, "Too many fragments %d, max %d\n",
- num_trbs, TRBS_PER_SEGMENT - 1);
- return -EINVAL;
- }
-
- nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
- ep_ring->cycle_state);
- ep_ring->num_trbs_free -= usable;
- do {
- trb->generic.field[0] = 0;
- trb->generic.field[1] = 0;
- trb->generic.field[2] = 0;
- trb->generic.field[3] = nop_cmd;
- trb++;
- } while (--usable);
- ep_ring->enqueue = trb;
- if (room_on_ring(xhci, ep_ring, num_trbs))
- break;
- }
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad364394885a..6fe577d46fa2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -611,7 +611,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring);
xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
@@ -756,11 +756,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
{
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
- xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr);
+ xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
- xhci->s3.erst_base = readq(&xhci->ir_set->erst_base);
- xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue);
+ xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+ xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}
@@ -769,11 +769,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
{
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
- writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
- writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base);
- writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
@@ -783,7 +783,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
u64 val_64;
/* step 2: initialize command ring buffer */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue) &
@@ -792,7 +792,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%llx",
(long unsigned long) val_64);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
@@ -3842,7 +3842,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
if (ret) {
return ret;
}
- temp_64 = readq(&xhci->op_regs->dcbaa_ptr);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -4730,11 +4730,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
struct device *dev = hcd->self.controller;
int retval;
- /* Limit the block layer scatter-gather lists to half a segment. */
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2;
-
- /* support to build packet from discontinuous buffers */
- hcd->self.no_sg_constraint = 1;
+ /* Accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
@@ -4760,6 +4757,14 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
+ xhci = hcd_to_xhci(hcd);
+ /*
+ * Support arbitrarily aligned sg-list entries on hosts without
+ * TD fragment rules (which are currently unsupported).
+ */
+ if (xhci->hci_version < 0x100)
+ hcd->self.no_sg_constraint = 1;
+
return 0;
}
@@ -4788,6 +4793,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+ if (xhci->hci_version < 0x100)
+ hcd->self.no_sg_constraint = 1;
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f8416639bf31..58ed9d088e63 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -28,17 +28,6 @@
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
-/*
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers. Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
- */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
-
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
@@ -1279,7 +1268,7 @@ union xhci_trb {
* since the command ring is 64-byte aligned.
* It must also be greater than 16.
*/
-#define TRBS_PER_SEGMENT 256
+#define TRBS_PER_SEGMENT 64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
@@ -1614,6 +1603,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
#define xhci_warn_ratelimited(xhci, fmt, args...) \
dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers. Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+ __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u64 val_lo = readl(ptr);
+ u64 val_hi = readl(ptr + 1);
+ return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+ const u64 val, __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u32 val_lo = lower_32_bits(val);
+ u32 val_hi = upper_32_bits(val);
+
+ writel(val_lo, ptr);
+ writel(val_hi, ptr + 1);
+}
+
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
return xhci->quirks & XHCI_LINK_TRB_QUIRK;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fc192ad9cc6a..239ad0b1ceb6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -477,8 +477,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(20);
schedule_delayed_work(
- &musb->finish_resume_work, 20);
+ &musb->finish_resume_work,
+ msecs_to_jiffies(20));
musb->xceiv->state = OTG_STATE_A_HOST;
musb->is_active = 1;
@@ -2157,11 +2160,19 @@ static void musb_restore_context(struct musb *musb)
void __iomem *musb_base = musb->mregs;
void __iomem *ep_target_regs;
void __iomem *epio;
+ u8 power;
musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
- musb_writeb(musb_base, MUSB_POWER, musb->context.power);
+
+ /* Don't affect SUSPENDM/RESUME bits in POWER reg */
+ power = musb_readb(musb_base, MUSB_POWER);
+ power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
+ musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
+ power |= musb->context.power;
+ musb_writeb(musb_base, MUSB_POWER, power);
+
musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
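The restore path now merges two sources of POWER bits: SUSPENDM and RESUME are kept from the live register, since they track the current bus state and must not be overwritten from a stale snapshot, while every other bit comes from the saved context. A minimal sketch of the merge, assuming the usual MUSB_POWER_* masks:

	u8 keep = MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
	u8 power;

	power  = musb_readb(musb_base, MUSB_POWER) & keep;	/* live suspend/resume state */
	power |= musb->context.power & ~keep;			/* everything else from the snapshot */
	musb_writeb(musb_base, MUSB_POWER, power);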
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ed455724017b..abb38c3833ef 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1183,6 +1183,9 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
+ /* disable ping token in status phase */
+ csr |= MUSB_CSR0_H_DIS_PING;
+
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index eb634433ef09..e2d2d8c9891b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -135,7 +135,8 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
- schedule_delayed_work(&musb->finish_resume_work, 20);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(20));
}
}
@@ -158,7 +159,6 @@ void musb_port_reset(struct musb *musb, bool do_reset)
*/
power = musb_readb(mbase, MUSB_POWER);
if (do_reset) {
-
/*
* If RESUME is set, we must make sure it stays minimum 20 ms.
* Then we must clear RESUME and wait a bit to let musb start
@@ -167,11 +167,22 @@ void musb_port_reset(struct musb *musb, bool do_reset)
* detected".
*/
if (power & MUSB_POWER_RESUME) {
- while (time_before(jiffies, musb->rh_timer))
- msleep(1);
+ long remain = (unsigned long) musb->rh_timer - jiffies;
+
+ if (musb->rh_timer > 0 && remain > 0) {
+ /* take into account the minimum delay after resume */
+ schedule_delayed_work(
+ &musb->deassert_reset_work, remain);
+ return;
+ }
+
musb_writeb(mbase, MUSB_POWER,
- power & ~MUSB_POWER_RESUME);
- msleep(1);
+ power & ~MUSB_POWER_RESUME);
+
+ /* Give the core 1 ms to clear MUSB_POWER_RESUME */
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(1));
+ return;
}
power &= 0xf0;
@@ -180,7 +191,8 @@ void musb_port_reset(struct musb *musb, bool do_reset)
musb->port1_status |= USB_PORT_STAT_RESET;
musb->port1_status &= ~USB_PORT_STAT_ENABLE;
- schedule_delayed_work(&musb->deassert_reset_work, 50);
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(50));
} else {
dev_dbg(musb->controller, "root port reset stopped\n");
musb_writeb(mbase, MUSB_POWER,
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2a408cdaf7b2..8aa59a2c5eb2 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -659,7 +659,6 @@ static int omap2430_runtime_suspend(struct device *dev)
OTG_INTERFSEL);
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
}
return 0;
@@ -674,7 +673,6 @@ static int omap2430_runtime_resume(struct device *dev)
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
- phy_power_on(musb->phy);
}
return 0;
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8546c8dccd51..d204f745ed05 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -159,32 +159,6 @@ put_3p3:
return rc;
}
-#ifdef CONFIG_PM_SLEEP
-#define USB_PHY_SUSP_DIG_VOL 500000
-static int msm_hsusb_config_vddcx(int high)
-{
- int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
- int min_vol;
- int ret;
-
- if (high)
- min_vol = USB_PHY_VDD_DIG_VOL_MIN;
- else
- min_vol = USB_PHY_SUSP_DIG_VOL;
-
- ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
- if (ret) {
- pr_err("%s: unable to set the voltage for regulator "
- "HSUSB_VDDCX\n", __func__);
- return ret;
- }
-
- pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-
- return ret;
-}
-#endif
-
static int msm_hsusb_ldo_set_mode(int on)
{
int ret = 0;
@@ -440,7 +414,32 @@ static int msm_otg_reset(struct usb_phy *phy)
#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
+
+#define USB_PHY_SUSP_DIG_VOL 500000
+static int msm_hsusb_config_vddcx(int high)
+{
+ int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
+ int min_vol;
+ int ret;
+
+ if (high)
+ min_vol = USB_PHY_VDD_DIG_VOL_MIN;
+ else
+ min_vol = USB_PHY_SUSP_DIG_VOL;
+
+ ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
+ if (ret) {
+ pr_err("%s: unable to set the voltage for regulator "
+ "HSUSB_VDDCX\n", __func__);
+ return ret;
+ }
+
+ pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
+
+ return ret;
+}
+
static int msm_otg_suspend(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
@@ -1733,22 +1732,18 @@ static int msm_otg_pm_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops msm_otg_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
msm_otg_runtime_idle)
};
-#endif
static struct platform_driver msm_otg_driver = {
.remove = msm_otg_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &msm_otg_dev_pm_ops,
-#endif
},
};
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index e6f61e4361df..8afa813d690b 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
phy = __usb_find_phy(&phy_list, type);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver of type %s\n",
+ pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
goto err0;
}
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver\n");
+ dev_dbg(dev, "unable to find transceiver\n");
goto err0;
}
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index,
unsigned long flags;
phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
- if (!phy_bind) {
- pr_err("phy_bind(): No memory for phy_bind");
+ if (!phy_bind)
return -ENOMEM;
- }
phy_bind->dev_name = dev_name;
phy_bind->phy_dev_name = phy_dev_name;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ce0d7b0db012..44ab12986805 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
@@ -904,6 +907,8 @@ static const struct usb_device_id id_table_combined[] = {
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
+ /* Cressi Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..e599fbfcde5f 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
#define TI_XDS100V2_PID 0xa6d0
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
/* US Interface Navigator (http://www.usinterface.com/) */
#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID */
+#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID */
+
/* an infrared receiver for user access control with IR tags */
#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
@@ -1313,3 +1320,9 @@
* Manufacturer: Smart GSM Team
*/
#define FTDI_Z3X_PID 0x0011
+
+/*
+ * Product: Cressi PC Interface
+ * Manufacturer: Cressi
+ */
+#define FTDI_CRESSI_PID 0x87d0
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5c86f57e4afa..68fc9fe65936 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1525,7 +1526,8 @@ static const struct usb_device_id option_ids[] = {
/* Cinterion */
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c65437cfd4a2..968a40201e5f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index f112b079ddfc..fb79775447b0 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
- { USB_DEVICE(0x0fcf, 0x1008) }
+ { USB_DEVICE(0x0fcf, 0x1008) }, \
+ { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
/* Siemens USB/MPI adapter */
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8470e1b114f2..1dd0604d1911 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,7 +18,9 @@ config USB_STORAGE
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
+ (BLK_DEV_SD) for most USB storage devices. Some devices also
+ will require 'Probe all LUNs on each SCSI device'
+ (SCSI_MULTI_LUN).
To compile this driver as a module, choose M here: the
module will be called usb-storage.
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 18509e6c21ab..9d38ddc8da49 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
+ struct us_data *us = host_to_us(sdev->host);
+
/*
* Set the INQUIRY transfer length to 36. We don't use any of
* the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ /* Tell the SCSI layer if we know there is more than one LUN */
+ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+ sdev->sdev_bflags |= BLIST_FORCELUN;
+
return 0;
}
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75066a8..82e8ed0324e3 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ad06255c2ade..adbeb255616a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
+UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
+ "Research In Motion",
+ "BlackBerry Bold 9000",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9a68409580d5..a0fa5de210cf 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,12 @@ enum {
};
struct vhost_net_ubuf_ref {
- struct kref kref;
+ /* refcount follows semantics similar to kref:
+ * 0: object is released
+ * 1: no outstanding ubufs
+ * >1: outstanding ubufs
+ */
+ atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
};
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
vhost_net_zcopy_mask |= 0x1 << vq;
}
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
-{
- struct vhost_net_ubuf_ref *ubufs;
-
- ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
- wake_up(&ubufs->wait);
-}
-
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
if (!ubufs)
return ERR_PTR(-ENOMEM);
- kref_init(&ubufs->kref);
+ atomic_set(&ubufs->refcount, 1);
init_waitqueue_head(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+ int r = atomic_sub_return(1, &ubufs->refcount);
+ if (unlikely(!r))
+ wake_up(&ubufs->wait);
+ return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
- wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+ vhost_net_ubuf_put(ubufs);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
- int cnt = atomic_read(&ubufs->kref.refcount);
+ int cnt;
+
+ rcu_read_lock_bh();
/* set len to mark this desc buffers done DMA */
vq->heads[ubuf->desc].len = success ?
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
+ cnt = vhost_net_ubuf_put(ubufs);
/*
* Trigger polling thread if guest stopped submitting new buffers:
- * in this case, the refcount after decrement will eventually reach 1
- * so here it is 2.
+ * in this case, the refcount after decrement will eventually reach 1.
* We also trigger polling periodically after each 16 packets
* (the value 16 here is more or less arbitrary, it's tuned to trigger
* less than 10% of times).
*/
- if (cnt <= 2 || !(cnt % 16))
+ if (cnt <= 1 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
+
+ rcu_read_unlock_bh();
}
/* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = ubuf;
msg.msg_controllen = sizeof(ubuf);
ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
+ atomic_inc(&ubufs->refcount);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
} else {
msg.msg_control = NULL;
@@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n)
vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
- kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+ atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
@@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
fput(tx_sock->file);
if (rx_sock)
fput(rx_sock->file);
+ /* Make sure no callbacks are outstanding */
+ synchronize_rcu_bh();
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
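Aside: the vhost_net hunks above replace a struct kref with a bare atomic_t plus a wait queue, so the final put can both report the new count to the caller and wake anyone waiting for outstanding ubufs to drain. A minimal sketch of that pattern with illustrative names (not the vhost code itself):

#include <linux/atomic.h>
#include <linux/wait.h>

struct foo_ref {
	atomic_t refcount;		/* 0: released, 1: idle, >1: outstanding users */
	wait_queue_head_t wait;
};

static void foo_ref_init(struct foo_ref *r)
{
	atomic_set(&r->refcount, 1);
	init_waitqueue_head(&r->wait);
}

static int foo_ref_put(struct foo_ref *r)
{
	int v = atomic_sub_return(1, &r->refcount);

	if (!v)				/* last reference dropped */
		wake_up(&r->wait);
	return v;			/* callers can act on the new count */
}

static void foo_ref_put_and_wait(struct foo_ref *r)
{
	foo_ref_put(r);			/* drop our own reference ... */
	wait_event(r->wait, !atomic_read(&r->refcount));	/* ... then wait for the rest */
}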
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0a025b8e2a12..e48d4a672580 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1001,6 +1001,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
break;
}
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ if (unlikely(v_req.lun[0] != 1)) {
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
+ }
+
/* Extract the tpgt */
target = v_req.lun[1];
tpg = ACCESS_ONCE(vs_tpg[target]);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22262a3a0e2d..dade5b7699bc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -364,7 +364,7 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
- depends on FB && IMX_HAVE_PLATFORM_IMX_FB
+ depends on FB && ARCH_MXC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e9e640..75c8a8e7efc0 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI
config EXYNOS_LCD_S6E8AX0
bool "S6E8AX0 MIPI AMOLED LCD Driver"
- depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE)
+ depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
+ depends on (LCD_CLASS_DEVICE = y)
default n
help
If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index bbeb8dd7f108..77d6221618f4 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
@@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
- u16 in_height = DIV_ROUND_UP(height, *decim_y);
+ u16 in_height = height / *decim_y;
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
do {
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_width = width / *decim_x;
} while (*decim_x <= *x_predecim &&
in_width > maxsinglelinewidth && ++*decim_x);
@@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (r)
return r;
- in_width = DIV_ROUND_UP(in_width, x_predecim);
- in_height = DIV_ROUND_UP(in_height, y_predecim);
+ in_width = in_width / x_predecim;
+ in_height = in_height / y_predecim;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY ||
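Aside on the decimation hunks above (the rationale here is inferred, not stated in the diff): predecimation means the hardware fetches only every *decim_x-th pixel and every *decim_y-th line, so the effective input size is the floor of the division. For example, with width = 1022 and *decim_x = 4, DIV_ROUND_UP(1022, 4) = 256 while 1022 / 4 = 255; 255 is what the hardware actually reads, and using the rounded-up figure can make the line-width and core-clock checks reject or mis-size an otherwise valid configuration.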
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 7411f2674e16..23ef21ffc2c4 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
struct dsi_clock_info dsi_cinfo;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index efb9ee9e3c96..ba806c9e7f54 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@ static struct {
struct sdi_clk_calc_ctx {
unsigned long pck_min, pck_max;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index a06edbfa95ca..1b5d48c578e1 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 16830d8b777c..9911cd5fddb5 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
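Aside: both bridge fixes above correct the same off-by-`done` mistake — once a leading byte has been copied, alignment must be tested against the address of the next access (addr + done), not the original base. A simplified sketch of the copy loop being fixed, with hypothetical names and the trailing cases omitted:

#include <linux/io.h>
#include <linux/types.h>

static void foo_mmio_read(void __iomem *addr, u8 *buf, size_t count)
{
	size_t done = 0;

	/* leading byte to reach 16-bit alignment */
	if (count && ((uintptr_t)addr & 0x1)) {
		buf[0] = ioread8(addr);
		done = 1;
	}
	/* leading 16-bit word to reach 32-bit alignment; note the test
	 * is on addr + done, the address actually about to be read */
	if (count - done >= 2 && ((uintptr_t)(addr + done) & 0x2)) {
		*(u16 *)(buf + done) = ioread16(addr + done);
		done += 2;
	}
	/* aligned 32-bit bulk copy */
	while (count - done >= 4) {
		*(u32 *)(buf + done) = ioread32(addr + done);
		done += 4;
	}
	/* trailing 16-bit word and final byte handling omitted */
}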
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4c4c566c52a3..79d25894343a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,7 @@ config SA1100_WATCHDOG
config DW_WATCHDOG
tristate "Synopsys DesignWare watchdog"
+ depends on HAS_IOMEM
help
Say Y here to include support for the Synopsys DesignWare
watchdog timer found in many chips.
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index aaf2995d37f4..68b45fc9ba6a 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -402,7 +402,7 @@ static int __init wdt_init(void)
if (!found) {
pr_err("No W83697HF/HG could be found\n");
- ret = -EIO;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d75c811bfa56..45e00afa7f2d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
-obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 4672e003c0ad..f4a9e3311297 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = ret;
goto out;
}
+ /* New interdomain events are bound to VCPU 0. */
+ bind_evtchn_to_cpu(evtchn, 0);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 34a2704fbc88..073b4a19a8b0 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map)
}
pr_debug("map %d+%d\n", map->index, map->count);
- err = gnttab_map_refs_userspace(map->map_ops,
- use_ptemod ? map->kmap_ops : NULL,
- map->pages,
- map->count);
+ err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ map->pages, map->count);
if (err)
return err;
@@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
}
- err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
- use_ptemod ? map->kmap_ops + offset : NULL,
- map->pages + offset,
- pages);
+ err = gnttab_unmap_refs(map->unmap_ops + offset,
+ use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
+ pages);
if (err)
return err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8ee13e2e45e2..b84e3ab839aa 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
-int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count,
- bool m2p_override)
+ struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
pte_t *pte;
- unsigned long mfn, pfn;
+ unsigned long mfn;
- BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret)
return ret;
@@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
map_ops[i].dev_bus_addr >> PAGE_SHIFT);
}
- return 0;
+ return ret;
}
- if (m2p_override &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
@@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
- pfn = page_to_pfn(pages[i]);
-
- WARN_ON(PagePrivate(pages[i]));
- SetPagePrivate(pages[i]);
- set_page_private(pages[i], mfn);
-
- pages[i]->index = pfn_to_mfn(pfn);
- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
- ret = -ENOMEM;
- goto out;
- }
- if (m2p_override)
- ret = m2p_add_override(mfn, pages[i], kmap_ops ?
- &kmap_ops[i] : NULL);
+ ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
goto out;
}
@@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
}
-
-int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_map_refs(map_ops, NULL, pages, count, false);
-}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
-
-int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count,
- bool m2p_override)
+ struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
- unsigned long pfn, mfn;
- BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
return ret;
@@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
}
- return 0;
+ return ret;
}
- if (m2p_override &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
- pfn = page_to_pfn(pages[i]);
- mfn = get_phys_to_machine(pfn);
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
- ret = -EINVAL;
- goto out;
- }
-
- set_page_private(pages[i], INVALID_P2M_ENTRY);
- WARN_ON(!PagePrivate(pages[i]));
- ClearPagePrivate(pages[i]);
- set_phys_to_machine(pfn, pages[i]->index);
- if (m2p_override)
- ret = m2p_remove_override(pages[i],
- kmap_ops ?
- &kmap_ops[i] : NULL,
- mfn);
+ ret = m2p_remove_override(pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
goto out;
}
@@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
}
-
-int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
-}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
-
static unsigned nr_status_frames(unsigned nr_grant_frames)
{
BUG_ON(grefs_per_grant_frame == 0);
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc594549..000000000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <xen/xencomm.h>
-#include <xen/interface/xen.h>
-#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
-
-static int xencomm_init(struct xencomm_desc *desc,
- void *buffer, unsigned long bytes)
-{
- unsigned long recorded = 0;
- int i = 0;
-
- while ((recorded < bytes) && (i < desc->nr_addrs)) {
- unsigned long vaddr = (unsigned long)buffer + recorded;
- unsigned long paddr;
- int offset;
- int chunksz;
-
- offset = vaddr % PAGE_SIZE; /* handle partial pages */
- chunksz = min(PAGE_SIZE - offset, bytes - recorded);
-
- paddr = xencomm_vtop(vaddr);
- if (paddr == ~0UL) {
- printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
- __func__, vaddr);
- return -EINVAL;
- }
-
- desc->address[i++] = paddr;
- recorded += chunksz;
- }
-
- if (recorded < bytes) {
- printk(KERN_DEBUG
- "%s: could only translate %ld of %ld bytes\n",
- __func__, recorded, bytes);
- return -ENOSPC;
- }
-
- /* mark remaining addresses invalid (just for safety) */
- while (i < desc->nr_addrs)
- desc->address[i++] = XENCOMM_INVALID;
-
- desc->magic = XENCOMM_MAGIC;
-
- return 0;
-}
-
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
- void *buffer, unsigned long bytes)
-{
- struct xencomm_desc *desc;
- unsigned long buffer_ulong = (unsigned long)buffer;
- unsigned long start = buffer_ulong & PAGE_MASK;
- unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
- unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
- unsigned long size = sizeof(*desc) +
- sizeof(desc->address[0]) * nr_addrs;
-
- /*
- * slab allocator returns at least sizeof(void*) aligned pointer.
- * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
- * cross page boundary.
- */
- if (sizeof(*desc) > sizeof(void *)) {
- unsigned long order = get_order(size);
- desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
- order);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs =
- ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
- sizeof(*desc->address);
- } else {
- desc = kmalloc(size, gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = nr_addrs;
- }
- return desc;
-}
-
-void xencomm_free(struct xencomm_handle *desc)
-{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
- struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
- if (sizeof(*desc__) > sizeof(void *)) {
- unsigned long size = sizeof(*desc__) +
- sizeof(desc__->address[0]) * desc__->nr_addrs;
- unsigned long order = get_order(size);
- free_pages((unsigned long)__va(desc), order);
- } else
- kfree(__va(desc));
- }
-}
-
-static int xencomm_create(void *buffer, unsigned long bytes,
- struct xencomm_desc **ret, gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
- int rc;
-
- pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
-
- if (bytes == 0) {
- /* don't create a descriptor; Xen recognizes NULL. */
- BUG_ON(buffer != NULL);
- *ret = NULL;
- return 0;
- }
-
- BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
-
- desc = xencomm_alloc(gfp_mask, buffer, bytes);
- if (!desc) {
- printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
- return -ENOMEM;
- }
-
- rc = xencomm_init(desc, buffer, bytes);
- if (rc) {
- printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
- xencomm_free((struct xencomm_handle *)__pa(desc));
- return rc;
- }
-
- *ret = desc;
- return 0;
-}
-
-static struct xencomm_handle *xencomm_create_inline(void *ptr)
-{
- unsigned long paddr;
-
- BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
-
- paddr = (unsigned long)xencomm_pa(ptr);
- BUG_ON(paddr & XENCOMM_INLINE_FLAG);
- return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
-}
-
-/* "mini" routine, for stack-based communications: */
-static int xencomm_create_mini(void *buffer,
- unsigned long bytes, struct xencomm_mini *xc_desc,
- struct xencomm_desc **ret)
-{
- int rc = 0;
- struct xencomm_desc *desc;
- BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
-
- desc = (void *)xc_desc;
-
- desc->nr_addrs = XENCOMM_MINI_ADDRS;
-
- rc = xencomm_init(desc, buffer, bytes);
- if (!rc)
- *ret = desc;
-
- return rc;
-}
-
-struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
-{
- int rc;
- struct xencomm_desc *desc;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
-
- if (rc || desc == NULL)
- return NULL;
-
- return xencomm_pa(desc);
-}
-
-struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
- struct xencomm_mini *xc_desc)
-{
- int rc;
- struct xencomm_desc *desc = NULL;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create_mini(ptr, bytes, xc_desc,
- &desc);
-
- if (rc)
- return NULL;
-
- return xencomm_pa(desc);
-}