-rw-r--r--Documentation/ABI/testing/ima_policy12
-rw-r--r--Documentation/cpu-freq/governors.txt4
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--Documentation/lguest/lguest.c1
-rw-r--r--Documentation/networking/ip-sysctl.txt8
-rw-r--r--MAINTAINERS28
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/include/asm/cacheflush.h3
-rw-r--r--arch/arm/kernel/setup.c1
-rw-r--r--arch/arm/mach-gemini/gpio.c4
-rw-r--r--arch/arm/mach-mx25/clock.c58
-rw-r--r--arch/arm/mach-mx25/mx25pdk.c2
-rw-r--r--arch/arm/mach-mx3/mx31ads.c4
-rw-r--r--arch/arm/mach-omap2/gpmc.c5
-rw-r--r--arch/arm/mach-omap2/irq.c4
-rw-r--r--arch/arm/mach-omap2/mmc-twl4030.c7
-rw-r--r--arch/arm/mach-omap2/mux.c10
-rw-r--r--arch/arm/mach-omap2/mux34xx.c47
-rw-r--r--arch/arm/mach-omap2/serial.c11
-rw-r--r--arch/arm/mach-realview/realview_pbx.c4
-rw-r--r--arch/arm/mm/alignment.c3
-rw-r--r--arch/arm/mm/proc-arm6_7.S2
-rw-r--r--arch/arm/plat-mxc/audmux-v2.c137
-rw-r--r--arch/arm/plat-mxc/include/mach/board-mx31lite.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx35.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/irqs.h5
-rw-r--r--arch/arm/plat-omap/clock.c4
-rw-r--r--arch/arm/plat-omap/gpio.c4
-rw-r--r--arch/arm/plat-omap/omap_device.c10
-rw-r--r--arch/arm/tools/mach-types46
-rw-r--r--arch/arm/vfp/vfpmodule.c5
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c15
-rw-r--r--arch/ia64/include/asm/acpi.h1
-rw-r--r--arch/ia64/include/asm/elf.h4
-rw-r--r--arch/ia64/sn/kernel/setup.c2
-rw-r--r--arch/microblaze/configs/mmu_defconfig112
-rw-r--r--arch/microblaze/configs/nommu_defconfig101
-rw-r--r--arch/microblaze/include/asm/io.h2
-rw-r--r--arch/microblaze/kernel/cpu/cache.c27
-rw-r--r--arch/microblaze/kernel/entry-nommu.S10
-rw-r--r--arch/microblaze/kernel/setup.c1
-rw-r--r--arch/mips/bcm47xx/prom.c8
-rw-r--r--arch/mips/configs/ip27_defconfig917
-rw-r--r--arch/mips/kernel/cpu-probe.c4
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/mm/c-octeon.c4
-rw-r--r--arch/mips/mm/cache.c2
-rw-r--r--arch/mips/mm/highmem.c1
-rw-r--r--arch/mips/sni/rm200.c2
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/kernel/pci.c7
-rw-r--r--arch/parisc/kernel/signal.c4
-rw-r--r--arch/powerpc/mm/tlb_hash64.c12
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c3
-rw-r--r--arch/powerpc/platforms/85xx/smp.c21
-rw-r--r--arch/powerpc/platforms/pseries/xics.c14
-rw-r--r--arch/s390/include/asm/lowcore.h4
-rw-r--r--arch/sh/kernel/cpu/sh3/entry.S3
-rw-r--r--arch/sh/kernel/dwarf.c20
-rw-r--r--arch/sh/kernel/entry-common.S8
-rw-r--r--arch/sh/kernel/ptrace_64.c11
-rw-r--r--arch/sh/kernel/signal_64.c4
-rw-r--r--arch/sparc/include/asm/stat.h4
-rw-r--r--arch/sparc/kernel/kstack.h4
-rw-r--r--arch/sparc/kernel/of_device_32.c2
-rw-r--r--arch/sparc/kernel/pci.c7
-rw-r--r--arch/sparc/kernel/process_32.c2
-rw-r--r--arch/sparc/kernel/process_64.c8
-rw-r--r--arch/sparc/kernel/signal32.c10
-rw-r--r--arch/sparc/kernel/signal_32.c6
-rw-r--r--arch/sparc/kernel/signal_64.c8
-rw-r--r--arch/sparc/kernel/tsb.S6
-rw-r--r--arch/x86/include/asm/elf.h5
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/system.h4
-rw-r--r--arch/x86/kernel/acpi/boot.c13
-rw-r--r--arch/x86/kernel/apic/apic.c17
-rw-r--r--arch/x86/kernel/apic/probe_32.c29
-rw-r--r--arch/x86/kernel/apic/probe_64.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c3
-rw-r--r--arch/x86/kernel/hw_breakpoint.c30
-rw-r--r--arch/x86/kernel/mpparse.c7
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/ptrace.c7
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kvm/i8254.c3
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--block/blk-core.c11
-rw-r--r--block/cfq-iosched.c49
-rw-r--r--drivers/acpi/dock.c1
-rw-r--r--drivers/acpi/processor_idle.c36
-rw-r--r--drivers/acpi/processor_pdc.c14
-rw-r--r--drivers/acpi/processor_perflib.c6
-rw-r--r--drivers/acpi/scan.c27
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/ata/ahci.c27
-rw-r--r--drivers/ata/libata-scsi.c2
-rw-r--r--drivers/ata/libata-sff.c3
-rw-r--r--drivers/base/class.c2
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/virtio_blk.c61
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c1
-rw-r--r--drivers/char/Kconfig8
-rw-r--r--drivers/char/agp/amd64-agp.c5
-rw-r--r--drivers/char/hvc_beat.c2
-rw-r--r--drivers/char/hvc_console.c7
-rw-r--r--drivers/char/hvc_console.h7
-rw-r--r--drivers/char/hvc_iseries.c2
-rw-r--r--drivers/char/hvc_iucv.c2
-rw-r--r--drivers/char/hvc_rtas.c2
-rw-r--r--drivers/char/hvc_udbg.c2
-rw-r--r--drivers/char/hvc_vio.c2
-rw-r--r--drivers/char/hvc_xen.c2
-rw-r--r--drivers/char/tpm/tpm_infineon.c79
-rw-r--r--drivers/char/tty_io.c4
-rw-r--r--drivers/char/virtio_console.c1578
-rw-r--r--drivers/clocksource/cs5535-clockevt.c2
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c3
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/dmaengine.c1
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/ioat/dma_v2.c2
-rw-r--r--drivers/dma/ipu/ipu_idmac.c25
-rw-r--r--drivers/edac/amd64_edac.c15
-rw-r--r--drivers/edac/mpc85xx_edac.c8
-rw-r--r--drivers/firewire/net.c53
-rw-r--r--drivers/firewire/ohci.c13
-rw-r--r--drivers/gpu/drm/ati_pcigart.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c47
-rw-r--r--drivers/gpu/drm/drm_mm.c3
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c146
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c38
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c42
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c276
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c18
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c40
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_grctx.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c155
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c113
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c49
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c10
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c58
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c1
-rw-r--r--drivers/gpu/drm/radeon/Kconfig12
-rw-r--r--drivers/gpu/drm/radeon/atom.c7
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c10
-rw-r--r--drivers/gpu/drm/radeon/r100.c14
-rw-r--r--drivers/gpu/drm/radeon/r300.c16
-rw-r--r--drivers/gpu/drm/radeon/r420.c3
-rw-r--r--drivers/gpu/drm/radeon/r520.c3
-rw-r--r--drivers/gpu/drm/radeon/r600.c56
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c5
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h11
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c55
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c107
-rw-r--r--drivers/gpu/drm/radeon/rs400.c28
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c4
-rw-r--r--drivers/gpu/drm/radeon/rv770.c33
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c49
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c108
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c16
-rw-r--r--drivers/gpu/vga/vgaarb.c2
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/lm78.c25
-rw-r--r--drivers/hwmon/w83781d.c26
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c12
-rw-r--r--drivers/infiniband/core/cma.c4
-rw-r--r--drivers/input/input-polldev.c6
-rw-r--r--drivers/input/mouse/psmouse-base.c9
-rw-r--r--drivers/input/serio/i8042.c8
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c8
-rw-r--r--drivers/md/dm-log-userspace-transfer.c10
-rw-r--r--drivers/md/dm-raid1.c2
-rw-r--r--drivers/md/dm-region-hash.c5
-rw-r--r--drivers/md/dm-snap-persistent.c2
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-sysfs.c8
-rw-r--r--drivers/md/dm.c21
-rw-r--r--drivers/md/md.c14
-rw-r--r--drivers/md/raid5.c14
-rw-r--r--drivers/media/common/saa7146_video.c4
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c20
-rw-r--r--drivers/media/dvb/dvb-usb/Kconfig4
-rw-r--r--drivers/media/dvb/frontends/l64781.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c1
-rw-r--r--drivers/media/video/bt8xx/bttv-i2c.c8
-rw-r--r--drivers/media/video/bt8xx/bttvp.h1
-rw-r--r--drivers/media/video/mt9t112.c2
-rw-r--r--drivers/media/video/pwc/pwc-ctrl.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-empress.c8
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/mmc/card/mmc_test.c9
-rw-r--r--drivers/net/ax88796.c2
-rw-r--r--drivers/net/benet/be_cmds.c1
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/e1000/e1000_main.c19
-rw-r--r--drivers/net/igb/igb_main.c20
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c22
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c11
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/sfc/efx.c1
-rw-r--r--drivers/net/sfc/falcon_boards.c45
-rw-r--r--drivers/net/sfc/mcdi.c2
-rw-r--r--drivers/net/sfc/qt202x_phy.c2
-rw-r--r--drivers/net/sky2.c8
-rw-r--r--drivers/net/tc35815.c1
-rw-r--r--drivers/net/usb/cdc_ether.c5
-rw-r--r--drivers/net/via-velocity.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c22
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c6
-rw-r--r--drivers/pci/quirks.c17
-rw-r--r--drivers/platform/x86/acer-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/power/wm97xx_battery.c10
-rw-r--r--drivers/regulator/core.c2
-rw-r--r--drivers/regulator/lp3971.c4
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/scsi/zfcp_fc.c9
-rw-r--r--drivers/scsi/arm/fas216.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c18
-rw-r--r--drivers/scsi/fcoe/libfcoe.c2
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c3
-rw-r--r--drivers/scsi/libfc/fc_lport.c3
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi_tcp.c8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c8
-rw-r--r--drivers/serial/8250.c7
-rw-r--r--drivers/ssb/main.c3
-rw-r--r--drivers/usb/core/devio.c48
-rw-r--r--drivers/usb/gadget/f_eem.c3
-rw-r--r--drivers/usb/gadget/multi.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c1
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c1
-rw-r--r--drivers/usb/host/ehci-hub.c13
-rw-r--r--drivers/usb/host/fhci-tds.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c41
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/otg/Kconfig1
-rw-r--r--drivers/usb/serial/ftdi_sio.c25
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h18
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/video/efifb.c11
-rw-r--r--drivers/virtio/virtio_balloon.c109
-rw-r--r--drivers/virtio/virtio_pci.c2
-rw-r--r--drivers/virtio/virtio_ring.c59
-rw-r--r--drivers/watchdog/bfin_wdt.c13
-rw-r--r--fs/9p/v9fs.c33
-rw-r--r--fs/9p/v9fs_vfs.h1
-rw-r--r--fs/9p/vfs_file.c19
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/befs/linuxvfs.c1
-rw-r--r--fs/block_dev.c7
-rw-r--r--fs/btrfs/disk-io.c7
-rw-r--r--fs/btrfs/extent-tree.c8
-rw-r--r--fs/btrfs/extent_io.c3
-rw-r--r--fs/btrfs/file.c8
-rw-r--r--fs/btrfs/inode.c50
-rw-r--r--fs/btrfs/relocation.c3
-rw-r--r--fs/cachefiles/namei.c12
-rw-r--r--fs/cifs/CHANGES4
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/connect.c30
-rw-r--r--fs/cifs/inode.c12
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cifs/sess.c11
-rw-r--r--fs/compat_ioctl.c9
-rw-r--r--fs/exec.c20
-rw-r--r--fs/fcntl.c6
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/glock.c4
-rw-r--r--fs/gfs2/glock.h2
-rw-r--r--fs/gfs2/incore.h2
-rw-r--r--fs/gfs2/lock_dlm.c11
-rw-r--r--fs/gfs2/ops_fstype.c14
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/namei.c20
-rw-r--r--fs/nfs/direct.c3
-rw-r--r--fs/nfs/file.c2
-rw-r--r--fs/nfs/fscache.c9
-rw-r--r--fs/nfs/inode.c4
-rw-r--r--fs/nfs/mount_clnt.c2
-rw-r--r--fs/nfs/nfs2xdr.c2
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c78
-rw-r--r--fs/nfs/nfs4state.c2
-rw-r--r--fs/nfs/nfs4xdr.c6
-rw-r--r--fs/nfs/pagelist.c17
-rw-r--r--fs/nfs/super.c15
-rw-r--r--fs/nfs/sysctl.c2
-rw-r--r--fs/nfs/write.c6
-rw-r--r--fs/nfsd/export.c10
-rw-r--r--fs/nfsd/vfs.c3
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/buffer_head_io.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c6
-rw-r--r--fs/ocfs2/cluster/tcp.c10
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h4
-rw-r--r--fs/ocfs2/dlm/dlmapi.h2
-rw-r--r--fs/ocfs2/dlm/dlmast.c2
-rw-r--r--fs/ocfs2/dlm/dlmconvert.c2
-rw-r--r--fs/ocfs2/dlm/dlmdebug.c2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/dlm/dlmlock.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c38
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c147
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c8
-rw-r--r--fs/ocfs2/dlmglue.c85
-rw-r--r--fs/ocfs2/export.c2
-rw-r--r--fs/ocfs2/extent_map.c2
-rw-r--r--fs/ocfs2/file.c18
-rw-r--r--fs/ocfs2/inode.c4
-rw-r--r--fs/ocfs2/ioctl.c14
-rw-r--r--fs/ocfs2/journal.c2
-rw-r--r--fs/ocfs2/ocfs2.h4
-rw-r--r--fs/ocfs2/ocfs2_fs.h11
-rw-r--r--fs/ocfs2/refcounttree.c12
-rw-r--r--fs/ocfs2/stack_o2cb.c12
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/ocfs2/symlink.c10
-rw-r--r--fs/ocfs2/uptodate.c4
-rw-r--r--fs/proc/base.c24
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/sysfs/inode.c35
-rw-r--r--include/drm/nouveau_drm.h1
-rw-r--r--include/drm/vmwgfx_drm.h20
-rw-r--r--include/linux/amba/bus.h6
-rw-r--r--include/linux/ata.h4
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/hw_breakpoint.h2
-rw-r--r--include/linux/ima.h4
-rw-r--r--include/linux/input.h1
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/perf_event.h6
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/virtio.h4
-rw-r--r--include/linux/virtio_balloon.h15
-rw-r--r--include/linux/virtio_blk.h13
-rw-r--r--include/linux/virtio_console.h30
-rw-r--r--include/net/netns/conntrack.h3
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--init/main.c2
-rw-r--r--kernel/futex.c30
-rw-r--r--kernel/hw_breakpoint.c2
-rw-r--r--kernel/kfifo.c3
-rw-r--r--kernel/kgdb.c6
-rw-r--r--kernel/perf_event.c13
-rw-r--r--kernel/softirq.c15
-rw-r--r--kernel/softlockup.c15
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/trace/trace_stack.c24
-rw-r--r--lib/idr.c9
-rw-r--r--mm/migrate.c39
-rw-r--r--mm/oom_kill.c2
-rw-r--r--net/9p/client.c53
-rw-r--r--net/9p/trans_fd.c10
-rw-r--r--net/9p/trans_rdma.c9
-rw-r--r--net/9p/trans_virtio.c4
-rw-r--r--net/bluetooth/hci_conn.c3
-rw-r--r--net/bluetooth/hci_event.c1
-rw-r--r--net/bluetooth/hidp/core.c49
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/rfcomm/core.c8
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/dst.c2
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/net-sysfs.c3
-rw-r--r--net/core/pktgen.c1
-rw-r--r--net/dccp/ccid.c2
-rw-r--r--net/dccp/ccid.h8
-rw-r--r--net/dccp/probe.c4
-rw-r--r--net/ipv4/devinet.c7
-rw-r--r--net/ipv4/igmp.c2
-rw-r--r--net/ipv4/ipcomp.c6
-rw-r--r--net/ipv4/netfilter/arp_tables.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c4
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c22
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv6/addrconf.c16
-rw-r--r--net/ipv6/ipcomp6.c6
-rw-r--r--net/ipv6/netfilter/ip6_tables.c4
-rw-r--r--net/irda/irnet/irnet_ppp.c5
-rw-r--r--net/key/af_key.c15
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/rate.c3
-rw-r--r--net/mac80211/scan.c18
-rw-r--r--net/netfilter/nf_conntrack_core.c116
-rw-r--r--net/netfilter/nf_conntrack_expect.c4
-rw-r--r--net/netfilter/nf_conntrack_helper.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c2
-rw-r--r--net/netfilter/nf_conntrack_standalone.c7
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/sched/Kconfig16
-rw-r--r--net/xfrm/xfrm_state.c12
-rw-r--r--security/integrity/ima/ima.h3
-rw-r--r--security/integrity/ima/ima_api.c4
-rw-r--r--security/integrity/ima/ima_iint.c12
-rw-r--r--security/integrity/ima/ima_main.c239
-rw-r--r--security/integrity/ima/ima_policy.c9
-rw-r--r--security/security.c2
-rw-r--r--security/selinux/ss/ebitmap.c2
-rw-r--r--sound/pci/ctxfi/ctatc.c15
-rw-r--r--sound/pci/ctxfi/ctvmem.c38
-rw-r--r--sound/pci/ctxfi/ctvmem.h8
-rw-r--r--sound/pci/hda/hda_intel.c26
-rw-r--r--sound/pci/ice1712/aureon.c12
-rw-r--r--sound/soc/omap/omap3pandora.c1
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/util/event.c4
-rw-r--r--tools/perf/util/probe-event.c3
488 files changed, 6533 insertions, 2588 deletions
diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
index 6434f0df012e..6cd6daefaaed 100644
--- a/Documentation/ABI/testing/ima_policy
+++ b/Documentation/ABI/testing/ima_policy
@@ -20,7 +20,7 @@ Description:
lsm: [[subj_user=] [subj_role=] [subj_type=]
[obj_user=] [obj_role=] [obj_type=]]
- base: func:= [BPRM_CHECK][FILE_MMAP][INODE_PERMISSION]
+ base: func:= [BPRM_CHECK][FILE_MMAP][FILE_CHECK]
mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
fsmagic:= hex value
uid:= decimal value
@@ -40,11 +40,11 @@ Description:
measure func=BPRM_CHECK
measure func=FILE_MMAP mask=MAY_EXEC
- measure func=INODE_PERM mask=MAY_READ uid=0
+ measure func=FILE_CHECK mask=MAY_READ uid=0
The default policy measures all executables in bprm_check,
all files mmapped executable in file_mmap, and all files
- open for read by root in inode_permission.
+ open for read by root in do_filp_open.
Examples of LSM specific definitions:
@@ -54,8 +54,8 @@ Description:
dont_measure obj_type=var_log_t
dont_measure obj_type=auditd_log_t
- measure subj_user=system_u func=INODE_PERM mask=MAY_READ
- measure subj_role=system_r func=INODE_PERM mask=MAY_READ
+ measure subj_user=system_u func=FILE_CHECK mask=MAY_READ
+ measure subj_role=system_r func=FILE_CHECK mask=MAY_READ
Smack:
- measure subj_user=_ func=INODE_PERM mask=MAY_READ
+ measure subj_user=_ func=FILE_CHECK mask=MAY_READ
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d09..737988fca64d 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
up_threshold: defines what the average CPU usage between the samplings
of 'sampling_rate' needs to be for the kernel to make a decision on
whether it should increase the frequency. For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
decide that the CPU frequency needs to be increased.
ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d45602886..e7848a0d99eb 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
acpi_display_output=video
See above.
+ acpi_early_pdc_eval [HW,ACPI] Evaluate processor _PDC methods
+ early. Needed on some platforms to properly
+ initialize the EC.
+
acpi_irq_balance [HW,ACPI]
ACPI will balance active IRQs
default in APIC mode
@@ -311,6 +315,11 @@ and is between 256 and 4096 characters. It is defined in the file
aic79xx= [HW,SCSI]
See Documentation/scsi/aic79xx.txt.
+ alignment= [KNL,ARM]
+ Allow the default userspace alignment fault handler
+ behaviour to be specified. Bit 0 enables warnings,
+ bit 1 enables fixups, and bit 2 sends a segfault.
+
amd_iommu= [HW,X86-84]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 42208511b5c0..3119f5db75bd 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -34,7 +34,6 @@
#include <sys/uio.h>
#include <termios.h>
#include <getopt.h>
-#include <zlib.h>
#include <assert.h>
#include <sched.h>
#include <limits.h>
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 006b39dec87d..e87f3cdc8a6a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1074,10 +1074,10 @@ regen_max_retry - INTEGER
Default: 5
max_addresses - INTEGER
- Number of maximum addresses per interface. 0 disables limitation.
- It is recommended not set too large value (or 0) because it would
- be too easy way to crash kernel to allow to create too much of
- autoconfigured addresses.
+ Maximum number of autoconfigured addresses per interface. Setting
+ to zero disables the limitation. It is not recommended to set this
+ value too large (or to zero) because it would be an easy way to
+ crash the kernel by allowing too many addresses to be created.
Default: 16
disable_ipv6 - BOOLEAN
diff --git a/MAINTAINERS b/MAINTAINERS
index 03f38c18f323..317ed38826d7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -616,10 +616,10 @@ M: Richard Purdie <rpurdie@rpsys.net>
S: Maintained
ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://gitorious.org/linux-gemini/mainline.git
-S: Maintained
+S: Odd Fixes
F: arch/arm/mach-gemini/
ARM/EBSA110 MACHINE SUPPORT
@@ -641,9 +641,9 @@ T: topgit git://git.openezx.org/openezx.git
F: arch/arm/mach-pxa/ezx.c
ARM/FARADAY FA526 PORT
-M: Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M: Paulius Zaleckas <paulius.zaleckas@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
F: arch/arm/mm/*-fa*
ARM/FOOTBRIDGE ARCHITECTURE
@@ -1733,10 +1733,9 @@ F: include/linux/tfrc.h
F: net/dccp/
DECnet NETWORK LAYER
-M: Christine Caulfield <christine.caulfield@googlemail.com>
W: http://linux-decnet.sourceforge.net
L: linux-decnet-user@lists.sourceforge.net
-S: Maintained
+S: Orphan
F: Documentation/networking/decnet.txt
F: net/decnet/
@@ -2394,6 +2393,12 @@ L: linuxppc-dev@ozlabs.org
S: Odd Fixes
F: drivers/char/hvc_*
+VIRTIO CONSOLE DRIVER
+M: Amit Shah <amit.shah@redhat.com>
+L: virtualization@lists.linux-foundation.org
+S: Maintained
+F: drivers/char/virtio_console.c
+
GSPCA FINEPIX SUBDRIVER
M: Frank Zago <frank@zago.net>
L: linux-media@vger.kernel.org
@@ -3411,8 +3416,10 @@ S: Maintained
F: drivers/scsi/sym53c8xx_2/
LTP (Linux Test Project)
-M: Subrata Modak <subrata@linux.vnet.ibm.com>
-M: Mike Frysinger <vapier@gentoo.org>
+M: Rishikesh K Rajak <risrajak@linux.vnet.ibm.com>
+M: Garrett Cooper <yanegomi@gmail.com>
+M: Mike Frysinger <vapier@gentoo.org>
+M: Subrata Modak <subrata@linux.vnet.ibm.com>
L: ltp-list@lists.sourceforge.net (subscribers-only)
W: http://ltp.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/galak/ltp.git
@@ -3488,9 +3495,9 @@ S: Maintained
F: drivers/net/wireless/libertas/
MARVELL MV643XX ETHERNET DRIVER
-M: Lennert Buytenhek <buytenh@marvell.com>
+M: Lennert Buytenhek <buytenh@wantstofly.org>
L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
F: drivers/net/mv643xx_eth.*
F: include/linux/mv643xx.h
@@ -3836,6 +3843,7 @@ NETWORKING DRIVERS
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6.git
S: Odd Fixes
F: drivers/net/
F: include/linux/if_*
diff --git a/Makefile b/Makefile
index 394aec712c7d..1b24895212d8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 33
-EXTRAVERSION = -rc6
+EXTRAVERSION =
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 4c33ca82f9b1..184a6bd54825 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -702,6 +702,7 @@ config ARCH_OMAP
select ARCH_HAS_CPUFREQ
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+ select ARCH_HAS_HOLES_MEMORYMODEL
help
Support for TI's OMAP platform (OMAP1 and OMAP2).
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 9e7582572741..356d702c0808 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -94,7 +94,7 @@ CFLAGS_ABI +=-funwind-tables
endif
ifeq ($(CONFIG_THUMB2_KERNEL),y)
-AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=thumb,-Wa$(comma)-mauto-it)
+AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c77d2fa1f6e5..8113bb5fb66e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -42,7 +42,8 @@
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
- defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
+ defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
+ defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c6c57b640b6b..621acad8ea43 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -102,6 +102,7 @@ struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
+EXPORT_SYMBOL(outer_cache);
#endif
struct stack {
diff --git a/arch/arm/mach-gemini/gpio.c b/arch/arm/mach-gemini/gpio.c
index e7263854bc7b..fe3bd5ac8b10 100644
--- a/arch/arm/mach-gemini/gpio.c
+++ b/arch/arm/mach-gemini/gpio.c
@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
unsigned int reg_both, reg_level, reg_type;
reg_type = __raw_readl(base + GPIO_INT_TYPE);
- reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+ reg_level = __raw_readl(base + GPIO_INT_LEVEL);
reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
switch (type) {
@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
}
__raw_writel(reg_type, base + GPIO_INT_TYPE);
- __raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+ __raw_writel(reg_level, base + GPIO_INT_LEVEL);
__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
gpio_ack_irq(irq);
diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c
index 6e838b857712..6acc88bcdc40 100644
--- a/arch/arm/mach-mx25/clock.c
+++ b/arch/arm/mach-mx25/clock.c
@@ -119,6 +119,11 @@ static unsigned long get_rate_nfc(struct clk *clk)
return get_rate_per(8);
}
+static unsigned long get_rate_gpt(struct clk *clk)
+{
+ return get_rate_per(5);
+}
+
static unsigned long get_rate_otg(struct clk *clk)
{
return 48000000; /* FIXME */
@@ -144,7 +149,7 @@ static void clk_cgcr_disable(struct clk *clk)
__raw_writel(reg, clk->enable_reg);
}
-#define DEFINE_CLOCK(name, i, er, es, gr, sr) \
+#define DEFINE_CLOCK(name, i, er, es, gr, sr, s) \
static struct clk name = { \
.id = i, \
.enable_reg = CRM_BASE + er, \
@@ -153,27 +158,30 @@ static void clk_cgcr_disable(struct clk *clk)
.set_rate = sr, \
.enable = clk_cgcr_enable, \
.disable = clk_cgcr_disable, \
+ .secondary = s, \
}
-DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL);
-DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL);
-DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL);
-DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL);
-DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL);
-DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL);
-DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL);
-DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL);
-DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL);
-DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL);
-DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL);
-DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL);
-DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL);
-DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL);
-DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL);
-DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL);
-DEFINE_CLOCK(fec_clk, 0, CCM_CGCR0, 23, get_rate_ipg, NULL);
+DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL);
+DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
+DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
+DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
+DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL);
+DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
+DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
+DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL);
+DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
#define _REGISTER_CLOCK(d, n, c) \
{ \
@@ -208,13 +216,21 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("fec.0", NULL, fec_clk)
};
-int __init mx25_clocks_init(unsigned long fref)
+int __init mx25_clocks_init(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(lookups); i++)
clkdev_add(&lookups[i]);
+ /* Turn off all clocks except the ones we need to survive, namely:
+ * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
+ * SCC
+ */
+ __raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
+ __raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
+ __raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
+
mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
return 0;
diff --git a/arch/arm/mach-mx25/mx25pdk.c b/arch/arm/mach-mx25/mx25pdk.c
index 921bc99ea231..6f06089246eb 100644
--- a/arch/arm/mach-mx25/mx25pdk.c
+++ b/arch/arm/mach-mx25/mx25pdk.c
@@ -91,7 +91,7 @@ static void __init mx25pdk_init(void)
static void __init mx25pdk_timer_init(void)
{
- mx25_clocks_init(26000000);
+ mx25_clocks_init();
}
static struct sys_timer mx25pdk_timer = {
diff --git a/arch/arm/mach-mx3/mx31ads.c b/arch/arm/mach-mx3/mx31ads.c
index 3e7bafa2ddbb..938c549767dc 100644
--- a/arch/arm/mach-mx3/mx31ads.c
+++ b/arch/arm/mach-mx3/mx31ads.c
@@ -173,6 +173,7 @@ static void expio_unmask_irq(u32 irq)
}
static struct irq_chip expio_irq_chip = {
+ .name = "EXPIO(CPLD)",
.ack = expio_ack_irq,
.mask = expio_mask_irq,
.unmask = expio_unmask_irq,
@@ -302,6 +303,7 @@ static struct regulator_init_data ldo1_data = {
.min_uV = 2800000,
.max_uV = 2800000,
.valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
.apply_uV = 1,
},
};
@@ -322,6 +324,7 @@ static struct regulator_init_data ldo2_data = {
.min_uV = 3300000,
.max_uV = 3300000,
.valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
.apply_uV = 1,
},
.num_consumer_supplies = ARRAY_SIZE(ldo2_consumers),
@@ -459,6 +462,7 @@ static int mx31_wm8350_init(struct wm8350 *wm8350)
static struct wm8350_platform_data __initdata mx31_wm8350_pdata = {
.init = mx31_wm8350_init,
+ .irq_base = MXC_BOARD_IRQ_START + MXC_MAX_EXP_IO_LINES,
};
#endif
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 3f1334f62e7a..7027cdc1ba49 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -505,7 +505,7 @@ static void __init gpmc_mem_init(void)
void __init gpmc_init(void)
{
u32 l;
- char *ck;
+ char *ck = NULL;
if (cpu_is_omap24xx()) {
ck = "core_l3_ck";
@@ -521,6 +521,9 @@ void __init gpmc_init(void)
l = OMAP44XX_GPMC_BASE;
}
+ if (WARN_ON(!ck))
+ return;
+
gpmc_l3_clk = clk_get(NULL, ck);
if (IS_ERR(gpmc_l3_clk)) {
printk(KERN_ERR "Could not get GPMC clock %s\n", ck);
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 27054025da2b..26aeef560aa3 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -194,7 +194,7 @@ void __init omap_init_irq(void)
int i;
for (i = 0; i < ARRAY_SIZE(irq_banks); i++) {
- unsigned long base;
+ unsigned long base = 0;
struct omap_irq_bank *bank = irq_banks + i;
if (cpu_is_omap24xx())
@@ -202,6 +202,8 @@ void __init omap_init_irq(void)
else if (cpu_is_omap34xx())
base = OMAP34XX_IC_BASE;
+ BUG_ON(!base);
+
/* Static mapping, never released */
bank->base_reg = ioremap(base, SZ_4K);
if (!bank->base_reg) {
diff --git a/arch/arm/mach-omap2/mmc-twl4030.c b/arch/arm/mach-omap2/mmc-twl4030.c
index 0c3c72d934bf..8afe9dd3f150 100644
--- a/arch/arm/mach-omap2/mmc-twl4030.c
+++ b/arch/arm/mach-omap2/mmc-twl4030.c
@@ -408,6 +408,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
{
struct twl4030_hsmmc_info *c;
int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
+ int i;
if (cpu_is_omap2430()) {
control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
@@ -434,7 +435,7 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
if (!mmc) {
pr_err("Cannot allocate memory for mmc device!\n");
- return;
+ goto done;
}
if (c->name)
@@ -532,6 +533,10 @@ void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
continue;
c->dev = mmc->dev;
}
+
+done:
+ for (i = 0; i < nr_hsmmc; i++)
+ kfree(hsmmc_data[i]);
}
#endif
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 3f59bd12cbbf..5fef73f4743d 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -486,7 +486,7 @@ int __init omap_mux_init_signal(char *muxname, int val)
static inline void omap_mux_decode(struct seq_file *s, u16 val)
{
char *flags[OMAP_MUX_MAX_NR_FLAGS];
- char mode[14];
+ char mode[sizeof("OMAP_MUX_MODE") + 1];
int i = -1;
sprintf(mode, "OMAP_MUX_MODE%d", val & 0x7);
@@ -553,6 +553,7 @@ static int omap_mux_dbg_board_show(struct seq_file *s, void *unused)
if (!m0_name)
continue;
+ /* REVISIT: Needs to be updated if mode0 names get longer */
for (i = 0; i < OMAP_MUX_DEFNAME_LEN; i++) {
if (m0_name[i] == '\0') {
m0_def[i] = m0_name[i];
@@ -960,7 +961,12 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
while (superset->reg_offset != OMAP_MUX_TERMINATOR) {
struct omap_mux *entry;
-#ifndef CONFIG_OMAP_MUX
+#ifdef CONFIG_OMAP_MUX
+ if (!superset->muxnames || !superset->muxnames[0]) {
+ superset++;
+ continue;
+ }
+#else
/* Skip pins that are not muxed as GPIO by bootloader */
if (!OMAP_MODE_GPIO(omap_mux_read(superset->reg_offset))) {
superset++;
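A side note on the mode[] sizing in the omap_mux_decode() hunk above: sizeof("OMAP_MUX_MODE") already counts the terminating NUL, so the extra +1 is exactly the room needed for the single mode digit, and the old fixed mode[14] was one byte short for "OMAP_MUX_MODE7" plus its terminator. A standalone check (illustrative only, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            char mode[sizeof("OMAP_MUX_MODE") + 1];   /* 14 bytes incl. NUL, +1 for the digit */

            assert(sizeof(mode) == sizeof("OMAP_MUX_MODE7"));   /* 15 bytes; mode[14] was one short */
            sprintf(mode, "OMAP_MUX_MODE%d", 7);
            printf("%s fits in %zu bytes\n", mode, sizeof(mode));
            return 0;
    }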
diff --git a/arch/arm/mach-omap2/mux34xx.c b/arch/arm/mach-omap2/mux34xx.c
index 68e0a595f9a1..07aa7b3c95f7 100644
--- a/arch/arm/mach-omap2/mux34xx.c
+++ b/arch/arm/mach-omap2/mux34xx.c
@@ -649,6 +649,53 @@ static struct omap_mux __initdata omap3_muxmodes[] = {
_OMAP3_MUXENTRY(UART3_TX_IRTX, 166,
"uart3_tx_irtx", NULL, NULL, NULL,
"gpio_166", NULL, NULL, "safe_mode"),
+
+ /* Only on 3630, see omap36xx_cbp_subset for the signals */
+ _OMAP3_MUXENTRY(GPMC_A11, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MBUSFLAG, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MREAD, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MWRITE, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_SBUSFLAG, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_SREAD, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_SWRITE, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(GPMC_A11, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD28, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD29, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD32, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD33, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD34, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD35, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
+ _OMAP3_MUXENTRY(SAD2D_MCAD36, 0,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 8c964bec8159..e10a02df6e1d 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -36,7 +36,13 @@
#define UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV 0x52
#define UART_OMAP_WER 0x17 /* Wake-up enable register */
-#define DEFAULT_TIMEOUT (5 * HZ)
+/*
+ * NOTE: By default the serial timeout is disabled as it causes lost characters
+ * over the serial ports. This means that the UART clocks will stay on until
+ * disabled via sysfs. This also causes that any deeper omap sleep states are
+ * blocked.
+ */
+#define DEFAULT_TIMEOUT 0
struct omap_uart_state {
int num;
@@ -422,7 +428,8 @@ static void omap_uart_idle_init(struct omap_uart_state *uart)
uart->timeout = DEFAULT_TIMEOUT;
setup_timer(&uart->timer, omap_uart_idle_timer,
(unsigned long) uart);
- mod_timer(&uart->timer, jiffies + uart->timeout);
+ if (uart->timeout)
+ mod_timer(&uart->timer, jiffies + uart->timeout);
omap_uart_smart_idle_enable(uart, 0);
if (cpu_is_omap34xx()) {
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index a21a4b395f73..d94857eb0690 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -334,8 +334,8 @@ static void realview_pbx_reset(char mode)
* in the system FPGA
*/
__raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
- __raw_writel(0x0000, reset_ctrl);
- __raw_writel(0x0004, reset_ctrl);
+ __raw_writel(0x00F0, reset_ctrl);
+ __raw_writel(0x00F4, reset_ctrl);
}
static void __init realview_pbx_init(void)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b270d6228fe2..62820eda84d9 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -77,6 +78,8 @@ static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
+core_param(alignment, ai_usermode, int, 0600);
+
#define UM_WARN (1 << 0)
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
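For reference, the value taken by the new alignment= parameter documented earlier in kernel-parameters.txt is simply the ai_usermode bitmask built from the UM_* bits above; core_param() should also expose it, writable by root, under /sys/module/kernel/parameters/alignment. A minimal standalone sketch of how a value is composed (the defines are repeated here only so the fragment stands alone; it is not part of the patch):

    /*
     * Bit meanings, matching the kernel-parameters.txt text added above:
     *   UM_WARN   (1) - warn about each unaligned user access
     *   UM_FIXUP  (2) - fix the access up in software
     *   UM_SIGNAL (4) - deliver a signal to the faulting task instead
     */
    #define UM_WARN   (1 << 0)
    #define UM_FIXUP  (1 << 1)
    #define UM_SIGNAL (1 << 2)

    static const int alignment_warn_and_fixup = UM_WARN | UM_FIXUP;   /* i.e. boot with alignment=3 */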
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 3f9cd3d8f6d5..795dc615f43b 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -41,7 +41,7 @@ ENTRY(cpu_arm7_dcache_clean_area)
ENTRY(cpu_arm7_data_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
- ldr r8, [r0] @ read arm instruction
+ ldr r8, [r2] @ read arm instruction
tst r8, #1 << 20 @ L = 0 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
diff --git a/arch/arm/plat-mxc/audmux-v2.c b/arch/arm/plat-mxc/audmux-v2.c
index 6f21096086fd..b06954a84436 100644
--- a/arch/arm/plat-mxc/audmux-v2.c
+++ b/arch/arm/plat-mxc/audmux-v2.c
@@ -23,6 +23,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <mach/audmux.h>
#include <mach/hardware.h>
@@ -32,6 +33,140 @@ static void __iomem *audmux_base;
#define MXC_AUDMUX_V2_PTCR(x) ((x) * 8)
#define MXC_AUDMUX_V2_PDCR(x) ((x) * 8 + 4)
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *audmux_debugfs_root;
+
+static int audmux_open_file(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/* There is an annoying discontinuity in the SSI numbering with regard
+ * to the Linux number of the devices */
+static const char *audmux_port_string(int port)
+{
+ switch (port) {
+ case MX31_AUDMUX_PORT1_SSI0:
+ return "imx-ssi.0";
+ case MX31_AUDMUX_PORT2_SSI1:
+ return "imx-ssi.1";
+ case MX31_AUDMUX_PORT3_SSI_PINS_3:
+ return "SSI3";
+ case MX31_AUDMUX_PORT4_SSI_PINS_4:
+ return "SSI4";
+ case MX31_AUDMUX_PORT5_SSI_PINS_5:
+ return "SSI5";
+ case MX31_AUDMUX_PORT6_SSI_PINS_6:
+ return "SSI6";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ int port = (int)file->private_data;
+ u32 pdcr, ptcr;
+
+ if (!buf)
+ return -ENOMEM;
+
+ if (audmux_clk)
+ clk_enable(audmux_clk);
+
+ ptcr = readl(audmux_base + MXC_AUDMUX_V2_PTCR(port));
+ pdcr = readl(audmux_base + MXC_AUDMUX_V2_PDCR(port));
+
+ if (audmux_clk)
+ clk_disable(audmux_clk);
+
+ ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+ pdcr, ptcr);
+
+ if (ptcr & MXC_AUDMUX_V2_PTCR_TFSDIR)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "TxFS output from %s, ",
+ audmux_port_string((ptcr >> 27) & 0x7));
+ else
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "TxFS input, ");
+
+ if (ptcr & MXC_AUDMUX_V2_PTCR_TCLKDIR)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "TxClk output from %s",
+ audmux_port_string((ptcr >> 22) & 0x7));
+ else
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "TxClk input");
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
+ if (ptcr & MXC_AUDMUX_V2_PTCR_SYN) {
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "Port is symmetric");
+ } else {
+ if (ptcr & MXC_AUDMUX_V2_PTCR_RFSDIR)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "RxFS output from %s, ",
+ audmux_port_string((ptcr >> 17) & 0x7));
+ else
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "RxFS input, ");
+
+ if (ptcr & MXC_AUDMUX_V2_PTCR_RCLKDIR)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "RxClk output from %s",
+ audmux_port_string((ptcr >> 12) & 0x7));
+ else
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "RxClk input");
+ }
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ "\nData received from %s\n",
+ audmux_port_string((pdcr >> 13) & 0x7));
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations audmux_debugfs_fops = {
+ .open = audmux_open_file,
+ .read = audmux_read_file,
+};
+
+static void audmux_debugfs_init(void)
+{
+ int i;
+ char buf[20];
+
+ audmux_debugfs_root = debugfs_create_dir("audmux", NULL);
+ if (!audmux_debugfs_root) {
+ pr_warning("Failed to create AUDMUX debugfs root\n");
+ return;
+ }
+
+ for (i = 1; i < 8; i++) {
+ snprintf(buf, sizeof(buf), "ssi%d", i);
+ if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
+ (void *)i, &audmux_debugfs_fops))
+ pr_warning("Failed to create AUDMUX port %d debugfs file\n",
+ i);
+ }
+}
+#else
+static inline void audmux_debugfs_init(void)
+{
+}
+#endif
+
int mxc_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
unsigned int pdcr)
{
@@ -68,6 +203,8 @@ static int mxc_audmux_v2_init(void)
if (cpu_is_mx31() || cpu_is_mx35())
audmux_base = IO_ADDRESS(AUDMUX_BASE_ADDR);
+ audmux_debugfs_init();
+
return 0;
}
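With the debugfs hooks above in place, each port's PTCR/PDCR state can be read back from userspace; assuming debugfs is mounted at the conventional /sys/kernel/debug, a minimal reader for port 1 might look like this (illustrative only, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/kernel/debug/audmux/ssi1", "r");   /* path assumes the default debugfs mount */

            if (!f) {
                    perror("audmux/ssi1");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);   /* prints PTCR/PDCR and the decoded clock/frame-sync directions */
            fclose(f);
            return 0;
    }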
diff --git a/arch/arm/plat-mxc/include/mach/board-mx31lite.h b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
index 0184b638c268..2b2da0367578 100644
--- a/arch/arm/plat-mxc/include/mach/board-mx31lite.h
+++ b/arch/arm/plat-mxc/include/mach/board-mx31lite.h
@@ -25,7 +25,7 @@
#ifndef __ASSEMBLY__
-enum mx31lilly_boards {
+enum mx31lite_boards {
MX31LITE_NOBOARD = 0,
MX31LITE_DB = 1,
};
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 286cb9b0a25b..4bf1068ffad9 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -32,7 +32,7 @@ extern void mxc91231_init_irq(void);
extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
extern int mx1_clocks_init(unsigned long fref);
extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
-extern int mx25_clocks_init(unsigned long fref);
+extern int mx25_clocks_init(void);
extern int mx27_clocks_init(unsigned long fref);
extern int mx31_clocks_init(unsigned long fref);
extern int mx35_clocks_init(void);
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx35.h b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
index 00b0ac1db225..c88d40795f7a 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx35.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx35.h
@@ -671,7 +671,7 @@
#define MX35_PAD_LD8__SDMA_SDMA_DEBUG_PC_8 IOMUX_PAD(0x634, 0x1d0, 6, 0x0, 0, NO_PAD_CTRL)
#define MX35_PAD_LD9__IPU_DISPB_DAT_9 IOMUX_PAD(0x638, 0x1d4, 0, 0x0, 0, NO_PAD_CTRL)
-#define MX35_PAD_LD9__GPIO2_9 IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4 0, NO_PAD_CTRL)
+#define MX35_PAD_LD9__GPIO2_9 IOMUX_PAD(0x638, 0x1d4, 5, 0x8e4, 0, NO_PAD_CTRL)
#define MX35_PAD_LD9__SDMA_SDMA_DEBUG_PC_9 IOMUX_PAD(0x638, 0x1d4, 6, 0x0, 0, NO_PAD_CTRL)
#define MX35_PAD_LD10__IPU_DISPB_DAT_10 IOMUX_PAD(0x63c, 0x1d8, 0, 0x0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index ead9d592168d..0cb347645db4 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -37,7 +37,12 @@
* within sensible limits.
*/
#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS)
+
+#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
+#define MXC_BOARD_IRQS 80
+#else
#define MXC_BOARD_IRQS 16
+#endif
#define MXC_IPU_IRQ_START (MXC_BOARD_IRQ_START + MXC_BOARD_IRQS)
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index d9f8c844c385..4becbdd1935c 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -391,7 +391,7 @@ static struct dentry *clk_debugfs_root;
static int clk_debugfs_register_one(struct clk *c)
{
int err;
- struct dentry *d, *child;
+ struct dentry *d, *child, *child_tmp;
struct clk *pa = c->parent;
char s[255];
char *p = s;
@@ -423,7 +423,7 @@ static int clk_debugfs_register_one(struct clk *c)
err_out:
d = c->dent;
- list_for_each_entry(child, &d->d_subdirs, d_u.d_child)
+ list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
debugfs_remove(child);
debugfs_remove(c->dent);
return err;
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index d17620c50c28..d2422c766cca 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -750,6 +750,7 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
}
#endif
+#ifdef CONFIG_ARCH_OMAP1
/*
* This only applies to chips that can't do both rising and falling edge
* detection at once. For all other chips, this function is a noop.
@@ -760,11 +761,9 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
u32 l = 0;
switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
case METHOD_MPUIO:
reg += OMAP_MPUIO_GPIO_INT_EDGE;
break;
-#endif
#ifdef CONFIG_ARCH_OMAP15XX
case METHOD_GPIO_1510:
reg += OMAP1510_GPIO_INT_CONTROL;
@@ -787,6 +786,7 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
__raw_writel(l, reg);
}
+#endif
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
{
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 1e5648d3e3d8..2ed72013c2e2 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -89,16 +89,6 @@
#define USE_WAKEUP_LAT 0
#define IGNORE_WAKEUP_LAT 1
-/* XXX this should be moved into a separate file */
-#if defined(CONFIG_ARCH_OMAP2420)
-# define OMAP_32KSYNCT_BASE 0x48004000
-#elif defined(CONFIG_ARCH_OMAP2430)
-# define OMAP_32KSYNCT_BASE 0x49020000
-#elif defined(CONFIG_ARCH_OMAP3430)
-# define OMAP_32KSYNCT_BASE 0x48320000
-#else
-# error Unknown OMAP device
-#endif
/* Private functions */
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 5a79fc6ee818..31c2f4c30a95 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Thu Jan 28 22:15:54 2010
+# Last update: Sat Feb 20 14:16:15 2010
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -2257,7 +2257,7 @@ oratisalog MACH_ORATISALOG ORATISALOG 2268
oratismadi MACH_ORATISMADI ORATISMADI 2269
oratisot16 MACH_ORATISOT16 ORATISOT16 2270
oratisdesk MACH_ORATISDESK ORATISDESK 2271
-v2_ca9 MACH_V2P_CA9 V2P_CA9 2272
+vexpress MACH_VEXPRESS VEXPRESS 2272
sintexo MACH_SINTEXO SINTEXO 2273
cm3389 MACH_CM3389 CM3389 2274
omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
@@ -2636,3 +2636,45 @@ hw90240 MACH_HW90240 HW90240 2648
dm365_leopard MACH_DM365_LEOPARD DM365_LEOPARD 2649
mityomapl138 MACH_MITYOMAPL138 MITYOMAPL138 2650
scat110 MACH_SCAT110 SCAT110 2651
+acer_a1 MACH_ACER_A1 ACER_A1 2652
+cmcontrol MACH_CMCONTROL CMCONTROL 2653
+pelco_lamar MACH_PELCO_LAMAR PELCO_LAMAR 2654
+rfp43 MACH_RFP43 RFP43 2655
+sk86r0301 MACH_SK86R0301 SK86R0301 2656
+ctpxa MACH_CTPXA CTPXA 2657
+epb_arm9_a MACH_EPB_ARM9_A EPB_ARM9_A 2658
+guruplug MACH_GURUPLUG GURUPLUG 2659
+spear310 MACH_SPEAR310 SPEAR310 2660
+spear320 MACH_SPEAR320 SPEAR320 2661
+robotx MACH_ROBOTX ROBOTX 2662
+lsxhl MACH_LSXHL LSXHL 2663
+smartlite MACH_SMARTLITE SMARTLITE 2664
+cws2 MACH_CWS2 CWS2 2665
+m619 MACH_M619 M619 2666
+smartview MACH_SMARTVIEW SMARTVIEW 2667
+lsa_salsa MACH_LSA_SALSA LSA_SALSA 2668
+kizbox MACH_KIZBOX KIZBOX 2669
+htccharmer MACH_HTCCHARMER HTCCHARMER 2670
+guf_neso_lt MACH_GUF_NESO_LT GUF_NESO_LT 2671
+pm9g45 MACH_PM9G45 PM9G45 2672
+htcpanther MACH_HTCPANTHER HTCPANTHER 2673
+htcpanther_cdma MACH_HTCPANTHER_CDMA HTCPANTHER_CDMA 2674
+reb01 MACH_REB01 REB01 2675
+aquila MACH_AQUILA AQUILA 2676
+spark_sls_hw2 MACH_SPARK_SLS_HW2 SPARK_SLS_HW2 2677
+sheeva_esata MACH_ESATA_SHEEVAPLUG ESATA_SHEEVAPLUG 2678
+surf7x30 MACH_SURF7X30 SURF7X30 2679
+micro2440 MACH_MICRO2440 MICRO2440 2680
+am2440 MACH_AM2440 AM2440 2681
+tq2440 MACH_TQ2440 TQ2440 2682
+lpc2478oem MACH_LPC2478OEM LPC2478OEM 2683
+ak880x MACH_AK880X AK880X 2684
+cobra3530 MACH_COBRA3530 COBRA3530 2685
+pmppb MACH_PMPPB PMPPB 2686
+u6715 MACH_U6715 U6715 2687
+axar1500_sender MACH_AXAR1500_SENDER AXAR1500_SENDER 2688
+g30_dvb MACH_G30_DVB G30_DVB 2689
+vc088x MACH_VC088X VC088X 2690
+mioa702 MACH_MIOA702 MIOA702 2691
+hpmin MACH_HPMIN HPMIN 2692
+ak880xak MACH_AK880XAK AK880XAK 2693
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f60a5400a25b..a63c4be99b36 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -197,10 +197,13 @@ static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_
}
/*
- * Update the FPSCR with the additional exception flags.
+ * If any of the status flags are set, update the FPSCR.
* Comparison instructions always return at least one of
* these flags set.
*/
+ if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
+ fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);
+
fpscr |= exceptions;
fmxr(FPSCR, fpscr);
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1aa1ea5e9212..b13d1879e51b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
{
struct platform_device *pdev;
- struct mci_dma_slave *slave;
+ struct mci_dma_data *slave;
u32 pioa_mask;
u32 piob_mask;
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
ARRAY_SIZE(atmel_mci0_resource)))
goto fail;
- slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+ slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+ if (!slave)
+ goto fail;
slave->sdata.dma_dev = &dw_dmac0_device.dev;
slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
if (platform_device_add_data(pdev, data,
sizeof(struct mci_platform_data)))
- goto fail;
+ goto fail_free;
/* CLK line is common to both slots */
pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
/* Slot is unused */
break;
default:
- goto fail;
+ goto fail_free;
}
select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
break;
default:
if (!data->slot[0].bus_width)
- goto fail;
+ goto fail_free;
data->slot[1].bus_width = 0;
break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
platform_device_add(pdev);
return pdev;
+fail_free:
+ kfree(slave);
fail:
data->dma_slave = NULL;
- kfree(slave);
platform_device_put(pdev);
return NULL;
}
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 7ae58892ba8d..e97b255d97bc 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
#define acpi_noirq 0 /* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1 /* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0 /* no HT-only mode on IA64 */
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index e14108b19c09..4c41656ede87 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -201,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+ set_personality((current->personality & ~PER_MASK) | PER_LINUX)
+
#define elf_read_implies_exec(ex, executable_stack) \
((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
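
The new SET_PERSONALITY keeps every bit outside PER_MASK (flags such as ADDR_NO_RANDOMIZE) and replaces only the low personality type with PER_LINUX, instead of wiping the flags as the old call did. A small sketch of the mask arithmetic, with illustrative constants standing in for <linux/personality.h>:

    #include <stdio.h>

    /* Illustrative values modelled on <linux/personality.h>. */
    #define PER_MASK          0x00ffu
    #define PER_LINUX         0x0000u
    #define ADDR_NO_RANDOMIZE 0x0040000u

    /* Keep the flag bits, replace only the personality type. */
    static unsigned int to_linux_personality(unsigned int old)
    {
        return (old & ~PER_MASK) | PER_LINUX;
    }

    int main(void)
    {
        unsigned int old = 0x000cu | ADDR_NO_RANDOMIZE;  /* other type plus a flag */
        printf("0x%x -> 0x%x\n", old, to_linux_personality(old));
        return 0;
    }
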
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index ece1bf994499..e456f062f241 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index bb7c374713ad..6fced1fe3bf0 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31
-# Thu Sep 24 10:28:50 2009
+# Linux kernel version: 2.6.33-rc6
+# Wed Feb 3 10:02:59 2010
#
CONFIG_MICROBLAZE=y
# CONFIG_SWAP is not set
@@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_CSUM=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_PCI is not set
CONFIG_NO_DMA=y
+CONFIG_DTC=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -44,6 +48,7 @@ CONFIG_SYSVIPC_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -64,10 +69,12 @@ CONFIG_INITRAMFS_ROOT_GID=0
CONFIG_RD_GZIP=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
+# CONFIG_RD_LZO is not set
# CONFIG_INITRAMFS_COMPRESSION_NONE is not set
CONFIG_INITRAMFS_COMPRESSION_GZIP=y
# CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set
# CONFIG_INITRAMFS_COMPRESSION_LZMA is not set
+# CONFIG_INITRAMFS_COMPRESSION_LZO is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
CONFIG_ANON_INODES=y
@@ -90,21 +97,20 @@ CONFIG_EVENTFD=y
CONFIG_AIO=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
#
# GCOV-based kernel profiling
#
-# CONFIG_SLOW_WORK is not set
+CONFIG_SLOW_WORK=y
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_BASE_SMALL=1
@@ -123,14 +129,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -139,11 +172,6 @@ CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_PLATFORM_GENERIC=y
CONFIG_OPT_LIB_FUNCTION=y
CONFIG_OPT_LIB_ASM=y
-CONFIG_ALLOW_EDIT_AUTO=y
-
-#
-# Automatic platform settings from Kconfig.auto
-#
#
# Definitions for MICROBLAZE0
@@ -203,12 +231,11 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_SPLIT_PTLOCK_CPUS=999999
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
@@ -289,7 +316,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -313,6 +346,10 @@ CONFIG_OF_DEVICE=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
@@ -349,7 +386,6 @@ CONFIG_NETDEVICES=y
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
-# CONFIG_ETHOC is not set
# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -359,12 +395,12 @@ CONFIG_NET_ETHERNET=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
CONFIG_XILINX_EMACLITE=y
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -408,6 +444,7 @@ CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -433,7 +470,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
#
@@ -526,8 +562,6 @@ CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
-CONFIG_TMPFS=y
-# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
CONFIG_MISC_FILESYSTEMS=y
@@ -638,11 +672,13 @@ CONFIG_NLS_DEFAULT="iso8859-1"
#
# Kernel hacking
#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
@@ -662,6 +698,9 @@ CONFIG_DEBUG_SLAB=y
# CONFIG_DEBUG_SLAB_LEAK is not set
CONFIG_DEBUG_SPINLOCK=y
# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -680,10 +719,29 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_SAMPLES is not set
-# CONFIG_KMEMCHECK is not set
CONFIG_EARLY_PRINTK=y
# CONFIG_HEART_BEAT is not set
CONFIG_DEBUG_BOOTMEM=y
@@ -694,7 +752,11 @@ CONFIG_DEBUG_BOOTMEM=y
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index adb839bab704..ce2da535246a 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.31
-# Thu Sep 24 10:29:43 2009
+# Linux kernel version: 2.6.33-rc6
+# Wed Feb 3 10:03:21 2010
#
CONFIG_MICROBLAZE=y
# CONFIG_SWAP is not set
@@ -19,8 +19,12 @@ CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_GENERIC_GPIO=y
CONFIG_GENERIC_CSUM=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
# CONFIG_PCI is not set
CONFIG_NO_DMA=y
+CONFIG_DTC=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -46,6 +50,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -81,16 +86,16 @@ CONFIG_EVENTFD=y
CONFIG_AIO=y
#
-# Performance Counters
+# Kernel Performance Events And Counters
#
CONFIG_VM_EVENT_COUNTERS=y
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
#
# GCOV-based kernel profiling
@@ -116,14 +121,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -132,7 +164,10 @@ CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_PLATFORM_GENERIC=y
# CONFIG_SELFMOD is not set
# CONFIG_OPT_LIB_FUNCTION is not set
-# CONFIG_ALLOW_EDIT_AUTO is not set
+
+#
+# Definitions for MICROBLAZE0
+#
CONFIG_KERNEL_BASE_ADDR=0x90000000
CONFIG_XILINX_MICROBLAZE0_FAMILY="virtex5"
CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1
@@ -190,7 +225,6 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_NOMMU_INITIAL_TRIM_EXCESS=1
#
@@ -274,9 +308,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_AF_RXRPC is not set
CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
-CONFIG_CFG80211_DEFAULT_PS_VALUE=0
-CONFIG_WIRELESS_OLD_REGULATORY=y
-# CONFIG_WIRELESS_EXT is not set
# CONFIG_LIB80211 is not set
#
@@ -301,9 +332,9 @@ CONFIG_STANDALONE=y
# CONFIG_CONNECTOR is not set
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
CONFIG_MTD_CONCAT=y
CONFIG_MTD_PARTITIONS=y
-# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
CONFIG_MTD_CMDLINE_PARTS=y
# CONFIG_MTD_OF_PARTS is not set
@@ -387,6 +418,10 @@ CONFIG_OF_DEVICE=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
@@ -423,7 +458,6 @@ CONFIG_NETDEVICES=y
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
-# CONFIG_ETHOC is not set
# CONFIG_DNET is not set
# CONFIG_IBM_NEW_EMAC_ZMII is not set
# CONFIG_IBM_NEW_EMAC_RGMII is not set
@@ -433,12 +467,12 @@ CONFIG_NET_ETHERNET=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_XILINX_EMACLITE is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -482,6 +516,7 @@ CONFIG_SERIAL_UARTLITE=y
CONFIG_SERIAL_UARTLITE_CONSOLE=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -508,7 +543,6 @@ CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
#
@@ -616,7 +650,6 @@ CONFIG_INOTIFY_USER=y
CONFIG_PROC_FS=y
CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
-# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
CONFIG_MISC_FILESYSTEMS=y
@@ -672,11 +705,13 @@ CONFIG_MSDOS_PARTITION=y
#
# Kernel hacking
#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
@@ -695,12 +730,16 @@ CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_SELFTEST=y
CONFIG_DEBUG_OBJECTS_FREE=y
CONFIG_DEBUG_OBJECTS_TIMERS=y
+# CONFIG_DEBUG_OBJECTS_WORK is not set
CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
@@ -720,8 +759,28 @@ CONFIG_DEBUG_SG=y
# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_DYNAMIC_DEBUG is not set
# CONFIG_SAMPLES is not set
CONFIG_EARLY_PRINTK=y
@@ -734,7 +793,11 @@ CONFIG_EARLY_PRINTK=y
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index fc9997b73c09..267c7c779e53 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -217,7 +217,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
* Little endian
*/
-#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a));
+#define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
#define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
#define in_le32(a) __le32_to_cpu(__raw_readl(a))
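
The io.h change only removes the trailing semicolon from out_le32(). With the semicolon in place the macro expands to two statements, so an unbraced if/else such as "if (cond) out_le32(p, v); else ..." no longer compiles. A small sketch of the failure mode with hypothetical stand-in macros:

    #include <stdio.h>

    /* Hypothetical stand-ins for the kernel macros. BROKEN_OUT ends in a
     * semicolon, so "BROKEN_OUT(p, v);" becomes two statements and the
     * else below would no longer pair with the if. */
    #define BROKEN_OUT(p, v)  (*(p) = (v));
    #define FIXED_OUT(p, v)   (*(p) = (v))

    int main(void)
    {
        unsigned int reg = 0;
        int enable = 1;

        if (enable)
            FIXED_OUT(&reg, 0x1u);   /* single statement: if/else pairs correctly */
        else
            FIXED_OUT(&reg, 0x0u);

        /* Replacing FIXED_OUT with BROKEN_OUT above is a compile error. */
        printf("reg = %u\n", reg);
        return 0;
    }
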
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
index d9d63831cc2f..2a56bccce4e0 100644
--- a/arch/microblaze/kernel/cpu/cache.c
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -172,16 +172,15 @@ do { \
/* It is used only first parameter for OP - for wic, wdc */
#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
do { \
- int step = -line_length; \
- int count = end - start; \
- BUG_ON(count <= 0); \
+ int volatile temp; \
+ BUG_ON(end - start <= 0); \
\
- __asm__ __volatile__ (" 1: addk %0, %0, %1; \
- " #op " %0, r0; \
- bgtid %1, 1b; \
- addk %1, %1, %2; \
- " : : "r" (start), "r" (count), \
- "r" (step) : "memory"); \
+ __asm__ __volatile__ (" 1: " #op " %1, r0; \
+ cmpu %0, %1, %2; \
+ bgtid %0, 1b; \
+ addk %1, %1, %3; \
+ " : : "r" (temp), "r" (start), "r" (end),\
+ "r" (line_length) : "memory"); \
} while (0);
static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
@@ -313,16 +312,6 @@ static void __invalidate_dcache_all_wb(void)
pr_debug("%s\n", __func__);
CACHE_ALL_LOOP2(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
wdc.clear)
-
-#if 0
- unsigned int i;
-
- pr_debug("%s\n", __func__);
-
- /* Just loop through cache size and invalidate it */
- for (i = 0; i < cpuinfo.dcache_size; i += cpuinfo.dcache_line_length)
- __invalidate_dcache(0, i);
-#endif
}
static void __invalidate_dcache_range_wb(unsigned long start,
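
The CACHE_RANGE_LOOP_1 rewrite in the first hunk of the cache.c diff above drops the down-counting start/count/step form and instead steps an address from start toward end by one cache line per iteration, comparing against end before the increment. Roughly the same control flow in plain C, with a hypothetical cache_op() standing in for the wic/wdc instruction:

    /* C rendering of the rewritten loop: apply the cache op to each line,
     * compare the pre-increment address against end, then advance by one
     * line length (the add sits in the branch delay slot in the asm). */
    static void cache_range_loop(unsigned long start, unsigned long end,
                                 unsigned long line_length,
                                 void (*cache_op)(unsigned long))
    {
        int more;

        do {
            cache_op(start);
            more = end > start;      /* comparison uses the old address */
            start += line_length;
        } while (more);
    }
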
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
index 95b0855802df..391d6197fc3b 100644
--- a/arch/microblaze/kernel/entry-nommu.S
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -122,7 +122,7 @@ ENTRY(_interrupt)
ret_from_intr:
lwi r11, r1, PT_MODE
- bneid r11, 3f
+ bneid r11, no_intr_resched
lwi r6, r31, TS_THREAD_INFO /* get thread info */
lwi r19, r6, TI_FLAGS /* get flags in thread info */
@@ -133,16 +133,18 @@ ret_from_intr:
bralid r15, schedule
nop
1: andi r11, r19, _TIF_SIGPENDING
- beqid r11, no_intr_reshed
+ beqid r11, no_intr_resched
addk r5, r1, r0
addk r7, r0, r0
bralid r15, do_signal
addk r6, r0, r0
-no_intr_reshed:
+no_intr_resched:
+ /* Disable interrupts, we are now committed to the state restore */
+ disable_irq
+
/* save mode indicator */
lwi r11, r1, PT_MODE
-3:
swi r11, r0, PER_CPU(KM)
/* save r31 */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 5372b24ad049..bb8c4b9ccb80 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -54,6 +54,7 @@ void __init setup_arch(char **cmdline_p)
microblaze_cache_init();
+ invalidate_dcache();
enable_dcache();
invalidate_icache();
diff --git a/arch/mips/bcm47xx/prom.c b/arch/mips/bcm47xx/prom.c
index c51405e57921..29d3cbf9555f 100644
--- a/arch/mips/bcm47xx/prom.c
+++ b/arch/mips/bcm47xx/prom.c
@@ -141,6 +141,14 @@ static __init void prom_init_mem(void)
break;
}
+ /* Ignore the last page when the DDR size is 128M. Cached
+ * accesses to the last page cause the processor to prefetch
+ * using addresses above 128M, stepping outside the DDR
+ * address space.
+ */
+ if (mem == 0x8000000)
+ mem -= 0x1000;
+
add_memory_region(0, mem, BOOT_MEM_RAM);
}
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index ed84b4cb3c8d..84b6503f10b9 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23-rc2
-# Tue Aug 7 13:04:24 2007
+# Linux kernel version: 2.6.33-rc6
+# Wed Feb 3 18:12:31 2010
#
CONFIG_MIPS=y
@@ -9,20 +9,28 @@ CONFIG_MIPS=y
# Machine selection
#
# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
-# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_NEC_MARKEINS is not set
# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_POWERTV is not set
# CONFIG_SGI_IP22 is not set
CONFIG_SGI_IP27=y
+# CONFIG_SGI_IP28 is not set
# CONFIG_SGI_IP32 is not set
# CONFIG_SIBYTE_CRHINE is not set
# CONFIG_SIBYTE_CARMEL is not set
@@ -33,32 +41,39 @@ CONFIG_SGI_IP27=y
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
CONFIG_SGI_SN_M_MODE=y
# CONFIG_SGI_SN_N_MODE is not set
# CONFIG_MAPPED_KERNEL is not set
# CONFIG_REPLICATE_KTEXT is not set
# CONFIG_REPLICATE_EXHANDLERS is not set
+CONFIG_LOONGSON_UART_BASE=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_ARC=y
CONFIG_DMA_COHERENT=y
-CONFIG_EARLY_PRINTK=y
CONFIG_SYS_HAS_EARLY_PRINTK=y
# CONFIG_NO_IOPORT is not set
CONFIG_CPU_BIG_ENDIAN=y
# CONFIG_CPU_LITTLE_ENDIAN is not set
CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_DEFAULT_SGI_PARTITION=y
CONFIG_MIPS_L1_CACHE_SHIFT=7
CONFIG_ARC64=y
CONFIG_BOOT_ELF64=y
@@ -66,7 +81,8 @@ CONFIG_BOOT_ELF64=y
#
# CPU selection
#
-# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_LOONGSON2E is not set
+# CONFIG_CPU_LOONGSON2F is not set
# CONFIG_CPU_MIPS32_R1 is not set
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -79,6 +95,7 @@ CONFIG_BOOT_ELF64=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -86,6 +103,7 @@ CONFIG_CPU_R10000=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
CONFIG_SYS_HAS_CPU_R10000=y
CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
@@ -99,6 +117,7 @@ CONFIG_64BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
@@ -110,6 +129,7 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_IRQ_PER_CPU=y
CONFIG_CPU_SUPPORTS_HIGHMEM=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_NUMA=y
CONFIG_SYS_SUPPORTS_NUMA=y
CONFIG_NODES_SHIFT=6
@@ -120,16 +140,22 @@ CONFIG_DISCONTIGMEM_MANUAL=y
CONFIG_DISCONTIGMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_MIGRATION=y
-CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_SMP=y
CONFIG_SYS_SUPPORTS_SMP=y
CONFIG_NR_CPUS_DEFAULT_64=y
CONFIG_NR_CPUS=64
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
# CONFIG_HZ_48 is not set
# CONFIG_HZ_100 is not set
# CONFIG_HZ_128 is not set
@@ -142,13 +168,13 @@ CONFIG_HZ=1000
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
-CONFIG_PREEMPT_BKL=y
# CONFIG_MIPS_INSANE_LARGE is not set
# CONFIG_KEXEC is not set
CONFIG_SECCOMP=y
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -162,20 +188,41 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=64
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
+# CONFIG_GROUP_SCHED is not set
CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+# CONFIG_CGROUP_NS is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CGROUP_DEVICE is not set
CONFIG_CPUSETS=y
-CONFIG_SYSFS_DEPRECATED=y
+CONFIG_PROC_PID_CPUSET=y
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_RESOURCE_COUNTERS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
@@ -184,44 +231,92 @@ CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
+# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
-CONFIG_ANON_INODES=y
CONFIG_EPOLL=y
CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_SYSCALL_WRAPPERS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_SLOW_WORK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_BLOCK=y
-# CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+# CONFIG_BLK_CGROUP is not set
+CONFIG_BLOCK_COMPAT=y
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
+# CONFIG_CFQ_GROUP_IOSCHED is not set
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+# CONFIG_FREEZER is not set
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -230,11 +325,10 @@ CONFIG_HW_HAS_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCI_LEGACY is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
# CONFIG_PCCARD is not set
# CONFIG_HOTPLUG_PCI is not set
@@ -242,8 +336,9 @@ CONFIG_MMU=y
# Executable file formats
#
CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+# CONFIG_HAVE_AOUT is not set
# CONFIG_BINFMT_MISC is not set
-# CONFIG_BUILD_ELF64 is not set
CONFIG_MIPS32_COMPAT=y
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
@@ -255,13 +350,10 @@ CONFIG_BINFMT_ELF32=y
# Power management options
#
CONFIG_PM=y
-# CONFIG_PM_LEGACY is not set
# CONFIG_PM_DEBUG is not set
-
-#
-# Networking
-#
+# CONFIG_PM_RUNTIME is not set
CONFIG_NET=y
+CONFIG_COMPAT_NETLINK_MESSAGES=y
#
# Networking options
@@ -273,6 +365,8 @@ CONFIG_XFRM=y
CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_IPCOMP=m
CONFIG_NET_KEY=y
CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
@@ -292,19 +386,40 @@ CONFIG_IP_PNP=y
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
-# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_TUNNEL=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=y
CONFIG_INET_DIAG=y
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
CONFIG_TCP_MD5SIG=y
-# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETWORK_SECMARK=y
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
@@ -314,9 +429,11 @@ CONFIG_IP_SCTP=m
# CONFIG_SCTP_HMAC_NONE is not set
# CONFIG_SCTP_HMAC_SHA1 is not set
CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -326,12 +443,9 @@ CONFIG_SCTP_HMAC_MD5=y
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_FIFO=y
#
# Queueing/Scheduling
@@ -340,7 +454,7 @@ CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RR=m
+CONFIG_NET_SCH_MULTIQ=y
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
@@ -348,6 +462,7 @@ CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
+# CONFIG_NET_SCH_DRR is not set
CONFIG_NET_SCH_INGRESS=m
#
@@ -364,41 +479,63 @@ CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
# CONFIG_NET_EMATCH is not set
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
# CONFIG_NET_ACT_SIMP is not set
-CONFIG_NET_CLS_POLICE=y
+CONFIG_NET_ACT_SKBEDIT=m
# CONFIG_NET_CLS_IND is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
-CONFIG_CFG80211=m
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_WIRELESS_OLD_REGULATORY is not set
+CONFIG_CFG80211_WEXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+# CONFIG_LIB80211_DEBUG is not set
CONFIG_MAC80211=m
-# CONFIG_MAC80211_DEBUG is not set
-CONFIG_IEEE80211=m
-# CONFIG_IEEE80211_DEBUG is not set
-CONFIG_IEEE80211_CRYPT_WEP=m
-CONFIG_IEEE80211_CRYPT_CCMP=m
-CONFIG_IEEE80211_CRYPT_TKIP=m
-CONFIG_IEEE80211_SOFTMAC=m
-# CONFIG_IEEE80211_SOFTMAC_DEBUG is not set
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
CONFIG_RFKILL=m
+CONFIG_RFKILL_LEDS=y
# CONFIG_NET_9P is not set
#
@@ -408,9 +545,13 @@ CONFIG_RFKILL=m
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_CONNECTOR=m
# CONFIG_MTD is not set
@@ -423,14 +564,19 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
+# CONFIG_BLK_DEV_DRBD is not set
# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_OSD=m
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_RAM is not set
CONFIG_CDROM_PKTCDVD=m
CONFIG_CDROM_PKTCDVD_BUFFERS=8
# CONFIG_CDROM_PKTCDVD_WCACHE is not set
CONFIG_ATA_OVER_ETH=m
+# CONFIG_BLK_DEV_HD is not set
# CONFIG_MISC_DEVICES is not set
+CONFIG_EEPROM_93CX6=m
+CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
@@ -453,10 +599,6 @@ CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_CHR_DEV_SCH=m
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
# CONFIG_SCSI_MULTI_LUN is not set
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
@@ -471,11 +613,18 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_HOST_SMP=y
# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
+CONFIG_SCSI_CXGB3_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_BE2ISCSI=m
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+CONFIG_SCSI_HPSA=m
# CONFIG_SCSI_3W_9XXX is not set
+CONFIG_SCSI_3W_SAS=m
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
@@ -483,11 +632,21 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_AIC79XX is not set
CONFIG_SCSI_AIC94XX=m
# CONFIG_AIC94XX_DEBUG is not set
+CONFIG_SCSI_MVSAS=m
+# CONFIG_SCSI_MVSAS_DEBUG is not set
+CONFIG_SCSI_DPT_I2O=m
+# CONFIG_SCSI_ADVANSYS is not set
# CONFIG_SCSI_ARCMSR is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+# CONFIG_SCSI_MPT2SAS_LOGGING is not set
# CONFIG_SCSI_HPTIOP is not set
+CONFIG_LIBFC=m
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_IPS is not set
@@ -502,16 +661,31 @@ CONFIG_SCSI_QLOGIC_1280=y
# CONFIG_SCSI_DC395x is not set
# CONFIG_SCSI_DC390T is not set
# CONFIG_SCSI_DEBUG is not set
+CONFIG_SCSI_PMCRAID=m
+# CONFIG_SCSI_PM8001 is not set
# CONFIG_SCSI_SRP is not set
+CONFIG_SCSI_BFA_FC=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_SCSI_OSD_DPRINT_SENSE=1
+# CONFIG_SCSI_OSD_DEBUG is not set
# CONFIG_ATA is not set
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
CONFIG_MD_LINEAR=m
CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_MD_RAID10=m
CONFIG_MD_RAID456=y
-CONFIG_MD_RAID5_RESHAPE=y
+# CONFIG_MULTICORE_RAID456 is not set
+CONFIG_MD_RAID6_PQ=y
+# CONFIG_ASYNC_RAID6_TEST is not set
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
@@ -519,36 +693,39 @@ CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
-CONFIG_DM_MULTIPATH_EMC=m
-CONFIG_DM_MULTIPATH_RDAC=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_FUSION is not set
#
-# Fusion MPT device support
+# IEEE 1394 (FireWire) support
#
-# CONFIG_FUSION is not set
-# CONFIG_FUSION_SPI is not set
-# CONFIG_FUSION_FC is not set
-# CONFIG_FUSION_SAS is not set
#
-# IEEE 1394 (FireWire) support
+# You can enable one or both FireWire driver stacks.
+#
+
+#
+# The newer stack is recommended.
#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-CONFIG_NETDEVICES_MULTIQUEUE=y
CONFIG_IFB=m
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
CONFIG_MACVLAN=m
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+CONFIG_VETH=m
# CONFIG_ARCNET is not set
-CONFIG_PHYLIB=m
+CONFIG_PHYLIB=y
#
# MII PHY device drivers
@@ -562,23 +739,51 @@ CONFIG_VITESSE_PHY=m
CONFIG_SMSC_PHY=m
# CONFIG_BROADCOM_PHY is not set
CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_STE10XP=m
+CONFIG_LSI_ET1011C_PHY=m
# CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=m
CONFIG_NET_ETHERNET=y
CONFIG_MII=y
CONFIG_AX88796=m
+CONFIG_AX88796_93CX6=y
CONFIG_SGI_IOC3_ETH=y
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_SMC91X=m
# CONFIG_DM9000 is not set
+CONFIG_ETHOC=m
+CONFIG_SMSC911X=m
+CONFIG_DNET=m
# CONFIG_NET_TULIP is not set
# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_NET_PCI is not set
+CONFIG_B44=m
+CONFIG_B44_PCI_AUTOSELECT=y
+CONFIG_B44_PCICORE_AUTOSELECT=y
+CONFIG_B44_PCI=y
+CONFIG_KS8842=m
+CONFIG_KS8851_MLL=m
+CONFIG_ATL2=m
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
+CONFIG_E1000E=m
+CONFIG_IP1000=m
+CONFIG_IGB=m
+CONFIG_IGBVF=m
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
@@ -588,24 +793,75 @@ CONFIG_NETDEV_1000=y
# CONFIG_SKY2 is not set
CONFIG_VIA_VELOCITY=m
# CONFIG_TIGON3 is not set
-# CONFIG_BNX2 is not set
+CONFIG_BNX2=m
+CONFIG_CNIC=m
CONFIG_QLA3XXX=m
# CONFIG_ATL1 is not set
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_JME=m
CONFIG_NETDEV_10000=y
+CONFIG_MDIO=m
# CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
CONFIG_CHELSIO_T3=m
+CONFIG_ENIC=m
+CONFIG_IXGBE=m
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
+CONFIG_VXGE=m
+# CONFIG_VXGE_DEBUG_TRACE_ALL is not set
# CONFIG_MYRI10GE is not set
CONFIG_NETXEN_NIC=m
-# CONFIG_MLX4_CORE is not set
+CONFIG_NIU=m
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_CORE=m
+# CONFIG_MLX4_DEBUG is not set
+CONFIG_TEHUTI=m
+CONFIG_BNX2X=m
+CONFIG_QLGE=m
+CONFIG_SFC=m
+CONFIG_BE2NET=m
# CONFIG_TR is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-CONFIG_WLAN_80211=y
+CONFIG_WLAN=y
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_RTL8180=m
+CONFIG_ADM8211=m
+# CONFIG_MAC80211_HWSIM is not set
+CONFIG_MWL8K=m
+CONFIG_ATH_COMMON=m
+# CONFIG_ATH_DEBUG is not set
+CONFIG_ATH5K=m
+# CONFIG_ATH5K_DEBUG is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K=m
+CONFIG_B43=m
+CONFIG_B43_PCI_AUTOSELECT=y
+CONFIG_B43_PCICORE_AUTOSELECT=y
+CONFIG_B43_PHY_LP=y
+CONFIG_B43_LEDS=y
+CONFIG_B43_HWRNG=y
+# CONFIG_B43_DEBUG is not set
+CONFIG_B43LEGACY=m
+CONFIG_B43LEGACY_PCI_AUTOSELECT=y
+CONFIG_B43LEGACY_PCICORE_AUTOSELECT=y
+CONFIG_B43LEGACY_LEDS=y
+CONFIG_B43LEGACY_HWRNG=y
+# CONFIG_B43LEGACY_DEBUG is not set
+CONFIG_B43LEGACY_DMA=y
+CONFIG_B43LEGACY_PIO=y
+CONFIG_B43LEGACY_DMA_AND_PIO_MODE=y
+# CONFIG_B43LEGACY_DMA_MODE is not set
+# CONFIG_B43LEGACY_PIO_MODE is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
CONFIG_IPW2100=m
CONFIG_IPW2100_MONITOR=y
CONFIG_IPW2100_DEBUG=y
@@ -615,38 +871,57 @@ CONFIG_IPW2200_RADIOTAP=y
CONFIG_IPW2200_PROMISCUOUS=y
CONFIG_IPW2200_QOS=y
CONFIG_IPW2200_DEBUG=y
+CONFIG_LIBIPW=m
+# CONFIG_LIBIPW_DEBUG is not set
+CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT=y
+# CONFIG_IWLWIFI_DEBUG is not set
+CONFIG_IWLAGN=m
+CONFIG_IWL4965=y
+CONFIG_IWL5000=y
+CONFIG_IWL3945=m
+CONFIG_IWL3945_SPECTRUM_MEASUREMENT=y
CONFIG_LIBERTAS=m
# CONFIG_LIBERTAS_DEBUG is not set
CONFIG_HERMES=m
+# CONFIG_HERMES_CACHE_FW_ON_INIT is not set
CONFIG_PLX_HERMES=m
CONFIG_TMD_HERMES=m
CONFIG_NORTEL_HERMES=m
CONFIG_PCI_HERMES=m
-CONFIG_ATMEL=m
-CONFIG_PCI_ATMEL=m
-CONFIG_PRISM54=m
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_HOSTAP_PLX=m
-CONFIG_HOSTAP_PCI=m
-CONFIG_BCM43XX=m
-CONFIG_BCM43XX_DEBUG=y
-CONFIG_BCM43XX_DMA=y
-CONFIG_BCM43XX_PIO=y
-CONFIG_BCM43XX_DMA_AND_PIO_MODE=y
-# CONFIG_BCM43XX_DMA_MODE is not set
-# CONFIG_BCM43XX_PIO_MODE is not set
+CONFIG_P54_COMMON=m
+CONFIG_P54_PCI=m
+CONFIG_P54_LEDS=y
+CONFIG_RT2X00=m
+CONFIG_RT2400PCI=m
+CONFIG_RT2500PCI=m
+CONFIG_RT61PCI=m
+CONFIG_RT2800PCI_PCI=m
+CONFIG_RT2800PCI=m
+CONFIG_RT2800_LIB=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_HT=y
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WL12XX=m
+CONFIG_WL1251=m
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
# CONFIG_NET_FC is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -664,13 +939,16 @@ CONFIG_SERIO_SERPORT=y
# CONFIG_SERIO_PCIPS2 is not set
CONFIG_SERIO_LIBPS2=m
CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
# CONFIG_GAMEPORT is not set
#
# Character devices
#
# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
+CONFIG_NOZOMI=m
#
# Serial drivers
@@ -693,95 +971,258 @@ CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
CONFIG_HW_RANDOM=m
-# CONFIG_RTC is not set
+CONFIG_HW_RANDOM_TIMERIOMEM=m
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
-# CONFIG_DRM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
-# CONFIG_I2C is not set
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+CONFIG_I2C_ALI1535=m
+CONFIG_I2C_ALI1563=m
+CONFIG_I2C_ALI15X3=m
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD8111=m
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_SIS5595=m
+CONFIG_I2C_SIS630=m
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
#
-# SPI support
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
+CONFIG_I2C_OCORES=m
+CONFIG_I2C_SIMTEC=m
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_PARPORT_LIGHT=m
+CONFIG_I2C_TAOS_EVM=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_STUB=m
+
+#
+# Miscellaneous I2C Chip support
+#
+CONFIG_SENSORS_TSL2550=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+
+#
+# PPS support
+#
+CONFIG_PPS=m
+# CONFIG_PPS_DEBUG is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
+CONFIG_THERMAL=m
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
-# Multifunction device drivers
-#
-# CONFIG_MFD_SM501 is not set
+# Sonics Silicon Backplane
+#
+CONFIG_SSB=m
+CONFIG_SSB_SPROM=y
+CONFIG_SSB_PCIHOST_POSSIBLE=y
+CONFIG_SSB_PCIHOST=y
+CONFIG_SSB_B43_PCI_BRIDGE=y
+# CONFIG_SSB_SILENT is not set
+# CONFIG_SSB_DEBUG is not set
+CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
+CONFIG_SSB_DRIVER_PCICORE=y
+# CONFIG_SSB_DRIVER_MIPS is not set
#
-# Multimedia devices
+# Multifunction device drivers
#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_DAB is not set
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_WM8400 is not set
+CONFIG_MFD_WM8350=m
+CONFIG_MFD_WM8350_I2C=m
+CONFIG_MFD_PCF50633=m
+CONFIG_PCF50633_ADC=m
+CONFIG_PCF50633_GPIO=m
+CONFIG_AB3100_CORE=m
+CONFIG_AB3100_OTP=m
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
-
-#
-# Display device support
-#
-# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_DRM is not set
# CONFIG_VGASTATE is not set
# CONFIG_VIDEO_OUTPUT_CONTROL is not set
# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
-# Sound
+# Display device support
#
+# CONFIG_DISPLAY_SUPPORT is not set
# CONFIG_SOUND is not set
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
CONFIG_USB_ARCH_HAS_EHCI=y
# CONFIG_USB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
#
#
-# USB Gadget Support
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
# CONFIG_MMC is not set
-# CONFIG_NEW_LEDS is not set
-# CONFIG_INFINIBAND is not set
-# CONFIG_RTC_CLASS is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+
+#
+# LED drivers
+#
+CONFIG_LEDS_LP3944=m
+CONFIG_LEDS_PCA955X=m
+CONFIG_LEDS_WM8350=m
+CONFIG_LEDS_BD2802=m
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
#
-# DMA Engine support
+# iptables trigger is under Netfilter config (LED target)
#
-# CONFIG_DMA_ENGINE is not set
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
#
-# DMA Clients
+# RTC interfaces
#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
#
-# DMA Devices
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+
#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+CONFIG_RTC_DRV_M48T35=y
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_WM8350 is not set
+# CONFIG_RTC_DRV_PCF50633 is not set
+CONFIG_RTC_DRV_AB3100=m
#
-# Userspace I/O
+# on-CPU RTC drivers
#
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
CONFIG_UIO=y
# CONFIG_UIO_CIF is not set
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+CONFIG_UIO_SMX=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
#
# File systems
@@ -792,36 +1233,58 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_XATTR=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
CONFIG_JBD=y
-CONFIG_JBD_DEBUG=y
+CONFIG_JBD2=y
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
-CONFIG_XFS_SECURITY=y
CONFIG_XFS_POSIX_ACL=y
# CONFIG_XFS_RT is not set
+# CONFIG_XFS_DEBUG is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
+CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QUOTACTL=y
-CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=m
# CONFIG_AUTOFS4_FS is not set
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
CONFIG_GENERIC_ACL=y
#
+# Caches
+#
+CONFIG_FSCACHE=m
+CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+# CONFIG_FSCACHE_OBJECT_LIST is not set
+CONFIG_CACHEFILES=m
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
+
+#
# CD-ROM/DVD Filesystems
#
# CONFIG_ISO9660_FS is not set
@@ -840,16 +1303,13 @@ CONFIG_GENERIC_ACL=y
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_ECRYPT_FS is not set
@@ -859,28 +1319,32 @@ CONFIG_CONFIGFS_FS=m
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+CONFIG_OMFS_FS=m
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+CONFIG_EXOFS_FS=m
+# CONFIG_EXOFS_DEBUG is not set
+CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
-# CONFIG_NFSD is not set
# CONFIG_ROOT_NFS is not set
+# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
+CONFIG_EXPORTFS=m
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
-# CONFIG_SUNRPC_BIND34 is not set
CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
@@ -910,35 +1374,37 @@ CONFIG_SGI_PARTITION=y
# CONFIG_KARMA_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
# CONFIG_SYSV68_PARTITION is not set
-
-#
-# Native Language Support
-#
# CONFIG_NLS is not set
-
-#
-# Distributed Lock Manager
-#
CONFIG_DLM=m
# CONFIG_DLM_DEBUG is not set
#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
-
-#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_CROSSCOMPILE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_EARLY_PRINTK=y
# CONFIG_CMDLINE_BOOL is not set
#
@@ -947,65 +1413,140 @@ CONFIG_CROSSCOMPILE=y
CONFIG_KEYS=y
CONFIG_KEYS_DEBUG_PROC_KEYS=y
# CONFIG_SECURITY is not set
-CONFIG_XOR_BLOCKS=m
-CONFIG_ASYNC_CORE=m
-CONFIG_ASYNC_MEMCPY=m
-CONFIG_ASYNC_XOR=m
+CONFIG_SECURITYFS=y
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_XOR_BLOCKS=y
+CONFIG_ASYNC_CORE=y
+CONFIG_ASYNC_MEMCPY=y
+CONFIG_ASYNC_XOR=y
+CONFIG_ASYNC_PQ=y
+CONFIG_ASYNC_RAID6_RECOV=y
CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_ABLKCIPHER=m
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_VMAC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_GHASH=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_DES=y
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
CONFIG_CRYPTO_AES=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CAMELLIA=m
-# CONFIG_CRYPTO_TEST is not set
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_HIFN_795X=m
+# CONFIG_CRYPTO_DEV_HIFN_795X_RNG is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
CONFIG_CRC_CCITT=m
-# CONFIG_CRC16 is not set
-# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=m
+CONFIG_CRC_ITU_T=m
CONFIG_CRC32=y
-# CONFIG_CRC7 is not set
+CONFIG_CRC7=m
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=m
CONFIG_ZLIB_DEFLATE=m
-CONFIG_PLIST=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=m
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 9c187a64649b..758ad426c57f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -287,9 +287,9 @@ static inline int __cpu_has_fpu(void)
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
#ifdef __NEED_VMBITS_PROBE
- write_c0_entryhi(0x3ffffffffffff000ULL);
+ write_c0_entryhi(0x3fffffffffffe000ULL);
back_to_back_c0_hazard();
- c->vmbits = fls64(read_c0_entryhi() & 0x3ffffffffffff000ULL);
+ c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
#endif
}
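
For illustration only (not part of the patch above): the vmbits probe writes a test pattern to EntryHi and counts how many bits of it survive the read-back. A minimal standalone sketch of that arithmetic, assuming a CPU that implements 40 virtual address bits; fls64_sketch() stands in for the kernel's fls64() and every value here is invented:

#include <stdint.h>
#include <stdio.h>

/* 1-based index of the most significant set bit, 0 for x == 0 */
static int fls64_sketch(uint64_t x)
{
        int bit = 0;

        while (x) {
                bit++;
                x >>= 1;
        }
        return bit;
}

int main(void)
{
        uint64_t pattern  = 0x3fffffffffffe000ULL;          /* written to EntryHi */
        uint64_t readback = pattern & ((1ULL << 40) - 1);    /* CPU keeps 40 VA bits */

        printf("vmbits = %d\n", fls64_sketch(readback));     /* prints 40 */
        return 0;
}
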
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 338dfe8ed002..31b204b26ba0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1501,6 +1501,7 @@ void __cpuinit per_cpu_trap_init(void)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
+ cp0_compare_irq_shift = cp0_compare_irq;
cp0_perfcount_irq = -1;
}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 94e05e5733c1..e06f1af760a7 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -174,7 +174,7 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
* Probe Octeon's caches
*
*/
-static void __devinit probe_octeon(void)
+static void __cpuinit probe_octeon(void)
{
unsigned long icache_size;
unsigned long dcache_size;
@@ -235,7 +235,7 @@ static void __devinit probe_octeon(void)
* Setup the Octeon cache flush routines
*
*/
-void __devinit octeon_cache_init(void)
+void __cpuinit octeon_cache_init(void)
{
extern unsigned long ebase;
extern char except_vec2_octeon;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 102b2dfa542a..e716cafc346d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -155,7 +155,7 @@ static inline void setup_protection_map(void)
protection_map[15] = PAGE_SHARED;
}
-void __devinit cpu_cache_init(void)
+void __cpuinit cpu_cache_init(void)
{
if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index e274fda329f4..127d732474bf 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 46f00691f448..31e2583ec622 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -404,7 +404,7 @@ void __init sni_rm200_i8259_irqs(void)
if (!rm200_pic_master)
return;
rm200_pic_slave = ioremap_nocache(0x160000a0, 4);
- if (!rm200_pic_master) {
+ if (!rm200_pic_slave) {
iounmap(rm200_pic_master);
return;
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 524d9352f17e..f388dc68f605 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,7 +18,6 @@ config PARISC
select BUG
select HAVE_PERF_EVENTS
select GENERIC_ATOMIC64 if !64BIT
- select HAVE_ARCH_TRACEHOOK
help
The PA-RISC microprocessor is designed by Hewlett-Packard and used
in many of their workstations & servers (HP9000 700 and 800 series,
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index f7064abc3bb6..9e74bfe071dc 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -18,7 +18,6 @@
#include <asm/io.h>
#include <asm/system.h>
-#include <asm/cache.h> /* for L1_CACHE_BYTES */
#include <asm/superio.h>
#define DEBUG_RESOURCES 0
@@ -123,6 +122,10 @@ static int __init pcibios_init(void)
} else {
printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
}
+
+ /* Set the CLS for PCI as early as possible. */
+ pci_cache_line_size = pci_dfl_cache_line_size;
+
return 0;
}
@@ -171,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
** upper byte is PCI_LATENCY_TIMER.
*/
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
- (0x80 << 8) | (L1_CACHE_BYTES / sizeof(u32)));
+ (0x80 << 8) | pci_cache_line_size);
}
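
For illustration only (not part of the patch above): the 16-bit config word written to PCI_CACHE_LINE_SIZE packs the latency timer into the upper byte and the cache line size, expressed in 32-bit words, into the lower byte. A standalone sketch assuming a 64-byte cache line; the constants are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  latency    = 0x80;      /* upper byte: PCI latency timer */
        uint8_t  cls_dwords = 64 / 4;    /* lower byte: cache line size in dwords */
        uint16_t word       = (uint16_t)((latency << 8) | cls_dwords);

        printf("config word = 0x%04x\n", word);   /* prints 0x8010 */
        return 0;
}
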
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index fb37ac52e46c..35c827e94e31 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -468,7 +468,9 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- tracehook_signal_handler(sig, info, ka, regs, 0);
+ tracehook_signal_handler(sig, info, ka, regs,
+ test_thread_flag(TIF_SINGLESTEP) ||
+ test_thread_flag(TIF_BLOCKSTEP));
return 1;
}
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 282d9306361f..1ec06576f619 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -63,15 +63,21 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
psize = get_slice_psize(mm, addr);
+ /* Mask the address for the correct page size */
+ addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
BUG();
psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
- } else
+ } else {
psize = pte_pagesize_index(mm, addr, pte);
+ /* Mask the address for the standard page size. If we
+ * have a 64k page kernel, but the hardware does not
+ * support 64k pages, this might be different from the
+ * hardware page size encoded in the slice table. */
+ addr &= PAGE_MASK;
+ }
- /* Mask the address for the correct page size */
- addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
/* Build full vaddr */
if (!is_kernel_addr(addr)) {
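
For illustration only (not part of the patch above): both branches now reduce the address to a page boundary, and the huge-page case builds the mask from the page-size shift. A standalone sketch of that masking with an invented 24-bit (16MB) shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int shift = 24;                         /* e.g. a 16MB page */
        uint64_t     addr  = 0x123456789abcdULL;
        uint64_t     mask  = ~((1ULL << shift) - 1);

        /* prints 0x123456789abcd -> 0x1234567000000 */
        printf("0x%llx -> 0x%llx\n",
               (unsigned long long)addr,
               (unsigned long long)(addr & mask));
        return 0;
}
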
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 21f61b8c445b..cc29c0f5300d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
}
mpic = mpic_alloc(np, r.start,
- MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+ MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+ MPIC_BROKEN_FRR_NIRQS,
0, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
of_node_put(np);
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 04160a4cc699..a15f582300d8 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
__iomem u32 *bptr_vaddr;
struct device_node *np;
int n = 0;
+ int ioremappable;
WARN_ON (nr < 0 || nr >= NR_CPUS);
@@ -59,21 +60,37 @@ smp_85xx_kick_cpu(int nr)
return;
}
+ /*
+ * A secondary core could be in a spinloop in the bootpage
+ * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
+ * The bootpage and highmem can be accessed via ioremap(), but
+ * we need to directly access the spinloop if its in lowmem.
+ */
+ ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+
/* Map the spin table */
- bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+ if (ioremappable)
+ bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+ else
+ bptr_vaddr = phys_to_virt(*cpu_rel_addr);
local_irq_save(flags);
out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
+ if (!ioremappable)
+ flush_dcache_range((ulong)bptr_vaddr,
+ (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+
/* Wait a bit for the CPU to ack. */
while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
mdelay(1);
local_irq_restore(flags);
- iounmap(bptr_vaddr);
+ if (ioremappable)
+ iounmap(bptr_vaddr);
pr_debug("waited %d msecs for CPU #%d.\n", n, nr);
}
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1ee66db003be..f5f79196721c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -784,9 +784,13 @@ static void xics_set_cpu_priority(unsigned char cppr)
{
struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
- BUG_ON(os_cppr->index != 0);
+ /*
+ * we only really want to set the priority when there's
+ * just one cppr value on the stack
+ */
+ WARN_ON(os_cppr->index != 0);
- os_cppr->stack[os_cppr->index] = cppr;
+ os_cppr->stack[0] = cppr;
if (firmware_has_feature(FW_FEATURE_LPAR))
lpar_cppr_info(cppr);
@@ -821,8 +825,14 @@ void xics_setup_cpu(void)
void xics_teardown_cpu(void)
{
+ struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr);
int cpu = smp_processor_id();
+ /*
+ * we have to reset the cppr index to 0 because we're
+ * not going to return from the IPI
+ */
+ os_cppr->index = 0;
xics_set_cpu_priority(0);
/* Clear any pending IPI request */
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index f2ef4b619ce1..c25dfac7dd76 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -293,12 +293,12 @@ struct _lowcore
__u64 clock_comparator; /* 0x02d0 */
__u32 machine_flags; /* 0x02d8 */
__u32 ftrace_func; /* 0x02dc */
- __u8 pad_0x02f0[0x0300-0x02f0]; /* 0x02f0 */
+ __u8 pad_0x02e0[0x0300-0x02e0]; /* 0x02e0 */
/* Interrupt response block */
__u8 irb[64]; /* 0x0300 */
- __u8 pad_0x0400[0x0e00-0x0400]; /* 0x0400 */
+ __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
/*
* 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 3f7e2a22c7c2..f6a389c996cb 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -132,7 +132,6 @@ ENTRY(tlb_protection_violation_store)
mov #1, r5
call_handle_tlbmiss:
- setup_frame_reg
mov.l 1f, r0
mov r5, r8
mov.l @r0, r6
@@ -365,6 +364,8 @@ handle_exception:
mov.l @k2, k2 ! read out vector and keep in k2
handle_exception_special:
+ setup_frame_reg
+
! Setup return address and jump to exception handler
mov.l 7f, r9 ! fetch return address
stc r2_bank, r0 ! k2 (vector)
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index 88d28ec3780a..e51168064e56 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -540,6 +540,8 @@ void dwarf_free_frame(struct dwarf_frame *frame)
mempool_free(frame, dwarf_frame_pool);
}
+extern void ret_from_irq(void);
+
/**
* dwarf_unwind_stack - unwind the stack
*
@@ -678,6 +680,24 @@ struct dwarf_frame * dwarf_unwind_stack(unsigned long pc,
addr = frame->cfa + reg->addr;
frame->return_addr = __raw_readl(addr);
+ /*
+ * Ah, the joys of unwinding through interrupts.
+ *
+ * Interrupts are tricky - the DWARF info needs to be _really_
+ * accurate and unfortunately I'm seeing a lot of bogus DWARF
+ * info. For example, I've seen interrupts occur in epilogues
+ * just after the frame pointer (r14) had been restored. The
+ * problem was that the DWARF info claimed that the CFA could be
+ * reached by using the value of the frame pointer before it was
+ * restored.
+ *
+ * So until the compiler can be trusted to produce reliable
+ * DWARF info when it really matters, let's stop unwinding once
+ * we've calculated the function that was interrupted.
+ */
+ if (prev && prev->pc == (unsigned long)ret_from_irq)
+ frame->return_addr = 0;
+
return frame;
bail:
diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
index f0abd58c3a69..2b15ae60c3a0 100644
--- a/arch/sh/kernel/entry-common.S
+++ b/arch/sh/kernel/entry-common.S
@@ -70,8 +70,14 @@ ret_from_exception:
CFI_STARTPROC simple
CFI_DEF_CFA r14, 0
CFI_REL_OFFSET 17, 64
- CFI_REL_OFFSET 15, 0
+ CFI_REL_OFFSET 15, 60
CFI_REL_OFFSET 14, 56
+ CFI_REL_OFFSET 13, 52
+ CFI_REL_OFFSET 12, 48
+ CFI_REL_OFFSET 11, 44
+ CFI_REL_OFFSET 10, 40
+ CFI_REL_OFFSET 9, 36
+ CFI_REL_OFFSET 8, 32
preempt_stop()
ENTRY(ret_from_irq)
!
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 873ebdc4f98e..b063eb8b18e3 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -133,6 +133,8 @@ void user_enable_single_step(struct task_struct *child)
struct pt_regs *regs = child->thread.uregs;
regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
@@ -140,6 +142,8 @@ void user_disable_single_step(struct task_struct *child)
struct pt_regs *regs = child->thread.uregs;
regs->sr &= ~SR_SSTEP;
+
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
static int genregs_get(struct task_struct *target,
@@ -454,6 +458,8 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
+ int step;
+
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
regs->regs[9]);
@@ -461,8 +467,9 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[9]);
- if (test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, 0);
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
/* Called with interrupts disabled */
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index ce76dbdef294..580e97d46ca5 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -118,7 +118,9 @@ static int do_signal(struct pt_regs *regs, sigset_t *oldset)
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
- tracehook_signal_handler(signr, &info, &ka, regs, 0);
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
return 1;
}
}
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
index 55db5eca08e2..39327d6a57eb 100644
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@ struct stat {
ino_t st_ino;
mode_t st_mode;
short st_nlink;
- uid_t st_uid;
- gid_t st_gid;
+ uid16_t st_uid;
+ gid16_t st_gid;
unsigned short st_rdev;
off_t st_size;
time_t st_atime;
diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h
index 4248d969272f..5247283d1c03 100644
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
{
unsigned long base = (unsigned long) tp;
+ /* Stack pointer must be 16-byte aligned. */
+ if (sp & (16UL - 1))
+ return false;
+
if (sp >= (base + sizeof(struct thread_info)) &&
sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
return true;
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 4c26eb59e742..53a58b349849 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
static int of_bus_ambapp_match(struct device_node *np)
{
- return !strcmp(np->name, "ambapp");
+ return !strcmp(np->type, "ambapp");
}
static void of_bus_ambapp_count_cells(struct device_node *child,
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 539e83f8e087..592b03d85167 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct pci_bus *bus, int devfn)
{
struct dev_archdata *sd;
+ struct pci_slot *slot;
struct of_device *op;
struct pci_dev *dev;
const char *type;
@@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->dev.bus = &pci_bus_type;
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
+ set_pcie_port_type(dev);
+
+ list_for_each_entry(slot, &dev->bus->slots, list)
+ if (PCI_SLOT(dev->devfn) == slot->number)
+ dev->slot = slot;
dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
dev->device = of_getintprop_default(node, "device-id", 0xffff);
@@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->current_state = 4; /* unknown power state */
dev->error_state = pci_channel_io_normal;
+ dev->dma_mask = 0xffffffff;
if (!strcmp(node->name, "pci")) {
/* a PCI-PCI bridge */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 2830b415e214..c49865b30719 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
* Set some valid stack frames to give to the child.
*/
childstack = (struct sparc_stackf __user *)
- (sp & ~0x7UL);
+ (sp & ~0xfUL);
parentstack = (struct sparc_stackf __user *)
regs->u_regs[UREG_FP];
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index c3f1cce0e95e..cb70476bd8f5 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -398,11 +398,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
- /* Now 8-byte align the stack as this is mandatory in the
- * Sparc ABI due to how register windows work. This hides
- * the restriction from thread libraries etc. -DaveM
+ /* Now align the stack as this is mandatory in the Sparc ABI
+ * due to how register windows work. This hides the
+ * restriction from thread libraries etc.
*/
- csp &= ~7UL;
+ csp &= ~15UL;
distance = fp - psp;
rval = (csp - distance);
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index ba5b09ad6666..ea22cd373c64 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -120,8 +120,8 @@ struct rt_signal_frame32 {
};
/* Align macros */
-#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7)))
-#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
+#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15)))
+#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15)))
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
@@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
sp = current->sas_ss_sp + current->sas_ss_size;
}
+ sp -= framesize;
+
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
- sp &= ~7UL;
+ sp &= ~15UL;
- return (void __user *)(sp - framesize);
+ return (void __user *) sp;
}
static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 7ce1a1005b1d..9882df92ba0a 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
sp = current->sas_ss_sp + current->sas_ss_size;
}
+ sp -= framesize;
+
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
- sp &= ~7UL;
+ sp &= ~15UL;
- return (void __user *)(sp - framesize);
+ return (void __user *) sp;
}
static inline int
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 647afbda7ae1..9fa48c30037e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -353,7 +353,7 @@ segv:
/* Checks if the fp is valid */
static int invalid_frame_pointer(void __user *fp, int fplen)
{
- if (((unsigned long) fp) & 7)
+ if (((unsigned long) fp) & 15)
return 1;
return 0;
}
@@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *
sp = current->sas_ss_sp + current->sas_ss_size;
}
+ sp -= framesize;
+
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
- sp &= ~7UL;
+ sp &= ~15UL;
- return (void __user *)(sp - framesize);
+ return (void __user *) sp;
}
static inline void
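
For illustration only (not part of the patch above): the sparc signal and stack changes all move from 8-byte to 16-byte alignment, and the sigframe helpers now subtract the frame size before rounding down. A standalone user-space sketch of that placement rule; the addresses and sizes are invented:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t place_sigframe(uintptr_t sp, size_t framesize)
{
        sp -= framesize;          /* make room for the frame first */
        sp &= ~(uintptr_t)15;     /* then round down to a 16-byte boundary */
        return sp;
}

int main(void)
{
        uintptr_t sp = place_sigframe(0x7feff2f8UL, 0x220);

        /* prints: frame at 0x7feff0d0, 16-byte aligned: yes */
        printf("frame at %#lx, 16-byte aligned: %s\n",
               (unsigned long)sp, (sp & 15) == 0 ? "yes" : "no");
        return 0;
}
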
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 8c91d9b29a2f..db15d123f054 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -191,10 +191,12 @@ tsb_dtlb_load:
tsb_itlb_load:
/* Executable bit must be set. */
-661: andcc %g5, _PAGE_EXEC_4U, %g0
- .section .sun4v_1insn_patch, "ax"
+661: sethi %hi(_PAGE_EXEC_4U), %g4
+ andcc %g5, %g4, %g0
+ .section .sun4v_2insn_patch, "ax"
.word 661b
andcc %g5, _PAGE_EXEC_4V, %g0
+ nop
.previous
be,pn %xcc, tsb_do_fault
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1994d3f58443..f2ad2163109d 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -170,10 +170,7 @@ static inline void elf_common_init(struct thread_struct *t,
}
#define ELF_PLAT_INIT(_r, load_addr) \
-do { \
- elf_common_init(&current->thread, _r, 0); \
- clear_thread_flag(TIF_IA32); \
-} while (0)
+ elf_common_init(&current->thread, _r, 0)
#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
elf_common_init(&current->thread, regs, __USER_DS)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fc801bab1b3b..b753ea59703a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -450,6 +450,8 @@ struct thread_struct {
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
unsigned long debugreg6;
+ /* Keep track of the exact dr7 value set by the user */
+ unsigned long ptrace_dr7;
/* Fault info: */
unsigned long cr2;
unsigned long trap_no;
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index ecb544e65382..e04740f7a0bb 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -11,9 +11,9 @@
#include <linux/irqflags.h>
/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
+#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
-#else
+#else /* else it's non-compat x86-64 */
# define AT_VECTOR_SIZE_ARCH 1
#endif
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 036d28adf59d..af1c5833ff23 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1185,9 +1185,6 @@ static void __init acpi_process_madt(void)
if (!error) {
acpi_lapic = 1;
-#ifdef CONFIG_X86_BIGSMP
- generic_bigsmp_probe();
-#endif
/*
* Parse MADT IO-APIC entries
*/
@@ -1197,8 +1194,6 @@ static void __init acpi_process_madt(void)
acpi_ioapic = 1;
smp_found_config = 1;
- if (apic->setup_apic_routing)
- apic->setup_apic_routing();
}
}
if (error == -EINVAL) {
@@ -1349,14 +1344,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
},
{
.callback = force_acpi_ht,
- .ident = "ASUS P2B-DS",
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
- DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
- },
- },
- {
- .callback = force_acpi_ht,
.ident = "ASUS CUR-DLS",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 3987e4408f75..dfca210f6a10 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1641,9 +1641,7 @@ int __init APIC_init_uniprocessor(void)
#endif
enable_IR_x2apic();
-#ifdef CONFIG_X86_64
default_setup_apic_routing();
-#endif
verify_local_APIC();
connect_bsp_APIC();
@@ -1891,21 +1889,6 @@ void __cpuinit generic_processor_info(int apicid, int version)
if (apicid > max_physical_apicid)
max_physical_apicid = apicid;
-#ifdef CONFIG_X86_32
- if (num_processors > 8) {
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- if (!APIC_XAPIC(version)) {
- def_to_bigsmp = 0;
- break;
- }
- /* If P4 and above fall through */
- case X86_VENDOR_AMD:
- def_to_bigsmp = 1;
- }
- }
-#endif
-
#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1a6559f6768c..99d2fe016084 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -52,7 +52,32 @@ static int __init print_ipi_mode(void)
}
late_initcall(print_ipi_mode);
-void default_setup_apic_routing(void)
+void __init default_setup_apic_routing(void)
+{
+ int version = apic_version[boot_cpu_physical_apicid];
+
+ if (num_possible_cpus() > 8) {
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ if (!APIC_XAPIC(version)) {
+ def_to_bigsmp = 0;
+ break;
+ }
+ /* If P4 and above fall through */
+ case X86_VENDOR_AMD:
+ def_to_bigsmp = 1;
+ }
+ }
+
+#ifdef CONFIG_X86_BIGSMP
+ generic_bigsmp_probe();
+#endif
+
+ if (apic->setup_apic_routing)
+ apic->setup_apic_routing();
+}
+
+static void setup_apic_flat_routing(void)
{
#ifdef CONFIG_X86_IO_APIC
printk(KERN_INFO
@@ -103,7 +128,7 @@ struct apic apic_default = {
.init_apic_ldr = default_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
- .setup_apic_routing = default_setup_apic_routing,
+ .setup_apic_routing = setup_apic_flat_routing,
.multi_timer_check = NULL,
.apicid_to_node = default_apicid_to_node,
.cpu_to_logical_apicid = default_cpu_to_logical_apicid,
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 450fe2064a14..83e9be4778e2 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -67,7 +67,7 @@ void __init default_setup_apic_routing(void)
}
#endif
- if (apic == &apic_flat && num_processors > 8)
+ if (apic == &apic_flat && num_possible_cpus() > 8)
apic = &apic_physflat;
printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0..6e44519960c8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
+ per_cpu(powernow_data, pol->cpu) = NULL;
return 0;
}
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
int err;
if (!data)
- return -EINVAL;
+ return 0;
smp_call_function_single(cpu, query_values_on_cpu, &err, true);
if (err)
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 05d5fec64a94..bb6006e3e295 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -212,25 +212,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
-/*
- * Store a breakpoint's encoded address, length, and type.
- */
-static int arch_store_info(struct perf_event *bp)
-{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- /*
- * For kernel-addresses, either the address or symbol name can be
- * specified.
- */
- if (info->name)
- info->address = (unsigned long)
- kallsyms_lookup_name(info->name);
- if (info->address)
- return 0;
-
- return -EINVAL;
-}
-
int arch_bp_generic_fields(int x86_len, int x86_type,
int *gen_len, int *gen_type)
{
@@ -362,10 +343,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
return ret;
}
- ret = arch_store_info(bp);
-
- if (ret < 0)
- return ret;
+ /*
+ * For kernel-addresses, either the address or symbol name can be
+ * specified.
+ */
+ if (info->name)
+ info->address = (unsigned long)
+ kallsyms_lookup_name(info->name);
/*
* Check that the low-order bits of the address are appropriate
* for the alignment implied by len.
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 40b54ceb68b5..a2c1edd2d3ac 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -359,13 +359,6 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
x86_init.mpparse.mpc_record(1);
}
-#ifdef CONFIG_X86_BIGSMP
- generic_bigsmp_probe();
-#endif
-
- if (apic->setup_apic_routing)
- apic->setup_apic_routing();
-
if (!num_processors)
printk(KERN_ERR "MPTABLE: no processors registered!\n");
return num_processors;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 41a26a82470a..126f0b493d04 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -527,6 +527,7 @@ void set_personality_ia32(void)
/* Make sure to be in 32bit mode */
set_thread_flag(TIF_IA32);
+ current->personality |= force_personality32;
/* Prepare the first "return" to user space */
current_thread_info()->status |= TS_COMPAT;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 017d937639fe..0c1033d61e59 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -702,7 +702,7 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
} else if (n == 6) {
val = thread->debugreg6;
} else if (n == 7) {
- val = ptrace_get_dr7(thread->ptrace_bps);
+ val = thread->ptrace_dr7;
}
return val;
}
@@ -778,8 +778,11 @@ int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val)
return rc;
}
/* All that's left is DR7 */
- if (n == 7)
+ if (n == 7) {
rc = ptrace_write_dr7(tsk, val);
+ if (!rc)
+ thread->ptrace_dr7 = val;
+ }
ret_path:
return rc;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 678d0b8c26f3..b4e870cbdc60 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1083,9 +1083,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
set_cpu_sibling_map(0);
enable_IR_x2apic();
-#ifdef CONFIG_X86_64
default_setup_apic_routing();
-#endif
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba49472a..15578f180e59 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -467,6 +467,9 @@ static int pit_ioport_read(struct kvm_io_device *this,
return -EOPNOTSUPP;
addr &= KVM_PIT_CHANNEL_MASK;
+ if (addr == 3)
+ return 0;
+
s = &pit_state->channels[addr];
mutex_lock(&pit_state->lock);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1ddcad452add..a1e1bc9d412d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -670,7 +670,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
static int version;
struct pvclock_wall_clock wc;
- struct timespec now, sys, boot;
+ struct timespec boot;
if (!wall_clock)
return;
@@ -685,9 +685,7 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
* wall clock specified here. guest system time equals host
* system time for us, thus we must fill in host boot time here.
*/
- now = current_kernel_time();
- ktime_get_ts(&sys);
- boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
+ getboottime(&boot);
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
@@ -762,6 +760,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
local_irq_save(flags);
kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
ktime_get_ts(&ts);
+ monotonic_to_bootbased(&ts);
local_irq_restore(flags);
/* With all the info we got, fill in the values */
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 71da1bca13cb..738e6593799d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -18,7 +18,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
#else
/*
* With get_user_pages_fast, we walk down the pagetables without taking
- * any locks. For this we would like to load the pointers atoimcally,
+ * any locks. For this we would like to load the pointers atomically,
* but that is not possible (without expensive cmpxchg8b) on PAE. What
* we do have is the guarantee that a pte will only either go from not
* present to present, or present to not present or both -- it will not
diff --git a/block/blk-core.c b/block/blk-core.c
index 718897e6d37f..d1a9a0a64f95 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
*/
static inline bool queue_should_plug(struct request_queue *q)
{
- return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+ return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
}
static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
- if (blk_account_rq(rq)) {
+ if (blk_account_rq(rq))
q->in_flight[rq_is_sync(rq)]++;
- /*
- * Mark this device as supporting hardware queuing, if
- * we have more IOs in flight than 4.
- */
- if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
- set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
- }
}
/**
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 17b768d0d42f..023f4e69a337 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
*/
#define CFQ_MIN_TT (2)
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT (HZ)
-
#define CFQ_SLICE_SCALE (5)
#define CFQ_HW_QUEUE_MIN (5)
#define CFQ_SERVICE_SHIFT 12
+#define CFQQ_SEEK_THR 8 * 1024
+#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
#define RQ_CIC(rq) \
((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
u64 seek_total;
sector_t seek_mean;
sector_t last_request_pos;
- unsigned long seeky_start;
pid_t pid;
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
CFQ_CFQQ_FLAG_sync, /* synchronous queue */
CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
+ CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
};
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
@@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_busy(cfqq);
/*
+ * If this cfqq is shared between multiple processes, check to
+ * make sure that those processes are still issuing I/Os within
+ * the mean seek distance. If not, it may be time to break the
+ * queues apart again.
+ */
+ if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+ cfq_mark_cfqq_split_coop(cfqq);
+
+ /*
* store what was left of this slice, if the queue idled/timed out
*/
if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
return cfqd->last_position - blk_rq_pos(rq);
}
-#define CFQQ_SEEK_THR 8 * 1024
-#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct request *rq, bool for_preempt)
{
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
total = cfqq->seek_total + (cfqq->seek_samples/2);
do_div(total, cfqq->seek_samples);
cfqq->seek_mean = (sector_t)total;
-
- /*
- * If this cfqq is shared between multiple processes, check to
- * make sure that those processes are still issuing I/Os within
- * the mean seek distance. If not, it may be time to break the
- * queues apart again.
- */
- if (cfq_cfqq_coop(cfqq)) {
- if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
- cfqq->seeky_start = jiffies;
- else if (!CFQQ_SEEKY(cfqq))
- cfqq->seeky_start = 0;
- }
}
/*
@@ -3453,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
return cic_to_cfqq(cic, 1);
}
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
- if (cfqq->seeky_start &&
- time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
- return 1;
- return 0;
-}
-
/*
* Returns NULL if a new cfqq should be allocated, or the old cfqq if this
* was the last process referring to said cfqq.
@@ -3469,9 +3452,9 @@ static struct cfq_queue *
split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
{
if (cfqq_process_refs(cfqq) == 1) {
- cfqq->seeky_start = 0;
cfqq->pid = current->pid;
cfq_clear_cfqq_coop(cfqq);
+ cfq_clear_cfqq_split_coop(cfqq);
return cfqq;
}
@@ -3510,7 +3493,7 @@ new_queue:
/*
* If the queue was seeky for too long, break it apart.
*/
- if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+ if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
cfqq = split_cfqq(cic, cfqq);
if (!cfqq)
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index bbc2c1315c47..b2586f57e1f5 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
struct platform_device *dd;
id = dock_station_count;
+ memset(&ds, 0, sizeof(ds));
dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
if (IS_ERR(dd))
return PTR_ERR(dd);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7c0441f63b39..cc978a8c00b7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
(void *)2},
+ { set_max_cstate, "Pavilion zv5000", {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+ (void *)1},
+ { set_max_cstate, "Asus L8400B", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+ (void *)1},
{},
};
@@ -872,12 +880,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
return(acpi_idle_enter_c1(dev, state));
local_irq_disable();
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
+ if (cx->entry_method != ACPI_CSTATE_FFH) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+ }
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
@@ -957,12 +967,14 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
}
local_irq_disable();
- current_thread_info()->status &= ~TS_POLLING;
- /*
- * TS_POLLING-cleared state must be visible before we test
- * NEED_RESCHED:
- */
- smp_mb();
+ if (cx->entry_method != ACPI_CSTATE_FFH) {
+ current_thread_info()->status &= ~TS_POLLING;
+ /*
+ * TS_POLLING-cleared state must be visible before we test
+ * NEED_RESCHED:
+ */
+ smp_mb();
+ }
if (unlikely(need_resched())) {
current_thread_info()->status |= TS_POLLING;
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7247819dbd80..e306ba9aa34e 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
return status;
}
+static int early_pdc_done;
+
void acpi_processor_set_pdc(acpi_handle handle)
{
struct acpi_object_list *obj_list;
@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
if (arch_has_acpi_pdc() == false)
return;
+ if (early_pdc_done)
+ return;
+
obj_list = acpi_processor_alloc_pdc();
if (!obj_list)
return;
@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
return 0;
}
+static int param_early_pdc_optin(char *s)
+{
+ early_pdc_optin = 1;
+ return 1;
+}
+__setup("acpi_early_pdc_eval", param_early_pdc_optin);
+
static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
{
set_early_pdc_optin, "HP Envy", {
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
early_init_pdc, NULL, NULL, NULL);
+
+ early_pdc_done = 1;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 2cabadcc4d8c..a959f6a07508 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -413,7 +413,11 @@ static int acpi_processor_get_performance_info(struct acpi_processor *pr)
if (result)
goto update_bios;
- return 0;
+ /* We need to call _PPC once when cpufreq starts */
+ if (ignore_ppc != 1)
+ result = acpi_processor_get_platform_limit(pr);
+
+ return result;
/*
* Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint that
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ff9f6226085d..3e009674f333 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
if (child)
*child = device;
- return 0;
+
+ if (device)
+ return 0;
+ else
+ return -ENODEV;
}
+/*
+ * acpi_bus_add and acpi_bus_start
+ *
+ * scan a given ACPI tree and (probably recently hot-plugged)
+ * create and add or starts found devices.
+ *
+ * If no devices were found -ENODEV is returned which does not
+ * mean that this is a real error, there just have been no suitable
+ * ACPI objects in the table trunk from which the kernel could create
+ * a device and add/start an appropriate driver.
+ */
+
int
acpi_bus_add(struct acpi_device **child,
struct acpi_device *parent, acpi_handle handle, int type)
@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
memset(&ops, 0, sizeof(ops));
ops.acpi_op_add = 1;
- acpi_bus_scan(handle, &ops, child);
- return 0;
+ return acpi_bus_scan(handle, &ops, child);
}
EXPORT_SYMBOL(acpi_bus_add);
@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
{
struct acpi_bus_ops ops;
+ if (!device)
+ return -EINVAL;
+
memset(&ops, 0, sizeof(ops));
ops.acpi_op_start = 1;
- acpi_bus_scan(device->handle, &ops, NULL);
- return 0;
+ return acpi_bus_scan(device->handle, &ops, NULL);
}
EXPORT_SYMBOL(acpi_bus_start);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f336bca7c450..8a0ed2800e63 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
unsigned long table_end;
acpi_size tbl_size;
- if (acpi_disabled)
+ if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
struct acpi_table_header *table = NULL;
acpi_size tbl_size;
- if (acpi_disabled)
+ if (acpi_disabled && !acpi_ht)
return -ENODEV;
if (!handler)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..a6a736a7dbf2 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -2868,6 +2868,21 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
},
.driver_data = "F.23", /* cutoff BIOS version */
},
+ /*
+ * Acer eMachines G725 has the same problem. BIOS
+ * V1.03 is known to be broken. V3.04 is known to
+ * work. Inbetween, there are V1.06, V2.06 and V3.03
+ * that we don't have much idea about. For now,
+ * blacklist anything older than V3.04.
+ */
+ {
+ .ident = "G725",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
+ },
+ .driver_data = "V3.04", /* cutoff BIOS version */
+ },
{ } /* terminate list */
};
const struct dmi_system_id *dmi = dmi_first_match(sysids);
@@ -3067,8 +3082,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ahci_save_initial_config(pdev, hpriv);
/* prepare host */
- if (hpriv->cap & HOST_CAP_NCQ)
- pi.flags |= ATA_FLAG_NCQ | ATA_FLAG_FPDMA_AA;
+ if (hpriv->cap & HOST_CAP_NCQ) {
+ pi.flags |= ATA_FLAG_NCQ;
+ /* Auto-activate optimization is supposed to be supported on
+ all AHCI controllers indicating NCQ support, but it seems
+ to be broken at least on some NVIDIA MCP79 chipsets.
+ Until we get info on which NVIDIA chipsets don't have this
+ issue, if any, disable AA on all NVIDIA AHCIs. */
+ if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
+ pi.flags |= ATA_FLAG_FPDMA_AA;
+ }
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index f4ea5a8c325b..d096fbcbc771 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2875,7 +2875,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* write indication (used for PIO/DMA setup), result TF is
* copied back and we don't whine too much about its failure.
*/
- tf->flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+ tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE)
tf->flags |= ATA_TFLAG_WRITE;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 741065c9da67..730ef3c384ca 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -893,6 +893,9 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
do_write);
}
+ if (!do_write)
+ flush_dcache_page(page);
+
qc->curbytes += qc->sect_size;
qc->cursg_ofs += qc->sect_size;
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..6e2c3b064f53 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj)
else
pr_debug("class '%s' does not have a release() function, "
"be careful\n", class->name);
+
+ kfree(cp);
}
static struct sysfs_ops class_sysfs_ops = {
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 873e594860d3..9291614ac6b7 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -337,6 +337,9 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
if (*pos > h->highest_lun)
return 0;
+ if (drv == NULL) /* it's possible for h->drv[] to have holes. */
+ return 0;
+
if (drv->heads == 0)
return 0;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 51042f0ba7e1..7eff828b2117 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -243,10 +243,12 @@ static int index_to_minor(int index)
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
+ struct request_queue *q;
int err;
u64 cap;
- u32 v;
- u32 blk_size, sg_elems;
+ u32 v, blk_size, sg_elems, opt_io_size;
+ u16 min_io_size;
+ u8 physical_block_exp, alignment_offset;
if (index_to_minor(index) >= 1 << MINORBITS)
return -ENOSPC;
@@ -293,13 +295,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
goto out_mempool;
}
- vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
- if (!vblk->disk->queue) {
+ q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
+ if (!q) {
err = -ENOMEM;
goto out_put_disk;
}
- vblk->disk->queue->queuedata = vblk;
+ q->queuedata = vblk;
if (index < 26) {
sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
@@ -323,10 +325,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
/* If barriers are supported, tell block layer that queue is ordered */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
- blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_DRAIN_FLUSH,
+ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
virtblk_prepare_flush);
else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
- blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL);
+ blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);
/* If disk is read-only in the host, the guest should obey */
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
@@ -345,14 +347,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
set_capacity(vblk->disk, cap);
/* We can handle whatever the host told us to handle. */
- blk_queue_max_phys_segments(vblk->disk->queue, vblk->sg_elems-2);
- blk_queue_max_hw_segments(vblk->disk->queue, vblk->sg_elems-2);
+ blk_queue_max_phys_segments(q, vblk->sg_elems-2);
+ blk_queue_max_hw_segments(q, vblk->sg_elems-2);
/* No need to bounce any requests */
- blk_queue_bounce_limit(vblk->disk->queue, BLK_BOUNCE_ANY);
+ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
/* No real sector limit. */
- blk_queue_max_sectors(vblk->disk->queue, -1U);
+ blk_queue_max_sectors(q, -1U);
/* Host can optionally specify maximum segment size and number of
* segments. */
@@ -360,16 +362,45 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
offsetof(struct virtio_blk_config, size_max),
&v);
if (!err)
- blk_queue_max_segment_size(vblk->disk->queue, v);
+ blk_queue_max_segment_size(q, v);
else
- blk_queue_max_segment_size(vblk->disk->queue, -1U);
+ blk_queue_max_segment_size(q, -1U);
/* Host can optionally specify the block size of the device */
err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
offsetof(struct virtio_blk_config, blk_size),
&blk_size);
if (!err)
- blk_queue_logical_block_size(vblk->disk->queue, blk_size);
+ blk_queue_logical_block_size(q, blk_size);
+ else
+ blk_size = queue_logical_block_size(q);
+
+ /* Use topology information if available */
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ offsetof(struct virtio_blk_config, physical_block_exp),
+ &physical_block_exp);
+ if (!err && physical_block_exp)
+ blk_queue_physical_block_size(q,
+ blk_size * (1 << physical_block_exp));
+
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ offsetof(struct virtio_blk_config, alignment_offset),
+ &alignment_offset);
+ if (!err && alignment_offset)
+ blk_queue_alignment_offset(q, blk_size * alignment_offset);
+
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ offsetof(struct virtio_blk_config, min_io_size),
+ &min_io_size);
+ if (!err && min_io_size)
+ blk_queue_io_min(q, blk_size * min_io_size);
+
+ err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
+ offsetof(struct virtio_blk_config, opt_io_size),
+ &opt_io_size);
+ if (!err && opt_io_size)
+ blk_queue_io_opt(q, blk_size * opt_io_size);
+
add_disk(vblk->disk);
return 0;
@@ -412,7 +443,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
- VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH
+ VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};
/*
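
For illustration only (not part of the patch above): the VIRTIO_BLK_F_TOPOLOGY fields read in the hunk above are expressed in multiples of the logical block size, with physical_block_exp being a power-of-two exponent. A standalone sketch with invented values (512-byte logical blocks, exp = 3, min_io_size = 8):

#include <stdio.h>

int main(void)
{
        unsigned int blk_size           = 512;  /* logical block size in bytes */
        unsigned int physical_block_exp = 3;    /* physical = logical * 2^exp */
        unsigned int min_io_size        = 8;    /* in logical blocks */

        printf("physical block: %u bytes\n",
               blk_size * (1u << physical_block_exp));       /* 4096 */
        printf("minimum I/O:    %u bytes\n",
               blk_size * min_io_size);                      /* 4096 */
        return 0;
}
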
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..57d965b7f521 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -808,6 +808,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
exit:
sdio_release_host(card->func);
+ kfree(tmpbuf);
return ret;
}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index e023682be2c4..3141dd3b6e53 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -666,6 +666,14 @@ config VIRTIO_CONSOLE
help
Virtio console for use with lguest and other hypervisors.
+ Also serves as a general-purpose serial device for data
+ transfer between the guest and host. Character devices at
+ /dev/vportNpn will be created when corresponding ports are
+ found, where N is the device number and n is the port number
+ within that device. If specified by the host, a sysfs
+ attribute called 'name' will be populated with a name for
+ the port which can be used by udev scripts to create a
+ symlink to the device.
config HVCS
tristate "IBM Hypervisor Virtual Console Server support"
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 34cf04e21795..fd50ead59c79 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -767,16 +767,19 @@ int __init agp_amd64_init(void)
static int __init agp_amd64_mod_init(void)
{
+#ifndef MODULE
if (gart_iommu_aperture)
return agp_bridges_found ? 0 : -ENODEV;
-
+#endif
return agp_amd64_init();
}
static void __exit agp_amd64_cleanup(void)
{
+#ifndef MODULE
if (gart_iommu_aperture)
return;
+#endif
if (aperture_resource)
release_resource(aperture_resource);
pci_unregister_driver(&agp_amd64_pci_driver);
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
index 0afc8b82212e..6913fc33270c 100644
--- a/drivers/char/hvc_beat.c
+++ b/drivers/char/hvc_beat.c
@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
return cnt;
}
-static struct hv_ops hvc_beat_get_put_ops = {
+static const struct hv_ops hvc_beat_get_put_ops = {
.get_chars = hvc_beat_get_chars,
.put_chars = hvc_beat_put_chars,
};
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 416d3423150d..d8dac5820f0e 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
* console interfaces but can still be used as a tty device. This has to be
* static because kmalloc will not work during early console init.
*/
-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kref *kref)
* vty adapters do NOT get an hvc_instantiate() callback since they
* appear after early console init.
*/
-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
{
struct hvc_struct *hp;
@@ -749,7 +749,8 @@ static const struct tty_operations hvc_ops = {
};
struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
- struct hv_ops *ops, int outbuf_size)
+ const struct hv_ops *ops,
+ int outbuf_size)
{
struct hvc_struct *hp;
int i;
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 10950ca706d8..52ddf4d3716c 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -55,7 +55,7 @@ struct hvc_struct {
int outbuf_size;
int n_outbuf;
uint32_t vtermno;
- struct hv_ops *ops;
+ const struct hv_ops *ops;
int irq_requested;
int data;
struct winsize ws;
@@ -76,11 +76,12 @@ struct hv_ops {
};
/* Register a vterm and a slot index for use as a console (console_init) */
-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
+extern int hvc_instantiate(uint32_t vtermno, int index,
+ const struct hv_ops *ops);
/* register a vterm for hvc tty operation (module_init or hotplug add) */
extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
- struct hv_ops *ops, int outbuf_size);
+ const struct hv_ops *ops, int outbuf_size);
/* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
extern int hvc_remove(struct hvc_struct *hp);
diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
index 936d05bf37fa..fd0242676a2a 100644
--- a/drivers/char/hvc_iseries.c
+++ b/drivers/char/hvc_iseries.c
@@ -197,7 +197,7 @@ done:
return sent;
}
-static struct hv_ops hvc_get_put_ops = {
+static const struct hv_ops hvc_get_put_ops = {
.get_chars = get_chars,
.put_chars = put_chars,
.notifier_add = notifier_add_irq,
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index fe62bd0e17b7..21681a81cc35 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -922,7 +922,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
/* HVC operations */
-static struct hv_ops hvc_iucv_ops = {
+static const struct hv_ops hvc_iucv_ops = {
.get_chars = hvc_iucv_get_chars,
.put_chars = hvc_iucv_put_chars,
.notifier_add = hvc_iucv_notifier_add,
diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
index 88590d040046..61c4a61558d9 100644
--- a/drivers/char/hvc_rtas.c
+++ b/drivers/char/hvc_rtas.c
@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
return i;
}
-static struct hv_ops hvc_rtas_get_put_ops = {
+static const struct hv_ops hvc_rtas_get_put_ops = {
.get_chars = hvc_rtas_read_console,
.put_chars = hvc_rtas_write_console,
};
diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
index bd63ba878a56..b0957e61a7be 100644
--- a/drivers/char/hvc_udbg.c
+++ b/drivers/char/hvc_udbg.c
@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
return i;
}
-static struct hv_ops hvc_udbg_ops = {
+static const struct hv_ops hvc_udbg_ops = {
.get_chars = hvc_udbg_get,
.put_chars = hvc_udbg_put,
};
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 10be343d6ae7..27370e99c66f 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
return got;
}
-static struct hv_ops hvc_get_put_ops = {
+static const struct hv_ops hvc_get_put_ops = {
.get_chars = filtered_get_chars,
.put_chars = hvc_put_chars,
.notifier_add = notifier_add_irq,
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index b1a71638c772..60446f82a3fc 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -122,7 +122,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
return recv;
}
-static struct hv_ops hvc_ops = {
+static const struct hv_ops hvc_ops = {
.get_chars = read_console,
.put_chars = write_console,
.notifier_add = notifier_add_irq,
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index ecba4942fc8e..f58440791e65 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -39,12 +39,12 @@
struct tpm_inf_dev {
int iotype;
- void __iomem *mem_base; /* MMIO ioremap'd addr */
- unsigned long map_base; /* phys MMIO base */
- unsigned long map_size; /* MMIO region size */
- unsigned int index_off; /* index register offset */
+ void __iomem *mem_base; /* MMIO ioremap'd addr */
+ unsigned long map_base; /* phys MMIO base */
+ unsigned long map_size; /* MMIO region size */
+ unsigned int index_off; /* index register offset */
- unsigned int data_regs; /* Data registers */
+ unsigned int data_regs; /* Data registers */
unsigned int data_size;
unsigned int config_port; /* IO Port config index reg */
@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific tpm_inf = {
.miscdev = {.fops = &inf_ops,},
};
-static const struct pnp_device_id tpm_pnp_tbl[] = {
+static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
/* Infineon TPMs */
{"IFX0101", 0},
{"IFX0102", 0},
{"", 0}
};
-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
const struct pnp_device_id *dev_id)
@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
!(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
- tpm_dev.iotype = TPM_INF_IO_PORT;
+ tpm_dev.iotype = TPM_INF_IO_PORT;
tpm_dev.config_port = pnp_port_start(dev, 0);
tpm_dev.config_size = pnp_port_len(dev, 0);
@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
goto err_last;
}
} else if (pnp_mem_valid(dev, 0) &&
- !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
- tpm_dev.iotype = TPM_INF_IO_MEM;
+ tpm_dev.iotype = TPM_INF_IO_MEM;
tpm_dev.map_base = pnp_mem_start(dev, 0);
tpm_dev.map_size = pnp_mem_len(dev, 0);
@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
"product id 0x%02x%02x"
"%s\n",
tpm_dev.iotype == TPM_INF_IO_PORT ?
- tpm_dev.config_port :
- tpm_dev.map_base + tpm_dev.index_off,
+ tpm_dev.config_port :
+ tpm_dev.map_base + tpm_dev.index_off,
tpm_dev.iotype == TPM_INF_IO_PORT ?
- tpm_dev.data_regs :
- tpm_dev.map_base + tpm_dev.data_regs,
+ tpm_dev.data_regs :
+ tpm_dev.map_base + tpm_dev.data_regs,
version[0], version[1],
vendorid[0], vendorid[1],
productid[0], productid[1], chipname);
@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove(struct pnp_dev *dev)
iounmap(tpm_dev.mem_base);
release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
}
+ tpm_dev_vendor_release(chip);
tpm_remove_hardware(chip->dev);
}
}
+static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
+{
+ struct tpm_chip *chip = pnp_get_drvdata(dev);
+ int rc;
+ if (chip) {
+ u8 savestate[] = {
+ 0, 193, /* TPM_TAG_RQU_COMMAND */
+ 0, 0, 0, 10, /* blob length (in bytes) */
+ 0, 0, 0, 152 /* TPM_ORD_SaveState */
+ };
+ dev_info(&dev->dev, "saving TPM state\n");
+ rc = tpm_inf_send(chip, savestate, sizeof(savestate));
+ if (rc < 0) {
+ dev_err(&dev->dev, "error while saving TPM state\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int tpm_inf_pnp_resume(struct pnp_dev *dev)
+{
+ /* Re-configure TPM after suspending */
+ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ tpm_config_out(IOLIMH, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
+ tpm_config_out(IOLIML, TPM_INF_ADDR);
+ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
+ /* activate register */
+ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
+ tpm_config_out(0x01, TPM_INF_DATA);
+ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
+ /* disable RESET, LP and IRQC */
+ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
+ return tpm_pm_resume(&dev->dev);
+}
+
static struct pnp_driver tpm_inf_pnp_driver = {
.name = "tpm_inf_pnp",
- .driver = {
- .owner = THIS_MODULE,
- .suspend = tpm_pm_suspend,
- .resume = tpm_pm_resume,
- },
- .id_table = tpm_pnp_tbl,
+ .id_table = tpm_inf_pnp_tbl,
.probe = tpm_inf_pnp_probe,
- .remove = __devexit_p(tpm_inf_pnp_remove),
+ .suspend = tpm_inf_pnp_suspend,
+ .resume = tpm_inf_pnp_resume,
+ .remove = __devexit_p(tpm_inf_pnp_remove)
};
static int __init init_inf(void)
@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
-MODULE_VERSION("1.9");
+MODULE_VERSION("1.9.2");
MODULE_LICENSE("GPL");
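For reference, the savestate[] blob that tpm_inf_pnp_suspend() sends is a TPM 1.2 request header with no body: a 2-byte tag, a 4-byte total length and a 4-byte ordinal, all big-endian, matching the byte values in the comments above (193 = 0xC1, length 10, ordinal 152 = 0x98). A small sketch building the same ten bytes explicitly (hypothetical helper, not part of the driver):

#include <linux/types.h>

static void build_tpm_savestate(u8 cmd[10])
{
	cmd[0] = 0x00; cmd[1] = 0xC1;		/* TPM_TAG_RQU_COMMAND (193) */
	cmd[2] = 0x00; cmd[3] = 0x00;
	cmd[4] = 0x00; cmd[5] = 0x0A;		/* total length: 10 bytes */
	cmd[6] = 0x00; cmd[7] = 0x00;
	cmd[8] = 0x00; cmd[9] = 0x98;		/* TPM_ORD_SaveState (152) */
}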
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index c6f3b48be9dd..dcb9083ecde0 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1951,8 +1951,10 @@ static int tty_fasync(int fd, struct file *filp, int on)
pid = task_pid(current);
type = PIDTYPE_PID;
}
- retval = __f_setown(filp, pid, type, 0);
+ get_pid(pid);
spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+ retval = __f_setown(filp, pid, type, 0);
+ put_pid(pid);
if (retval)
goto out;
} else {
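The tty_io hunk above is an instance of a common pattern: take a reference on a refcounted object while the lock that found it is still held, drop the lock, use the object, then put the reference, so the object cannot go away in the window where the spinlock no longer protects it. A generic sketch of that pattern (illustrative names, mirroring the calls used in the hunk):

#include <linux/fs.h>
#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static int example_setown(struct file *filp, spinlock_t *lock)
{
	struct pid *pid;
	unsigned long flags;
	int retval;

	spin_lock_irqsave(lock, flags);
	pid = task_pid(current);
	get_pid(pid);			/* pin the pid before dropping the lock */
	spin_unlock_irqrestore(lock, flags);

	/* Safe to call now that we no longer hold the spinlock. */
	retval = __f_setown(filp, pid, PIDTYPE_PID, 0);
	put_pid(pid);			/* drop our temporary reference */
	return retval;
}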
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index a035ae39a359..213373b5f17f 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,18 +1,6 @@
-/*D:300
- * The Guest console driver
- *
- * Writing console drivers is one of the few remaining Dark Arts in Linux.
- * Fortunately for us, the path of virtual consoles has been well-trodden by
- * the PowerPC folks, who wrote "hvc_console.c" to generically support any
- * virtual console. We use that infrastructure which only requires us to write
- * the basic put_chars and get_chars functions and call the right register
- * functions.
- :*/
-
-/*M:002 The console can be flooded: while the Guest is processing input the
- * Host can send more. Buffering in the Host could alleviate this, but it is a
- * difficult problem in general. :*/
-/* Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
+/*
+ * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
+ * Copyright (C) 2009, 2010 Red Hat, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,142 +16,694 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/fs.h>
#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_console.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
#include "hvc_console.h"
-/*D:340 These represent our input and output console queues, and the virtio
- * operations for them. */
-static struct virtqueue *in_vq, *out_vq;
-static struct virtio_device *vdev;
+/*
+ * This is a global struct for storing common data for all the devices
+ * this driver handles.
+ *
+ * Mainly, it has a linked list for all the consoles in one place so
+ * that callbacks from hvc for get_chars(), put_chars() work properly
+ * across multiple devices and multiple ports per device.
+ */
+struct ports_driver_data {
+ /* Used for registering chardevs */
+ struct class *class;
+
+ /* Used for exporting per-port information to debugfs */
+ struct dentry *debugfs_dir;
+
+ /* Number of devices this driver is handling */
+ unsigned int index;
+
+ /*
+ * This is used to keep track of the number of hvc consoles
+ * spawned by this driver. This number is given as the first
+ * argument to hvc_alloc(). To correctly map an initial
+ * console spawned via hvc_instantiate to the console being
+ * hooked up via hvc_alloc, we need to pass the same vtermno.
+ *
+ * We also just assume the first console being initialised was
+ * the first one that got used as the initial console.
+ */
+ unsigned int next_vtermno;
+
+ /* All the console devices handled by this driver */
+ struct list_head consoles;
+};
+static struct ports_driver_data pdrvdata;
+
+DEFINE_SPINLOCK(pdrvdata_lock);
+
+/* This struct holds information that's relevant only for console ports */
+struct console {
+ /* We'll place all consoles in a list in the pdrvdata struct */
+ struct list_head list;
+
+ /* The hvc device associated with this console port */
+ struct hvc_struct *hvc;
+
+ /*
+ * This is the vterm number we used to register with hvc in
+ * hvc_instantiate() and hvc_alloc(); the hvc callbacks pass it
+ * back to us so that we can tell apart the console ports
+ * handled by this driver
+ */
+ u32 vtermno;
+};
+
+struct port_buffer {
+ char *buf;
+
+ /* size of the buffer in *buf above */
+ size_t size;
+
+ /* used length of the buffer */
+ size_t len;
+ /* offset in the buf from which to consume data */
+ size_t offset;
+};
+
+/*
+ * This is a per-device struct that stores data common to all the
+ * ports for that device (vdev->priv).
+ */
+struct ports_device {
+ /*
+ * Workqueue handlers where we process deferred work after
+ * notification
+ */
+ struct work_struct control_work;
+ struct work_struct config_work;
+
+ struct list_head ports;
+
+ /* To protect the list of ports */
+ spinlock_t ports_lock;
+
+ /* To protect the vq operations for the control channel */
+ spinlock_t cvq_lock;
+
+ /* The current config space is stored here */
+ struct virtio_console_config config;
+
+ /* The virtio device we're associated with */
+ struct virtio_device *vdev;
+
+ /*
+ * A couple of virtqueues for the control channel: one for
+ * guest->host transfers, one for host->guest transfers
+ */
+ struct virtqueue *c_ivq, *c_ovq;
+
+ /* Array of per-port IO virtqueues */
+ struct virtqueue **in_vqs, **out_vqs;
+
+ /* Used for numbering devices for sysfs and debugfs */
+ unsigned int drv_index;
+
+ /* Major number for this device. Ports will be created as minors. */
+ int chr_major;
+};
+
+/* This struct holds the per-port data */
+struct port {
+ /* Next port in the list, head is in the ports_device */
+ struct list_head list;
+
+ /* Pointer to the parent virtio_console device */
+ struct ports_device *portdev;
+
+ /* The current buffer from which data has to be fed to readers */
+ struct port_buffer *inbuf;
+
+ /*
+ * To protect the operations on the in_vq associated with this
+ * port. Has to be a spinlock because it can be called from
+ * interrupt context (get_char()).
+ */
+ spinlock_t inbuf_lock;
+
+ /* The IO vqs for this port */
+ struct virtqueue *in_vq, *out_vq;
+
+ /* File in the debugfs directory that exposes this port's information */
+ struct dentry *debugfs_file;
+
+ /*
+ * The entries in this struct will be valid if this port is
+ * hooked up to an hvc console
+ */
+ struct console cons;
+
+ /* Each port associates with a separate char device */
+ struct cdev cdev;
+ struct device *dev;
+
+ /* A waitqueue for poll() or blocking read operations */
+ wait_queue_head_t waitqueue;
+
+ /* The 'name' of the port that we expose via sysfs properties */
+ char *name;
+
+ /* The 'id' to identify the port with the Host */
+ u32 id;
+
+ /* Is the host device open */
+ bool host_connected;
+
+ /* We should allow only one process to open a port */
+ bool guest_connected;
+};
+
+/* This is the very early arch-specified put chars function. */
+static int (*early_put_chars)(u32, const char *, int);
+
+static struct port *find_port_by_vtermno(u32 vtermno)
+{
+ struct port *port;
+ struct console *cons;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdrvdata_lock, flags);
+ list_for_each_entry(cons, &pdrvdata.consoles, list) {
+ if (cons->vtermno == vtermno) {
+ port = container_of(cons, struct port, cons);
+ goto out;
+ }
+ }
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&pdrvdata_lock, flags);
+ return port;
+}
+
+static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->id == id)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+
+ return port;
+}
+
+static struct port *find_port_by_vq(struct ports_device *portdev,
+ struct virtqueue *vq)
+{
+ struct port *port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&portdev->ports_lock, flags);
+ list_for_each_entry(port, &portdev->ports, list)
+ if (port->in_vq == vq || port->out_vq == vq)
+ goto out;
+ port = NULL;
+out:
+ spin_unlock_irqrestore(&portdev->ports_lock, flags);
+ return port;
+}
+
+static bool is_console_port(struct port *port)
+{
+ if (port->cons.hvc)
+ return true;
+ return false;
+}
+
+static inline bool use_multiport(struct ports_device *portdev)
+{
+ /*
+ * This condition can be true when put_chars is called from
+ * early_init
+ */
+ if (!portdev->vdev)
+ return 0;
+ return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
+}
-/* This is our input buffer, and how much data is left in it. */
-static unsigned int in_len;
-static char *in, *inbuf;
+static void free_buf(struct port_buffer *buf)
+{
+ kfree(buf->buf);
+ kfree(buf);
+}
+
+static struct port_buffer *alloc_buf(size_t buf_size)
+{
+ struct port_buffer *buf;
-/* The operations for our console. */
-static struct hv_ops virtio_cons;
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ goto fail;
+ buf->buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf->buf)
+ goto free_buf;
+ buf->len = 0;
+ buf->offset = 0;
+ buf->size = buf_size;
+ return buf;
+
+free_buf:
+ kfree(buf);
+fail:
+ return NULL;
+}
+
+/* Callers should take appropriate locks */
+static void *get_inbuf(struct port *port)
+{
+ struct port_buffer *buf;
+ struct virtqueue *vq;
+ unsigned int len;
-/* The hvc device */
-static struct hvc_struct *hvc;
+ vq = port->in_vq;
+ buf = vq->vq_ops->get_buf(vq, &len);
+ if (buf) {
+ buf->len = len;
+ buf->offset = 0;
+ }
+ return buf;
+}
-/*D:310 The put_chars() callback is pretty straightforward.
+/*
+ * Create a scatter-gather list representing our input buffer and put
+ * it in the queue.
*
- * We turn the characters into a scatter-gather list, add it to the output
- * queue and then kick the Host. Then we sit here waiting for it to finish:
- * inefficient in theory, but in practice implementations will do it
- * immediately (lguest's Launcher does). */
-static int put_chars(u32 vtermno, const char *buf, int count)
+ * Callers should take appropriate locks.
+ */
+static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
{
struct scatterlist sg[1];
+ int ret;
+
+ sg_init_one(sg, buf->buf, buf->size);
+
+ ret = vq->vq_ops->add_buf(vq, sg, 0, 1, buf);
+ vq->vq_ops->kick(vq);
+ return ret;
+}
+
+/* Discard any unread data this port has. Callers should take appropriate locks. */
+static void discard_port_data(struct port *port)
+{
+ struct port_buffer *buf;
+ struct virtqueue *vq;
unsigned int len;
+ int ret;
- /* This is a convenient routine to initialize a single-elem sg list */
- sg_init_one(sg, buf, count);
+ vq = port->in_vq;
+ if (port->inbuf)
+ buf = port->inbuf;
+ else
+ buf = vq->vq_ops->get_buf(vq, &len);
- /* add_buf wants a token to identify this buffer: we hand it any
- * non-NULL pointer, since there's only ever one buffer. */
- if (out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, (void *)1) >= 0) {
- /* Tell Host to go! */
- out_vq->vq_ops->kick(out_vq);
- /* Chill out until it's done with the buffer. */
- while (!out_vq->vq_ops->get_buf(out_vq, &len))
- cpu_relax();
+ ret = 0;
+ while (buf) {
+ if (add_inbuf(vq, buf) < 0) {
+ ret++;
+ free_buf(buf);
+ }
+ buf = vq->vq_ops->get_buf(vq, &len);
}
+ port->inbuf = NULL;
+ if (ret)
+ dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
+ ret);
+}
- /* We're expected to return the amount of data we wrote: all of it. */
- return count;
+static bool port_has_data(struct port *port)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ if (port->inbuf) {
+ ret = true;
+ goto out;
+ }
+ port->inbuf = get_inbuf(port);
+ if (port->inbuf) {
+ ret = true;
+ goto out;
+ }
+ ret = false;
+out:
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+ return ret;
}
-/* Create a scatter-gather list representing our input buffer and put it in the
- * queue. */
-static void add_inbuf(void)
+static ssize_t send_control_msg(struct port *port, unsigned int event,
+ unsigned int value)
{
struct scatterlist sg[1];
- sg_init_one(sg, inbuf, PAGE_SIZE);
+ struct virtio_console_control cpkt;
+ struct virtqueue *vq;
+ int len;
+
+ if (!use_multiport(port->portdev))
+ return 0;
+
+ cpkt.id = port->id;
+ cpkt.event = event;
+ cpkt.value = value;
+
+ vq = port->portdev->c_ovq;
- /* We should always be able to add one buffer to an empty queue. */
- if (in_vq->vq_ops->add_buf(in_vq, sg, 0, 1, inbuf) < 0)
- BUG();
- in_vq->vq_ops->kick(in_vq);
+ sg_init_one(sg, &cpkt, sizeof(cpkt));
+ if (vq->vq_ops->add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ vq->vq_ops->kick(vq);
+ while (!vq->vq_ops->get_buf(vq, &len))
+ cpu_relax();
+ }
+ return 0;
}
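send_control_msg() above, and handle_control_message() further down, exchange small fixed-size packets over the control virtqueues. The authoritative layout is in <linux/virtio_console.h>; the sketch below assumes the conventional definition (the field widths are an assumption, the field names match the code):

/* Assumed layout -- see <linux/virtio_console.h> for the real definition. */
struct virtio_console_control {
	__u32 id;	/* port number the message refers to */
	__u16 event;	/* one of the VIRTIO_CONSOLE_* events */
	__u16 value;	/* event-specific value, e.g. 1 = open, 0 = closed */
};

/* Events used by this driver: VIRTIO_CONSOLE_PORT_READY,
 * VIRTIO_CONSOLE_CONSOLE_PORT, VIRTIO_CONSOLE_RESIZE,
 * VIRTIO_CONSOLE_PORT_OPEN, VIRTIO_CONSOLE_PORT_NAME,
 * VIRTIO_CONSOLE_PORT_REMOVE
 */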
-/*D:350 get_chars() is the callback from the hvc_console infrastructure when
- * an interrupt is received.
- *
- * Most of the code deals with the fact that the hvc_console() infrastructure
- * only asks us for 16 bytes at a time. We keep in_offset and in_used fields
- * for partially-filled buffers. */
-static int get_chars(u32 vtermno, char *buf, int count)
+static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count)
{
- /* If we don't have an input queue yet, we can't get input. */
- BUG_ON(!in_vq);
+ struct scatterlist sg[1];
+ struct virtqueue *out_vq;
+ ssize_t ret;
+ unsigned int len;
+
+ out_vq = port->out_vq;
+
+ sg_init_one(sg, in_buf, in_count);
+ ret = out_vq->vq_ops->add_buf(out_vq, sg, 1, 0, in_buf);
+
+ /* Tell Host to go! */
+ out_vq->vq_ops->kick(out_vq);
+
+ if (ret < 0) {
+ len = 0;
+ goto fail;
+ }
+
+ /*
+ * Wait till the host acknowledges it pushed out the data we
+ * sent. Also ensure we return to userspace the number of
+ * bytes that were successfully consumed by the host.
+ */
+ while (!out_vq->vq_ops->get_buf(out_vq, &len))
+ cpu_relax();
+fail:
+ /* We're expected to return the amount of data we wrote */
+ return len;
+}
+
+/*
+ * Give out the data that's requested from the buffer that we have
+ * queued up.
+ */
+static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
+ bool to_user)
+{
+ struct port_buffer *buf;
+ unsigned long flags;
+
+ if (!out_count || !port_has_data(port))
+ return 0;
+
+ buf = port->inbuf;
+ out_count = min(out_count, buf->len - buf->offset);
+
+ if (to_user) {
+ ssize_t ret;
+
+ ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
+ if (ret)
+ return -EFAULT;
+ } else {
+ memcpy(out_buf, buf->buf + buf->offset, out_count);
+ }
+
+ buf->offset += out_count;
+
+ if (buf->offset == buf->len) {
+ /*
+ * We're done using all the data in this buffer.
+ * Re-queue so that the Host can send us more data.
+ */
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ port->inbuf = NULL;
+
+ if (add_inbuf(port->in_vq, buf) < 0)
+ dev_warn(port->dev, "failed add_buf\n");
+
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+ }
+ /* Return the number of bytes actually copied */
+ return out_count;
+}
- /* No buffer? Try to get one. */
- if (!in_len) {
- in = in_vq->vq_ops->get_buf(in_vq, &in_len);
- if (!in)
+/* The condition that must be true for polling to end */
+static bool wait_is_over(struct port *port)
+{
+ return port_has_data(port) || !port->host_connected;
+}
+
+static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ ssize_t ret;
+
+ port = filp->private_data;
+
+ if (!port_has_data(port)) {
+ /*
+ * If nothing's connected on the host side, just return 0;
+ * this tells the userspace app that there's no connection
+ */
+ if (!port->host_connected)
return 0;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(port->waitqueue,
+ wait_is_over(port));
+ if (ret < 0)
+ return ret;
+ }
+ /*
+ * We could've received a disconnection message while we were
+ * waiting for more data.
+ *
+ * This check is not clubbed in the if() statement above as we
+ * might receive some data as well as the host could get
+ * disconnected after we got woken up from our wait. So we
+ * really want to give off whatever data we have and only then
+ * check for host_connected.
+ */
+ if (!port_has_data(port) && !port->host_connected)
+ return 0;
+
+ return fill_readbuf(port, ubuf, count, true);
+}
+
+static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ char *buf;
+ ssize_t ret;
+
+ port = filp->private_data;
+
+ count = min((size_t)(32 * 1024), count);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf, ubuf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto free_buf;
}
- /* You want more than we have to give? Well, try wanting less! */
- if (in_len < count)
- count = in_len;
+ ret = send_buf(port, buf, count);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
+{
+ struct port *port;
+ unsigned int ret;
+
+ port = filp->private_data;
+ poll_wait(filp, &port->waitqueue, wait);
+
+ ret = 0;
+ if (port->inbuf)
+ ret |= POLLIN | POLLRDNORM;
+ if (port->host_connected)
+ ret |= POLLOUT;
+ if (!port->host_connected)
+ ret |= POLLHUP;
+
+ return ret;
+}
+
+static int port_fops_release(struct inode *inode, struct file *filp)
+{
+ struct port *port;
+
+ port = filp->private_data;
+
+ /* Notify host of port being closed */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+
+ spin_lock_irq(&port->inbuf_lock);
+ port->guest_connected = false;
+
+ discard_port_data(port);
+
+ spin_unlock_irq(&port->inbuf_lock);
+
+ return 0;
+}
+
+static int port_fops_open(struct inode *inode, struct file *filp)
+{
+ struct cdev *cdev = inode->i_cdev;
+ struct port *port;
+
+ port = container_of(cdev, struct port, cdev);
+ filp->private_data = port;
+
+ /*
+ * Don't allow opening of console port devices -- that's done
+ * via /dev/hvc
+ */
+ if (is_console_port(port))
+ return -ENXIO;
+
+ /* Allow only one process to open a particular port at a time */
+ spin_lock_irq(&port->inbuf_lock);
+ if (port->guest_connected) {
+ spin_unlock_irq(&port->inbuf_lock);
+ return -EMFILE;
+ }
- /* Copy across to their buffer and increment offset. */
- memcpy(buf, in, count);
- in += count;
- in_len -= count;
+ port->guest_connected = true;
+ spin_unlock_irq(&port->inbuf_lock);
- /* Finished? Re-register buffer so Host will use it again. */
- if (in_len == 0)
- add_inbuf();
+ /* Notify host of port being opened */
+ send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);
- return count;
+ return 0;
}
-/*:*/
-/*D:320 Console drivers are initialized very early so boot messages can go out,
- * so we do things slightly differently from the generic virtio initialization
- * of the net and block drivers.
+/*
+ * The file operations that we support: programs in the guest can open
+ * a console device, read from it, write to it, poll for data and
+ * close it. The devices are at
+ * /dev/vport<device number>p<port number>
+ */
+static const struct file_operations port_fops = {
+ .owner = THIS_MODULE,
+ .open = port_fops_open,
+ .read = port_fops_read,
+ .write = port_fops_write,
+ .poll = port_fops_poll,
+ .release = port_fops_release,
+};
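These char devices show up in the guest as /dev/vport<device>p<port> (the path below is only an example). A minimal userspace sketch that exercises the semantics implemented above -- write() feeds the port's out_vq, read() returns 0 when the host side is not connected, poll() reports POLLIN/POLLOUT/POLLHUP, and a second open() of the same port fails:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/vport0p1", O_RDWR);	/* example path */
	if (fd < 0) {
		perror("open");		/* EMFILE if the port is already open */
		return 1;
	}

	if (write(fd, "hello host\n", 11) < 0)	/* queued on the out_vq */
		perror("write");

	pfd.fd = fd;
	pfd.events = POLLIN | POLLOUT;
	poll(&pfd, 1, -1);			/* POLLHUP once the host closes */

	n = read(fd, buf, sizeof(buf));		/* 0 means host not connected */
	if (n > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}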
+
+/*
+ * The put_chars() callback is pretty straightforward.
*
- * At this stage, the console is output-only. It's too early to set up a
- * virtqueue, so we let the drivers do some boutique early-output thing. */
-int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
+ * We turn the characters into a scatter-gather list, add it to the
+ * output queue and then kick the Host. Then we sit here waiting for
+ * it to finish: inefficient in theory, but in practice
+ * implementations will do it immediately (lguest's Launcher does).
+ */
+static int put_chars(u32 vtermno, const char *buf, int count)
{
- virtio_cons.put_chars = put_chars;
- return hvc_instantiate(0, 0, &virtio_cons);
+ struct port *port;
+
+ port = find_port_by_vtermno(vtermno);
+ if (!port)
+ return 0;
+
+ if (unlikely(early_put_chars))
+ return early_put_chars(vtermno, buf, count);
+
+ return send_buf(port, (void *)buf, count);
}
/*
- * virtio console configuration. This supports:
- * - console resize
+ * get_chars() is the callback from the hvc_console infrastructure
+ * when an interrupt is received.
+ *
+ * We call out to fill_readbuf that gets us the required data from the
+ * buffers that are queued up.
*/
-static void virtcons_apply_config(struct virtio_device *dev)
+static int get_chars(u32 vtermno, char *buf, int count)
{
+ struct port *port;
+
+ port = find_port_by_vtermno(vtermno);
+ if (!port)
+ return 0;
+
+ /* If we don't have an input queue yet, we can't get input. */
+ BUG_ON(!port->in_vq);
+
+ return fill_readbuf(port, buf, count, false);
+}
+
+static void resize_console(struct port *port)
+{
+ struct virtio_device *vdev;
struct winsize ws;
- if (virtio_has_feature(dev, VIRTIO_CONSOLE_F_SIZE)) {
- dev->config->get(dev,
- offsetof(struct virtio_console_config, cols),
- &ws.ws_col, sizeof(u16));
- dev->config->get(dev,
- offsetof(struct virtio_console_config, rows),
- &ws.ws_row, sizeof(u16));
- hvc_resize(hvc, ws);
+ vdev = port->portdev->vdev;
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, cols),
+ &ws.ws_col, sizeof(u16));
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, rows),
+ &ws.ws_row, sizeof(u16));
+ hvc_resize(port->cons.hvc, ws);
}
}
-/*
- * we support only one console, the hvc struct is a global var
- * We set the configuration at this point, since we now have a tty
- */
+/* We set the configuration at this point, since we now have a tty */
static int notifier_add_vio(struct hvc_struct *hp, int data)
{
+ struct port *port;
+
+ port = find_port_by_vtermno(hp->vtermno);
+ if (!port)
+ return -EINVAL;
+
hp->irq_requested = 1;
- virtcons_apply_config(vdev);
+ resize_console(port);
return 0;
}
@@ -173,79 +713,797 @@ static void notifier_del_vio(struct hvc_struct *hp, int data)
hp->irq_requested = 0;
}
-static void hvc_handle_input(struct virtqueue *vq)
+/* The operations for console ports. */
+static const struct hv_ops hv_ops = {
+ .get_chars = get_chars,
+ .put_chars = put_chars,
+ .notifier_add = notifier_add_vio,
+ .notifier_del = notifier_del_vio,
+ .notifier_hangup = notifier_del_vio,
+};
+
+/*
+ * Console drivers are initialized very early so boot messages can go
+ * out, so we do things slightly differently from the generic virtio
+ * initialization of the net and block drivers.
+ *
+ * At this stage, the console is output-only. It's too early to set
+ * up a virtqueue, so we let the drivers do some boutique early-output
+ * thing.
+ */
+int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
{
- if (hvc_poll(hvc))
+ early_put_chars = put_chars;
+ return hvc_instantiate(0, 0, &hv_ops);
+}
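virtio_cons_early_init() is the hook platform code calls before any virtqueue exists: it stashes an output-only routine in early_put_chars and registers vterm 0 with hvc. A minimal sketch of a caller follows; my_hypervisor_putchar() is hypothetical, and real users (lguest, for instance) invoke this from their own early setup code rather than the initcall used here:

#include <linux/init.h>
#include <linux/types.h>
#include <linux/virtio_console.h>

static int my_early_put_chars(u32 vtermno, const char *buf, int count)
{
	int i;

	for (i = 0; i < count; i++)
		my_hypervisor_putchar(buf[i]);	/* hypothetical hypercall */
	return count;
}

static int __init my_early_console_setup(void)
{
	return virtio_cons_early_init(my_early_put_chars);
}
console_initcall(my_early_console_setup);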
+
+int init_port_console(struct port *port)
+{
+ int ret;
+
+ /*
+ * The Host's telling us this port is a console port. Hook it
+ * up with an hvc console.
+ *
+ * To set up and manage our virtual console, we call
+ * hvc_alloc().
+ *
+ * The first argument of hvc_alloc() is the virtual console
+ * number. The second argument is the parameter for the
+ * notification mechanism (like irq number). We currently
+ * leave this as zero, virtqueues have implicit notifications.
+ *
+ * The third argument is a "struct hv_ops" containing the
+ * put_chars() get_chars(), notifier_add() and notifier_del()
+ * pointers. The final argument is the output buffer size: we
+ * can do any size, so we put PAGE_SIZE here.
+ */
+ port->cons.vtermno = pdrvdata.next_vtermno;
+
+ port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
+ if (IS_ERR(port->cons.hvc)) {
+ ret = PTR_ERR(port->cons.hvc);
+ dev_err(port->dev,
+ "error %d allocating hvc for port\n", ret);
+ port->cons.hvc = NULL;
+ return ret;
+ }
+ spin_lock_irq(&pdrvdata_lock);
+ pdrvdata.next_vtermno++;
+ list_add_tail(&port->cons.list, &pdrvdata.consoles);
+ spin_unlock_irq(&pdrvdata_lock);
+ port->guest_connected = true;
+
+ /* Notify host of port being opened */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);
+
+ return 0;
+}
+
+static ssize_t show_port_name(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct port *port;
+
+ port = dev_get_drvdata(dev);
+
+ return sprintf(buffer, "%s\n", port->name);
+}
+
+static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);
+
+static struct attribute *port_sysfs_entries[] = {
+ &dev_attr_name.attr,
+ NULL
+};
+
+static struct attribute_group port_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = port_sysfs_entries,
+};
+
+static int debugfs_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+{
+ struct port *port;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+
+ out_count = 1024;
+ buf = kmalloc(out_count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ port = filp->private_data;
+ out_offset = 0;
+ out_offset += snprintf(buf + out_offset, out_count,
+ "name: %s\n", port->name ? port->name : "");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "guest_connected: %d\n", port->guest_connected);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "host_connected: %d\n", port->host_connected);
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "is_console: %s\n",
+ is_console_port(port) ? "yes" : "no");
+ out_offset += snprintf(buf + out_offset, out_count - out_offset,
+ "console_vtermno: %u\n", port->cons.vtermno);
+
+ ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations port_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_open,
+ .read = debugfs_read,
+};
+
+/* Remove all port-specific data. */
+static int remove_port(struct port *port)
+{
+ struct port_buffer *buf;
+
+ spin_lock_irq(&port->portdev->ports_lock);
+ list_del(&port->list);
+ spin_unlock_irq(&port->portdev->ports_lock);
+
+ if (is_console_port(port)) {
+ spin_lock_irq(&pdrvdata_lock);
+ list_del(&port->cons.list);
+ spin_unlock_irq(&pdrvdata_lock);
+ hvc_remove(port->cons.hvc);
+ }
+ if (port->guest_connected)
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);
+
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(pdrvdata.class, port->dev->devt);
+ cdev_del(&port->cdev);
+
+ /* Remove unused data this port might have received. */
+ discard_port_data(port);
+
+ /* Remove buffers we queued up for the Host to send us data in. */
+ while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+
+ kfree(port->name);
+
+ debugfs_remove(port->debugfs_file);
+
+ kfree(port);
+ return 0;
+}
+
+/* Any private messages that the Host and Guest want to share */
+static void handle_control_message(struct ports_device *portdev,
+ struct port_buffer *buf)
+{
+ struct virtio_console_control *cpkt;
+ struct port *port;
+ size_t name_size;
+ int err;
+
+ cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);
+
+ port = find_port_by_id(portdev, cpkt->id);
+ if (!port) {
+ /* No port with that id in our list; drop the packet. */
+ dev_dbg(&portdev->vdev->dev,
+ "Invalid index %u in control packet\n", cpkt->id);
+ return;
+ }
+
+ switch (cpkt->event) {
+ case VIRTIO_CONSOLE_CONSOLE_PORT:
+ if (!cpkt->value)
+ break;
+ if (is_console_port(port))
+ break;
+
+ init_port_console(port);
+ /*
+ * Could remove the port here in case init fails - but
+ * have to notify the host first.
+ */
+ break;
+ case VIRTIO_CONSOLE_RESIZE:
+ if (!is_console_port(port))
+ break;
+ port->cons.hvc->irq_requested = 1;
+ resize_console(port);
+ break;
+ case VIRTIO_CONSOLE_PORT_OPEN:
+ port->host_connected = cpkt->value;
+ wake_up_interruptible(&port->waitqueue);
+ break;
+ case VIRTIO_CONSOLE_PORT_NAME:
+ /*
+ * Skip the size of the header and the cpkt to get the size
+ * of the name that was sent
+ */
+ name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;
+
+ port->name = kmalloc(name_size, GFP_KERNEL);
+ if (!port->name) {
+ dev_err(port->dev,
+ "Not enough space to store port name\n");
+ break;
+ }
+ strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
+ name_size - 1);
+ port->name[name_size - 1] = 0;
+
+ /*
+ * Since we only have one sysfs attribute, 'name',
+ * create it only if we have a name for the port.
+ */
+ err = sysfs_create_group(&port->dev->kobj,
+ &port_attribute_group);
+ if (err)
+ dev_err(port->dev,
+ "Error %d creating sysfs device attributes\n",
+ err);
+
+ break;
+ case VIRTIO_CONSOLE_PORT_REMOVE:
+ /*
+ * Hot unplug the port. We don't decrement nr_ports
+ * since we don't want to deal with extra complexities
+ * of using the lowest-available port id: We can just
+ * pick up the nr_ports number as the id and not have
+ * userspace send it to us. This helps us in two
+ * ways:
+ *
+ * - We don't need to have a 'port_id' field in the
+ * config space when a port is hot-added. This is a
+ * good thing as we might queue up multiple hotplug
+ * requests issued in our workqueue.
+ *
+ * - Another way to deal with this would have been to
+ * use a bitmap of the active ports and select the
+ * lowest non-active port from that map. That
+ * bloats the already tight config space and we
+ * would end up artificially limiting the
+ * max. number of ports to sizeof(bitmap). Right
+ * now we can support 2^32 ports (as the port id is
+ * stored in a u32 type).
+ *
+ */
+ remove_port(port);
+ break;
+ }
+}
+
+static void control_work_handler(struct work_struct *work)
+{
+ struct ports_device *portdev;
+ struct virtqueue *vq;
+ struct port_buffer *buf;
+ unsigned int len;
+
+ portdev = container_of(work, struct ports_device, control_work);
+ vq = portdev->c_ivq;
+
+ spin_lock(&portdev->cvq_lock);
+ while ((buf = vq->vq_ops->get_buf(vq, &len))) {
+ spin_unlock(&portdev->cvq_lock);
+
+ buf->len = len;
+ buf->offset = 0;
+
+ handle_control_message(portdev, buf);
+
+ spin_lock(&portdev->cvq_lock);
+ if (add_inbuf(portdev->c_ivq, buf) < 0) {
+ dev_warn(&portdev->vdev->dev,
+ "Error adding buffer to queue\n");
+ free_buf(buf);
+ }
+ }
+ spin_unlock(&portdev->cvq_lock);
+}
+
+static void in_intr(struct virtqueue *vq)
+{
+ struct port *port;
+ unsigned long flags;
+
+ port = find_port_by_vq(vq->vdev->priv, vq);
+ if (!port)
+ return;
+
+ spin_lock_irqsave(&port->inbuf_lock, flags);
+ if (!port->inbuf)
+ port->inbuf = get_inbuf(port);
+
+ /*
+ * Don't queue up data when port is closed. This condition
+ * can be reached when a console port is not yet connected (no
+ * tty is spawned) and the host sends out data to console
+ * ports. For generic serial ports, the host won't
+ * (shouldn't) send data till the guest is connected.
+ */
+ if (!port->guest_connected)
+ discard_port_data(port);
+
+ spin_unlock_irqrestore(&port->inbuf_lock, flags);
+
+ wake_up_interruptible(&port->waitqueue);
+
+ if (is_console_port(port) && hvc_poll(port->cons.hvc))
hvc_kick();
}
-/*D:370 Once we're further in boot, we get probed like any other virtio device.
- * At this stage we set up the output virtqueue.
- *
- * To set up and manage our virtual console, we call hvc_alloc(). Since we
- * never remove the console device we never need this pointer again.
+static void control_intr(struct virtqueue *vq)
+{
+ struct ports_device *portdev;
+
+ portdev = vq->vdev->priv;
+ schedule_work(&portdev->control_work);
+}
+
+static void config_intr(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+
+ portdev = vdev->priv;
+ if (use_multiport(portdev)) {
+ /* Handle port hot-add */
+ schedule_work(&portdev->config_work);
+ }
+ /*
+ * We'll use this way of resizing only for legacy support.
+ * For newer userspace (VIRTIO_CONSOLE_F_MULTIPORT+), use
+ * control messages to indicate console size changes so that
+ * it can be done per-port
+ */
+ resize_console(find_port_by_id(portdev, 0));
+}
+
+static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
+{
+ struct port_buffer *buf;
+ unsigned int ret;
+ int err;
+
+ ret = 0;
+ do {
+ buf = alloc_buf(PAGE_SIZE);
+ if (!buf)
+ break;
+
+ spin_lock_irq(lock);
+ err = add_inbuf(vq, buf);
+ if (err < 0) {
+ spin_unlock_irq(lock);
+ free_buf(buf);
+ break;
+ }
+ ret++;
+ spin_unlock_irq(lock);
+ } while (err > 0);
+
+ return ret;
+}
+
+static int add_port(struct ports_device *portdev, u32 id)
+{
+ char debugfs_name[16];
+ struct port *port;
+ struct port_buffer *buf;
+ dev_t devt;
+ int err;
+
+ port = kmalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ port->portdev = portdev;
+ port->id = id;
+
+ port->name = NULL;
+ port->inbuf = NULL;
+ port->cons.hvc = NULL;
+
+ port->host_connected = port->guest_connected = false;
+
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ cdev_init(&port->cdev, &port_fops);
+
+ devt = MKDEV(portdev->chr_major, id);
+ err = cdev_add(&port->cdev, devt, 1);
+ if (err < 0) {
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d adding cdev for port %u\n", err, id);
+ goto free_port;
+ }
+ port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
+ devt, port, "vport%up%u",
+ port->portdev->drv_index, id);
+ if (IS_ERR(port->dev)) {
+ err = PTR_ERR(port->dev);
+ dev_err(&port->portdev->vdev->dev,
+ "Error %d creating device for port %u\n",
+ err, id);
+ goto free_cdev;
+ }
+
+ spin_lock_init(&port->inbuf_lock);
+ init_waitqueue_head(&port->waitqueue);
+
+ /* Fill the in_vq with buffers so the host can send us data. */
+ err = fill_queue(port->in_vq, &port->inbuf_lock);
+ if (!err) {
+ dev_err(port->dev, "Error allocating inbufs\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+
+ /*
+ * If we're not using multiport support, this has to be a console port
+ */
+ if (!use_multiport(port->portdev)) {
+ err = init_port_console(port);
+ if (err)
+ goto free_inbufs;
+ }
+
+ spin_lock_irq(&portdev->ports_lock);
+ list_add_tail(&port->list, &port->portdev->ports);
+ spin_unlock_irq(&portdev->ports_lock);
+
+ /*
+ * Tell the Host we're set so that it can send us various
+ * configuration parameters for this port (eg, port name,
+ * caching, whether this is a console port, etc.)
+ */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+
+ if (pdrvdata.debugfs_dir) {
+ /*
+ * Finally, create the debugfs file that we can use to
+ * inspect a port's state at any time
+ */
+ sprintf(debugfs_name, "vport%up%u",
+ port->portdev->drv_index, id);
+ port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ pdrvdata.debugfs_dir,
+ port,
+ &port_debugfs_ops);
+ }
+ return 0;
+
+free_inbufs:
+ while ((buf = port->in_vq->vq_ops->detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+free_device:
+ device_destroy(pdrvdata.class, port->dev->devt);
+free_cdev:
+ cdev_del(&port->cdev);
+free_port:
+ kfree(port);
+fail:
+ return err;
+}
+
+/*
+ * The workhandler for config-space updates.
*
- * Finally we put our input buffer in the input queue, ready to receive. */
-static int __devinit virtcons_probe(struct virtio_device *dev)
+ * This is called when ports are hot-added.
+ */
+static void config_work_handler(struct work_struct *work)
+{
+ struct virtio_console_config virtconconf;
+ struct ports_device *portdev;
+ struct virtio_device *vdev;
+ int err;
+
+ portdev = container_of(work, struct ports_device, config_work);
+
+ vdev = portdev->vdev;
+ vdev->config->get(vdev,
+ offsetof(struct virtio_console_config, nr_ports),
+ &virtconconf.nr_ports,
+ sizeof(virtconconf.nr_ports));
+
+ if (portdev->config.nr_ports == virtconconf.nr_ports) {
+ /*
+ * Port 0 got hot-added. Since we already did all the
+ * other initialisation for it, just tell the Host
+ * that the port is ready if we find the port. In
+ * case the port was hot-removed earlier, we call
+ * add_port to add the port.
+ */
+ struct port *port;
+
+ port = find_port_by_id(portdev, 0);
+ if (!port)
+ add_port(portdev, 0);
+ else
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ return;
+ }
+ if (virtconconf.nr_ports > portdev->config.max_nr_ports) {
+ dev_warn(&vdev->dev,
+ "More ports specified (%u) than allowed (%u)",
+ portdev->config.nr_ports + 1,
+ portdev->config.max_nr_ports);
+ return;
+ }
+ if (virtconconf.nr_ports < portdev->config.nr_ports)
+ return;
+
+ /* Hot-add ports */
+ while (virtconconf.nr_ports - portdev->config.nr_ports) {
+ err = add_port(portdev, portdev->config.nr_ports);
+ if (err)
+ break;
+ portdev->config.nr_ports++;
+ }
+}
+
+static int init_vqs(struct ports_device *portdev)
{
- vq_callback_t *callbacks[] = { hvc_handle_input, NULL};
- const char *names[] = { "input", "output" };
- struct virtqueue *vqs[2];
+ vq_callback_t **io_callbacks;
+ char **io_names;
+ struct virtqueue **vqs;
+ u32 i, j, nr_ports, nr_queues;
int err;
- vdev = dev;
+ nr_ports = portdev->config.max_nr_ports;
+ nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
- /* This is the scratch page we use to receive console input */
- inbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!inbuf) {
+ vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
+ if (!vqs) {
err = -ENOMEM;
goto fail;
}
+ io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
+ if (!io_callbacks) {
+ err = -ENOMEM;
+ goto free_vqs;
+ }
+ io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
+ if (!io_names) {
+ err = -ENOMEM;
+ goto free_callbacks;
+ }
+ portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
+ GFP_KERNEL);
+ if (!portdev->in_vqs) {
+ err = -ENOMEM;
+ goto free_names;
+ }
+ portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
+ GFP_KERNEL);
+ if (!portdev->out_vqs) {
+ err = -ENOMEM;
+ goto free_invqs;
+ }
+
+ /*
+ * For backward compat (newer host but older guest), the host
+ * spawns a console port first and also inits the vqs for port
+ * 0 before others.
+ */
+ j = 0;
+ io_callbacks[j] = in_intr;
+ io_callbacks[j + 1] = NULL;
+ io_names[j] = "input";
+ io_names[j + 1] = "output";
+ j += 2;
+ if (use_multiport(portdev)) {
+ io_callbacks[j] = control_intr;
+ io_callbacks[j + 1] = NULL;
+ io_names[j] = "control-i";
+ io_names[j + 1] = "control-o";
+
+ for (i = 1; i < nr_ports; i++) {
+ j += 2;
+ io_callbacks[j] = in_intr;
+ io_callbacks[j + 1] = NULL;
+ io_names[j] = "input";
+ io_names[j + 1] = "output";
+ }
+ }
/* Find the queues. */
- /* FIXME: This is why we want to wean off hvc: we do nothing
- * when input comes in. */
- err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+ err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
+ io_callbacks,
+ (const char **)io_names);
if (err)
+ goto free_outvqs;
+
+ j = 0;
+ portdev->in_vqs[0] = vqs[0];
+ portdev->out_vqs[0] = vqs[1];
+ j += 2;
+ if (use_multiport(portdev)) {
+ portdev->c_ivq = vqs[j];
+ portdev->c_ovq = vqs[j + 1];
+
+ for (i = 1; i < nr_ports; i++) {
+ j += 2;
+ portdev->in_vqs[i] = vqs[j];
+ portdev->out_vqs[i] = vqs[j + 1];
+ }
+ }
+ kfree(io_callbacks);
+ kfree(io_names);
+ kfree(vqs);
+
+ return 0;
+
+free_names:
+ kfree(io_names);
+free_callbacks:
+ kfree(io_callbacks);
+free_outvqs:
+ kfree(portdev->out_vqs);
+free_invqs:
+ kfree(portdev->in_vqs);
+free_vqs:
+ kfree(vqs);
+fail:
+ return err;
+}
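To make the queue ordering that init_vqs() relies on concrete: port 0's in/out pair comes first (for compatibility with hosts that only know about a single console), then the control pair, then one in/out pair per additional port. For example, with multiport negotiated and max_nr_ports = 3, find_vqs() hands back 8 virtqueues laid out as:

/*
 *   vqs[0] = port 0 in      vqs[1] = port 0 out
 *   vqs[2] = control in     vqs[3] = control out
 *   vqs[4] = port 1 in      vqs[5] = port 1 out
 *   vqs[6] = port 2 in      vqs[7] = port 2 out
 *
 * i.e. for port i > 0:  in_vqs[i]  = vqs[2 * (i + 1)]
 *                       out_vqs[i] = vqs[2 * (i + 1) + 1]
 */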
+
+static const struct file_operations portdev_fops = {
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Once we're further in boot, we get probed like any other virtio
+ * device.
+ *
+ * If the host also supports multiple console ports, we check the
+ * config space to see how many ports the host has spawned. We
+ * initialize each port found.
+ */
+static int __devinit virtcons_probe(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ u32 i;
+ int err;
+ bool multiport;
+
+ portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
+ if (!portdev) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ /* Attach this portdev to this virtio_device, and vice-versa. */
+ portdev->vdev = vdev;
+ vdev->priv = portdev;
+
+ spin_lock_irq(&pdrvdata_lock);
+ portdev->drv_index = pdrvdata.index++;
+ spin_unlock_irq(&pdrvdata_lock);
+
+ portdev->chr_major = register_chrdev(0, "virtio-portsdev",
+ &portdev_fops);
+ if (portdev->chr_major < 0) {
+ dev_err(&vdev->dev,
+ "Error %d registering chrdev for device %u\n",
+ portdev->chr_major, portdev->drv_index);
+ err = portdev->chr_major;
goto free;
+ }
- in_vq = vqs[0];
- out_vq = vqs[1];
+ multiport = false;
+ portdev->config.nr_ports = 1;
+ portdev->config.max_nr_ports = 1;
+ if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
+ multiport = true;
+ vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;
- /* Start using the new console output. */
- virtio_cons.get_chars = get_chars;
- virtio_cons.put_chars = put_chars;
- virtio_cons.notifier_add = notifier_add_vio;
- virtio_cons.notifier_del = notifier_del_vio;
- virtio_cons.notifier_hangup = notifier_del_vio;
-
- /* The first argument of hvc_alloc() is the virtual console number, so
- * we use zero. The second argument is the parameter for the
- * notification mechanism (like irq number). We currently leave this
- * as zero, virtqueues have implicit notifications.
- *
- * The third argument is a "struct hv_ops" containing the put_chars()
- * get_chars(), notifier_add() and notifier_del() pointers.
- * The final argument is the output buffer size: we can do any size,
- * so we put PAGE_SIZE here. */
- hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE);
- if (IS_ERR(hvc)) {
- err = PTR_ERR(hvc);
- goto free_vqs;
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ nr_ports),
+ &portdev->config.nr_ports,
+ sizeof(portdev->config.nr_ports));
+ vdev->config->get(vdev, offsetof(struct virtio_console_config,
+ max_nr_ports),
+ &portdev->config.max_nr_ports,
+ sizeof(portdev->config.max_nr_ports));
+ if (portdev->config.nr_ports > portdev->config.max_nr_ports) {
+ dev_warn(&vdev->dev,
+ "More ports (%u) specified than allowed (%u). Will init %u ports.",
+ portdev->config.nr_ports,
+ portdev->config.max_nr_ports,
+ portdev->config.max_nr_ports);
+
+ portdev->config.nr_ports = portdev->config.max_nr_ports;
+ }
+ }
+
+ /* Let the Host know we support multiple ports. */
+ vdev->config->finalize_features(vdev);
+
+ err = init_vqs(portdev);
+ if (err < 0) {
+ dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
+ goto free_chrdev;
+ }
+
+ spin_lock_init(&portdev->ports_lock);
+ INIT_LIST_HEAD(&portdev->ports);
+
+ if (multiport) {
+ spin_lock_init(&portdev->cvq_lock);
+ INIT_WORK(&portdev->control_work, &control_work_handler);
+ INIT_WORK(&portdev->config_work, &config_work_handler);
+
+ err = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+ if (!err) {
+ dev_err(&vdev->dev,
+ "Error allocating buffers for control queue\n");
+ err = -ENOMEM;
+ goto free_vqs;
+ }
}
- /* Register the input buffer the first time. */
- add_inbuf();
+ for (i = 0; i < portdev->config.nr_ports; i++)
+ add_port(portdev, i);
+
+ /* Start using the new console output. */
+ early_put_chars = NULL;
return 0;
free_vqs:
vdev->config->del_vqs(vdev);
+ kfree(portdev->in_vqs);
+ kfree(portdev->out_vqs);
+free_chrdev:
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
- kfree(inbuf);
+ kfree(portdev);
fail:
return err;
}
+static void virtcons_remove(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port, *port2;
+ struct port_buffer *buf;
+ unsigned int len;
+
+ portdev = vdev->priv;
+
+ cancel_work_sync(&portdev->control_work);
+ cancel_work_sync(&portdev->config_work);
+
+ list_for_each_entry_safe(port, port2, &portdev->ports, list)
+ remove_port(port);
+
+ unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+ while ((buf = portdev->c_ivq->vq_ops->get_buf(portdev->c_ivq, &len)))
+ free_buf(buf);
+
+ while ((buf = portdev->c_ivq->vq_ops->detach_unused_buf(portdev->c_ivq)))
+ free_buf(buf);
+
+ vdev->config->del_vqs(vdev);
+ kfree(portdev->in_vqs);
+ kfree(portdev->out_vqs);
+
+ kfree(portdev);
+}
+
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -253,6 +1511,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
+ VIRTIO_CONSOLE_F_MULTIPORT,
};
static struct virtio_driver virtio_console = {
@@ -262,14 +1521,41 @@ static struct virtio_driver virtio_console = {
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtcons_probe,
- .config_changed = virtcons_apply_config,
+ .remove = virtcons_remove,
+ .config_changed = config_intr,
};
static int __init init(void)
{
+ int err;
+
+ pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
+ if (IS_ERR(pdrvdata.class)) {
+ err = PTR_ERR(pdrvdata.class);
+ pr_err("Error %d creating virtio-ports class\n", err);
+ return err;
+ }
+
+ pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
+ if (!pdrvdata.debugfs_dir) {
+ pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
+ PTR_ERR(pdrvdata.debugfs_dir));
+ }
+ INIT_LIST_HEAD(&pdrvdata.consoles);
+
return register_virtio_driver(&virtio_console);
}
+
+static void __exit fini(void)
+{
+ unregister_virtio_driver(&virtio_console);
+
+ class_destroy(pdrvdata.class);
+ if (pdrvdata.debugfs_dir)
+ debugfs_remove_recursive(pdrvdata.debugfs_dir);
+}
module_init(init);
+module_exit(fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio console driver");
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index 27d20fac19d1..b314a999aabe 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -21,7 +21,7 @@
#define DRV_NAME "cs5535-clockevt"
-static int timer_irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
+static int timer_irq;
module_param_named(irq, timer_irq, int, 0644);
MODULE_PARM_DESC(irq, "Which IRQ to use for the clock source MFGPT ticks.");
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
(dbs_tuners_ins.up_threshold -
dbs_tuners_ins.down_differential);
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index b5f2ee0f8e2c..64a937262a40 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -613,8 +613,6 @@ static void dma_tasklet(unsigned long data)
cohd_fin->pending_irqs--;
cohc->completed = cohd_fin->desc.cookie;
- BUG_ON(cohc->nbr_active_done && cohd_fin == NULL);
-
if (cohc->nbr_active_done == 0)
return;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 6f51a0a7a8bb..e7a3230fb7d5 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -826,6 +826,7 @@ void dma_async_device_unregister(struct dma_device *device)
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
device_unregister(&chan->dev->device);
+ free_percpu(chan->local);
}
}
EXPORT_SYMBOL(dma_async_device_unregister);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 8b905161fbf4..948d563941c9 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -467,7 +467,7 @@ err_srcs:
if (iterations > 0)
while (!kthread_should_stop()) {
- DECLARE_WAIT_QUEUE_HEAD(wait_dmatest_exit);
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
interruptible_sleep_on(&wait_dmatest_exit);
}
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5f7a500e18d0..5cc37afe2bc1 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -249,7 +249,7 @@ int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
- if (end && time_after(jiffies, end)) {
+ if (tmo && time_after(jiffies, end)) {
err = -ETIMEDOUT;
break;
}
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 9a5bc1a7389e..e80bae1673fa 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -761,12 +761,10 @@ static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
* @buffer_n: buffer number to update.
* 0 or 1 are the only valid values.
* @phyaddr: buffer physical address.
- * @return: Returns 0 on success or negative error code on failure. This
- * function will fail if the buffer is set to ready.
*/
/* Called under spin_lock(_irqsave)(&ichan->lock) */
-static int ipu_update_channel_buffer(struct idmac_channel *ichan,
- int buffer_n, dma_addr_t phyaddr)
+static void ipu_update_channel_buffer(struct idmac_channel *ichan,
+ int buffer_n, dma_addr_t phyaddr)
{
enum ipu_channel channel = ichan->dma_chan.chan_id;
uint32_t reg;
@@ -806,8 +804,6 @@ static int ipu_update_channel_buffer(struct idmac_channel *ichan,
}
spin_unlock_irqrestore(&ipu_data.lock, flags);
-
- return 0;
}
/* Called under spin_lock_irqsave(&ichan->lock) */
@@ -816,7 +812,6 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
{
unsigned int chan_id = ichan->dma_chan.chan_id;
struct device *dev = &ichan->dma_chan.dev->device;
- int ret;
if (async_tx_test_ack(&desc->txd))
return -EINTR;
@@ -827,14 +822,7 @@ static int ipu_submit_buffer(struct idmac_channel *ichan,
* could make it conditional on status >= IPU_CHANNEL_ENABLED, but
* doing it again shouldn't hurt either.
*/
- ret = ipu_update_channel_buffer(ichan, buf_idx,
- sg_dma_address(sg));
-
- if (ret < 0) {
- dev_err(dev, "Updating sg %p on channel 0x%x buffer %d failed!\n",
- sg, chan_id, buf_idx);
- return ret;
- }
+ ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
ipu_select_buffer(chan_id, buf_idx);
dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
@@ -1379,10 +1367,11 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (likely(sgnew) &&
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
+ callback = descnew->txd.callback;
+ callback_param = descnew->txd.callback_param;
spin_unlock(&ichan->lock);
- callback(callback_param);
+ if (callback)
+ callback(callback_param);
spin_lock(&ichan->lock);
}
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 000dc67b85b7..3391e6739d06 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -2658,10 +2658,11 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
* the memory system completely. A command line option allows to force-enable
* hardware ECC later in amd64_enable_ecc_error_reporting().
*/
-static const char *ecc_warning =
- "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
- " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
- " Also, use of the override can cause unknown side effects.\n";
+static const char *ecc_msg =
+ "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
+ " Either enable ECC checking or force module loading by setting "
+ "'ecc_enable_override'.\n"
+ " (Note that use of the override may cause unknown side effects.)\n";
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
@@ -2673,7 +2674,7 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
if (!ecc_enabled)
- amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
+ amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
"is currently disabled, set F3x%x[22] (%s).\n",
K8_NBCFG, pci_name(pvt->misc_f3_ctl));
else
@@ -2681,13 +2682,13 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
if (!nb_mce_en)
- amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
+ amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
"0x%08x[4] on node %d to enable.\n",
MSR_IA32_MCG_CTL, pvt->mc_node_id);
if (!ecc_enabled || !nb_mce_en) {
if (!ecc_enable_override) {
- amd64_printk(KERN_WARNING, "%s", ecc_warning);
+ amd64_printk(KERN_NOTICE, "%s", ecc_msg);
return -ENODEV;
}
ecc_enable_override = 0;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index cf27402af97b..ecd5928d7110 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -804,8 +804,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
end <<= (24 - PAGE_SHIFT);
end |= (1 << (24 - PAGE_SHIFT)) - 1;
- csrow->first_page = start >> PAGE_SHIFT;
- csrow->last_page = end >> PAGE_SHIFT;
+ csrow->first_page = start;
+ csrow->last_page = end;
csrow->nr_pages = end + 1 - start;
csrow->grain = 8;
csrow->mtype = mtype;
@@ -892,10 +892,6 @@ static int __devinit mpc85xx_mc_err_probe(struct of_device *op,
mpc85xx_init_csrows(mci);
-#ifdef CONFIG_EDAC_DEBUG
- edac_mc_register_mcidev_debug((struct attribute **)debug_attr);
-#endif
-
/* store the original error disable bits */
orig_ddr_err_disable =
in_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DISABLE);
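
The first mpc85xx_edac hunk above works because `start` and `end` were already converted to page numbers by the `<< (24 - PAGE_SHIFT)` step visible at the top of the hunk, so shifting them right by PAGE_SHIFT again discarded most of the range. A quick arithmetic check of the two variants, with illustrative register values and PAGE_SHIFT assumed to be 12 (4 KiB pages); the bounds appear to be in 1 << 24 byte units, judging from the shift:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint64_t start_reg = 1, end_reg = 2;    /* illustrative chip-select bounds */

    /* Conversion done at the top of the hunk: bounds units -> page numbers. */
    uint64_t start = start_reg << (24 - PAGE_SHIFT);
    uint64_t end   = (end_reg << (24 - PAGE_SHIFT)) | ((1 << (24 - PAGE_SHIFT)) - 1);

    /* Buggy variant: shifting page numbers by PAGE_SHIFT a second time. */
    printf("buggy first_page=%llu last_page=%llu\n",
           (unsigned long long)(start >> PAGE_SHIFT),
           (unsigned long long)(end >> PAGE_SHIFT));

    /* Fixed variant: the values are already page numbers. */
    printf("fixed first_page=%llu last_page=%llu\n",
           (unsigned long long)start, (unsigned long long)end);
    return 0;
}
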
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index cbaf420c36c5..2d3dc7ded0a9 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -893,20 +893,31 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
static struct kmem_cache *fwnet_packet_task_cache;
+static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
+{
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+}
+
static int fwnet_send_packet(struct fwnet_packet_task *ptask);
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
- struct fwnet_device *dev;
+ struct fwnet_device *dev = ptask->dev;
unsigned long flags;
-
- dev = ptask->dev;
+ bool free;
spin_lock_irqsave(&dev->lock, flags);
- list_del(&ptask->pt_link);
- spin_unlock_irqrestore(&dev->lock, flags);
- ptask->outstanding_pkts--; /* FIXME access inside lock */
+ ptask->outstanding_pkts--;
+
+ /* Check whether we or the networking TX soft-IRQ is the last user. */

+ free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link));
+
+ if (ptask->outstanding_pkts == 0)
+ list_del(&ptask->pt_link);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
if (ptask->outstanding_pkts > 0) {
u16 dg_size;
@@ -951,10 +962,10 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
}
fwnet_send_packet(ptask);
- } else {
- dev_kfree_skb_any(ptask->skb);
- kmem_cache_free(fwnet_packet_task_cache, ptask);
}
+
+ if (free)
+ fwnet_free_ptask(ptask);
}
static void fwnet_write_complete(struct fw_card *card, int rcode,
@@ -977,6 +988,7 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
unsigned tx_len;
struct rfc2734_header *bufhdr;
unsigned long flags;
+ bool free;
dev = ptask->dev;
tx_len = ptask->max_payload;
@@ -1022,12 +1034,16 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
generation, SCODE_100, 0ULL, ptask->skb->data,
tx_len + 8, fwnet_write_complete, ptask);
- /* FIXME race? */
spin_lock_irqsave(&dev->lock, flags);
- list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
+ /* If the AT tasklet already ran, we may be the last user. */
+ free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+ if (!free)
+ list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+
spin_unlock_irqrestore(&dev->lock, flags);
- return 0;
+ goto out;
}
fw_send_request(dev->card, &ptask->transaction,
@@ -1035,12 +1051,19 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
ptask->generation, ptask->speed, ptask->fifo_addr,
ptask->skb->data, tx_len, fwnet_write_complete, ptask);
- /* FIXME race? */
spin_lock_irqsave(&dev->lock, flags);
- list_add_tail(&ptask->pt_link, &dev->sent_list);
+
+ /* If the AT tasklet already ran, we may be the last user. */
+ free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link));
+ if (!free)
+ list_add_tail(&ptask->pt_link, &dev->sent_list);
+
spin_unlock_irqrestore(&dev->lock, flags);
dev->netdev->trans_start = jiffies;
+ out:
+ if (free)
+ fwnet_free_ptask(ptask);
return 0;
}
@@ -1298,6 +1321,8 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
spin_unlock_irqrestore(&dev->lock, flags);
ptask->max_payload = max_payload;
+ INIT_LIST_HEAD(&ptask->pt_link);
+
fwnet_send_packet(ptask);
return NETDEV_TX_OK;
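
The firewire-net changes above close a use-after-free race by deciding, under dev->lock, which of the two paths is the last user of the packet task and therefore the one that frees it: the transmit-complete side frees only if the task is already on a list (the send side is done with it), and the send side frees only if it is not (completion already ran for the last fragment). Below is a compact userspace analogue of that handshake, with a mutex standing in for the spinlock and a boolean standing in for list membership; names are hypothetical and the demo is single-threaded purely for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;   /* plays the role of dev->lock */

struct ptask {
    int outstanding_pkts;   /* fragments still to be transmitted */
    bool on_list;           /* stands in for !list_empty(&ptask->pt_link) */
};

/* Transmit-complete side: free only if the send side already enlisted the task. */
static void transmit_done(struct ptask *p)
{
    bool free_it;

    pthread_mutex_lock(&dev_lock);
    p->outstanding_pkts--;
    free_it = (p->outstanding_pkts == 0 && p->on_list);
    if (p->outstanding_pkts == 0)
        p->on_list = false;             /* list_del() */
    pthread_mutex_unlock(&dev_lock);

    if (free_it) {
        printf("completion path frees the task\n");
        free(p);
    }
}

/* Send side: after handing the fragment off, either enlist the task or, if the
 * completion handler already ran for the last fragment, free it here. */
static void send_done(struct ptask *p)
{
    bool free_it;

    pthread_mutex_lock(&dev_lock);
    free_it = (p->outstanding_pkts == 0 && !p->on_list);
    if (!free_it)
        p->on_list = true;              /* list_add_tail() */
    pthread_mutex_unlock(&dev_lock);

    if (free_it) {
        printf("send path frees the task\n");
        free(p);
    }
}

int main(void)
{
    struct ptask *p = calloc(1, sizeof(*p));

    p->outstanding_pkts = 1;
    send_done(p);        /* enlists the task */
    transmit_done(p);    /* last fragment done and task enlisted: frees it */
    return 0;
}
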
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 2345d4103fe6..43ebf337b131 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2101,11 +2101,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
- /*
- * FIXME: Cycle lost behavior should be configurable: lose
- * packet, retransmit or terminate..
- */
-
p = packet;
payload_index = payload;
@@ -2135,6 +2130,14 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
if (!p->skip) {
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].req_count = cpu_to_le16(8);
+ /*
+ * Link the skip address to this descriptor itself. This causes
+ * a context to skip a cycle whenever lost cycles or FIFO
+ * overruns occur, without dropping the data. The application
+ * should then decide whether this is an error condition or not.
+ * FIXME: Make the context's cycle-lost behaviour configurable?
+ */
+ d[0].branch_address = cpu_to_le32(d_bus | z);
header = (__le32 *) &d[1];
header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index a1fce68e3bbe..17be051b7aa3 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -113,7 +113,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
- gart_info->table_mask);
+ (unsigned long long)gart_info->table_mask);
ret = 1;
goto done;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f665b05592f3..ab6c97330412 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
return mode;
}
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded. Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size. Technically we
+ * should be checking refresh rate too. Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+ struct detailed_pixel_timing *pt)
+{
+ int i;
+ static const struct {
+ int w, h;
+ } cea_interlaced[] = {
+ { 1920, 1080 },
+ { 720, 480 },
+ { 1440, 480 },
+ { 2880, 480 },
+ { 720, 576 },
+ { 1440, 576 },
+ { 2880, 576 },
+ };
+ static const int n_sizes =
+ sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+ if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+ return;
+
+ for (i = 0; i < n_sizes; i++) {
+ if ((mode->hdisplay == cea_interlaced[i].w) &&
+ (mode->vdisplay == cea_interlaced[i].h / 2)) {
+ mode->vdisplay *= 2;
+ mode->vsync_start *= 2;
+ mode->vsync_end *= 2;
+ mode->vtotal *= 2;
+ mode->vtotal |= 1;
+ }
+ }
+
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
/**
* drm_mode_detailed - create a new mode from an EDID detailed timing section
* @dev: DRM device (needed to create new mode)
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_set_name(mode);
- if (pt->misc & DRM_EDID_PT_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ drm_mode_do_interlace_quirk(mode, pt);
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
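
The quirk added above exists because some detailed timings describe CEA interlaced modes by field height (for example 1080i as 540 active lines) while the driver stores frame height; for the listed CEA sizes the vertical timings are doubled and vtotal is made odd. A standalone sketch of the same adjustment, using a cut-down mode structure and illustrative field timings (the real code operates on struct drm_display_mode):

#include <stdio.h>
#include <stddef.h>

struct mode {
    int hdisplay, vdisplay, vsync_start, vsync_end, vtotal;
};

static const struct { int w, h; } cea_interlaced[] = {
    { 1920, 1080 }, { 720, 480 }, { 1440, 480 }, { 2880, 480 },
    { 720, 576 }, { 1440, 576 }, { 2880, 576 },
};

/* If the mode matches a CEA interlaced size given in field height,
 * convert the vertical timings to frame height. */
static void fixup_interlaced(struct mode *m)
{
    size_t i;

    for (i = 0; i < sizeof(cea_interlaced) / sizeof(cea_interlaced[0]); i++) {
        if (m->hdisplay == cea_interlaced[i].w &&
            m->vdisplay == cea_interlaced[i].h / 2) {
            m->vdisplay *= 2;
            m->vsync_start *= 2;
            m->vsync_end *= 2;
            m->vtotal *= 2;
            m->vtotal |= 1;   /* interlaced frames have an odd total line count */
        }
    }
}

int main(void)
{
    struct mode m = { 1920, 540, 542, 547, 562 };   /* 1080i advertised in field height */
    fixup_interlaced(&m);
    printf("%dx%d vtotal=%d\n", m.hdisplay, m.vdisplay, m.vtotal);   /* 1920x1080 vtotal=1125 */
    return 0;
}
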
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index cdec32977129..2ac074c8f5d2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -405,7 +405,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
wasted += alignment - tmp;
}
- if (entry->size >= size + wasted) {
+ if (entry->size >= size + wasted &&
+ (entry->start + wasted + size) <= end) {
if (!best_match)
return entry;
if (entry->size < best_size) {
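
The drm_mm fix above tightens the ranged search: a hole is usable only if, after alignment waste, the allocation still ends at or before the caller's `end` bound, not merely if the hole itself is large enough. The predicate in isolation, with hypothetical names and illustrative numbers:

#include <stdio.h>
#include <stdbool.h>

/* Would an allocation of `size` bytes, placed at `start` plus `wasted` bytes of
 * alignment padding inside a hole of `hole_size` bytes, stay within the search
 * range that ends at `range_end`? */
static bool fits(unsigned long start, unsigned long hole_size,
                 unsigned long wasted, unsigned long size,
                 unsigned long range_end)
{
    return hole_size >= size + wasted &&        /* old check: hole is big enough */
           start + wasted + size <= range_end;  /* added check: stays inside the range */
}

int main(void)
{
    /* 64 KiB hole at 0xF0000, but the caller restricts the search to end at 0xF8000. */
    printf("%d\n", fits(0xF0000, 0x10000, 0, 0x10000, 0xF8000));  /* 0: would overrun the range */
    printf("%d\n", fits(0xF0000, 0x10000, 0, 0x04000, 0xF8000));  /* 1: fits */
    return 0;
}
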
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e660ac07f3b2..2307f98349f7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -735,8 +735,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto fail_batch_free;
+ }
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 46d88965852a..cf4cb3e9a0c2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -120,7 +120,7 @@ const static struct intel_device_info intel_gm45_info = {
const static struct intel_device_info intel_pineview_info = {
.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
- .has_pipe_cxsr = 1,
+ .need_gfx_hws = 1,
.has_hotplug = 1,
};
@@ -174,26 +174,20 @@ const static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!dev || !dev_priv) {
- DRM_ERROR("dev: %p, dev_priv: %p\n", dev, dev_priv);
- DRM_ERROR("DRM not initialized, aborting suspend.\n");
- return -ENODEV;
- }
-
- if (state.event == PM_EVENT_PRETHAW)
- return 0;
-
pci_save_state(dev->pdev);
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (i915_gem_idle(dev))
+ int error = i915_gem_idle(dev);
+ if (error) {
dev_err(&dev->pdev->dev,
- "GEM idle failed, resume may fail\n");
+ "GEM idle failed, resume might fail\n");
+ return error;
+ }
drm_irq_uninstall(dev);
}
@@ -201,26 +195,42 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
intel_opregion_free(dev, 1);
+ /* Modeset on resume, not lid events */
+ dev_priv->modeset_on_lid = 0;
+
+ return 0;
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+ int error;
+
+ if (!dev || !dev->dev_private) {
+ DRM_ERROR("dev: %p\n", dev);
+ DRM_ERROR("DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (state.event == PM_EVENT_PRETHAW)
+ return 0;
+
+ error = i915_drm_freeze(dev);
+ if (error)
+ return error;
+
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
pci_disable_device(dev->pdev);
pci_set_power_state(dev->pdev, PCI_D3hot);
}
- /* Modeset on resume, not lid events */
- dev_priv->modeset_on_lid = 0;
-
return 0;
}
-static int i915_resume(struct drm_device *dev)
+static int i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int ret = 0;
-
- if (pci_enable_device(dev->pdev))
- return -1;
- pci_set_master(dev->pdev);
+ int error = 0;
i915_restore_state(dev);
@@ -231,21 +241,28 @@ static int i915_resume(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
- ret = i915_gem_init_ringbuffer(dev);
- if (ret != 0)
- ret = -1;
+ error = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
drm_irq_install(dev);
- }
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+
/* Resume the modeset for every activated CRTC */
drm_helper_resume_force_mode(dev);
}
dev_priv->modeset_on_lid = 0;
- return ret;
+ return error;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+ if (pci_enable_device(dev->pdev))
+ return -EIO;
+
+ pci_set_master(dev->pdev);
+
+ return i915_drm_thaw(dev);
}
/**
@@ -386,57 +403,62 @@ i915_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
-i915_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int i915_pm_suspend(struct device *dev)
{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int error;
- return i915_suspend(dev, state);
-}
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
-static int
-i915_pci_resume(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ error = i915_drm_freeze(drm_dev);
+ if (error)
+ return error;
- return i915_resume(dev);
-}
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
-static int
-i915_pm_suspend(struct device *dev)
-{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+ return 0;
}
-static int
-i915_pm_resume(struct device *dev)
+static int i915_pm_resume(struct device *dev)
{
- return i915_pci_resume(to_pci_dev(dev));
-}
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
-static int
-i915_pm_freeze(struct device *dev)
-{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+ return i915_resume(drm_dev);
}
-static int
-i915_pm_thaw(struct device *dev)
+static int i915_pm_freeze(struct device *dev)
{
- /* thaw during hibernate, do nothing! */
- return 0;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ if (!drm_dev || !drm_dev->dev_private) {
+ dev_err(dev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ return i915_drm_freeze(drm_dev);
}
-static int
-i915_pm_poweroff(struct device *dev)
+static int i915_pm_thaw(struct device *dev)
{
- return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_thaw(drm_dev);
}
-static int
-i915_pm_restore(struct device *dev)
+static int i915_pm_poweroff(struct device *dev)
{
- return i915_pci_resume(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+
+ return i915_drm_freeze(drm_dev);
}
const struct dev_pm_ops i915_pm_ops = {
@@ -445,7 +467,7 @@ const struct dev_pm_ops i915_pm_ops = {
.freeze = i915_pm_freeze,
.thaw = i915_pm_thaw,
.poweroff = i915_pm_poweroff,
- .restore = i915_pm_restore,
+ .restore = i915_pm_resume,
};
static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aaf934d96f21..b99b6a841d95 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -493,6 +493,15 @@ typedef struct drm_i915_private {
struct list_head flushing_list;
/**
+ * List of objects currently pending a GPU write flush.
+ *
+ * All elements on this list will belong to either the
+ * active_list or flushing_list; last_rendering_seqno can
+ * be used to differentiate between the two lists.
+ */
+ struct list_head gpu_write_list;
+
+ /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -592,6 +601,8 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
+ /** This object's place on GPU write list */
+ struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dda787aafcc6..ec8a0d7ffa39 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1552,6 +1552,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
@@ -1622,7 +1624,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
struct drm_gem_object *obj = obj_priv->obj;
if ((obj->write_domain & flush_domains) ==
@@ -1630,6 +1633,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -2084,8 +2088,8 @@ static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno;
int ret;
+ uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@@ -2107,6 +2111,8 @@ i915_gem_evict_everything(struct drm_device *dev)
if (ret)
return ret;
+ BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
ret = i915_gem_evict_from_inactive_list(dev);
if (ret)
return ret;
@@ -2701,7 +2707,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
- obj->write_domain = 0;
+ BUG_ON(obj->write_domain);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -3564,6 +3570,9 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t reloc_count = 0, i;
int ret = 0;
+ if (relocs == NULL)
+ return 0;
+
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
int unwritten;
@@ -3653,7 +3662,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_relocation_entry *relocs;
+ struct drm_i915_gem_relocation_entry *relocs = NULL;
int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
@@ -3679,8 +3688,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
GFP_KERNEL);
- if (cliprects == NULL)
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
goto pre_mutex_err;
+ }
ret = copy_from_user(cliprects,
(struct drm_clip_rect __user *)
@@ -3722,6 +3733,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (object_list[i] == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3730,6 +3743,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]);
+ /* prevent error path from reading uninitialized data */
+ args->buffer_count = i + 1;
ret = -EBADF;
goto err;
}
@@ -3843,16 +3858,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
(void)i915_add_request(dev, file_priv,
dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ if (obj->write_domain)
+ list_move_tail(&obj_priv->gpu_write_list,
+ &dev_priv->mm.gpu_write_list);
+ else
+ list_del_init(&obj_priv->gpu_write_list);
+
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
@@ -3926,6 +3948,7 @@ err:
mutex_unlock(&dev->struct_mutex);
+pre_mutex_err:
/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@@ -3940,7 +3963,6 @@ err:
ret = ret2;
}
-pre_mutex_err:
drm_free_large(object_list);
kfree(cliprects);
@@ -4363,6 +4385,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
+ INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
@@ -4814,6 +4837,7 @@ i915_gem_load(struct drm_device *dev)
spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 89a071a3e6fb..a17d6bdfe63e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -309,6 +309,22 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
if (de_iir & DE_GSE)
ironlake_opregion_gse_intr(dev);
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip(dev, 1);
+ }
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
/* check event from PCH */
if ((de_iir & DE_PCH_EVENT) &&
(pch_iir & SDE_HOTPLUG_MASK)) {
@@ -844,11 +860,11 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!(pipeconf & PIPEACONF_ENABLE))
return -EINVAL;
- if (IS_IRONLAKE(dev))
- return 0;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_I965G(dev))
+ if (IS_IRONLAKE(dev))
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
@@ -866,13 +882,14 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- if (IS_IRONLAKE(dev))
- return;
-
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, pipe,
- PIPE_VBLANK_INTERRUPT_ENABLE |
- PIPE_START_VBLANK_INTERRUPT_ENABLE);
+ if (IS_IRONLAKE(dev))
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+ else
+ i915_disable_pipestat(dev_priv, pipe,
+ PIPE_VBLANK_INTERRUPT_ENABLE |
+ PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
@@ -1015,13 +1032,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
+ u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
u32 render_mask = GT_USER_INTERRUPT;
u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask;
+ dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
/* should always can generate irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 847006c5218e..ab1bd2d3d3b6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -338,6 +338,7 @@
#define FBC_CTL_PERIODIC (1<<30)
#define FBC_CTL_INTERVAL_SHIFT (16)
#define FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define FBC_C3_IDLE (1<<13)
#define FBC_CTL_STRIDE_SHIFT (5)
#define FBC_CTL_FENCENO (1<<0)
#define FBC_COMMAND 0x0320c
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index ddefc871edfe..79dd4026586f 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -157,6 +157,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ /* disable HPD first */
+ I915_WRITE(PCH_ADPA, adpa);
+ (void)I915_READ(PCH_ADPA);
adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
ADPA_CRT_HOTPLUG_WARMUP_10MS |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 45da78ef4a92..b27202d23ebc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -240,33 +240,86 @@ struct intel_limit {
#define IRONLAKE_DOT_MAX 350000
#define IRONLAKE_VCO_MIN 1760000
#define IRONLAKE_VCO_MAX 3510000
-#define IRONLAKE_N_MIN 1
-#define IRONLAKE_N_MAX 6
-#define IRONLAKE_M_MIN 79
-#define IRONLAKE_M_MAX 127
#define IRONLAKE_M1_MIN 12
#define IRONLAKE_M1_MAX 22
#define IRONLAKE_M2_MIN 5
#define IRONLAKE_M2_MAX 9
-#define IRONLAKE_P_SDVO_DAC_MIN 5
-#define IRONLAKE_P_SDVO_DAC_MAX 80
-#define IRONLAKE_P_LVDS_MIN 28
-#define IRONLAKE_P_LVDS_MAX 112
-#define IRONLAKE_P1_MIN 1
-#define IRONLAKE_P1_MAX 8
-#define IRONLAKE_P2_SDVO_DAC_SLOW 10
-#define IRONLAKE_P2_SDVO_DAC_FAST 5
-#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
-#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
-#define IRONLAKE_P_DISPLAY_PORT_MIN 10
-#define IRONLAKE_P_DISPLAY_PORT_MAX 20
-#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
-#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
-#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
-#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
-#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+/* We have parameter ranges for different types of output. */
+
+/* DAC & HDMI Refclk 120Mhz */
+#define IRONLAKE_DAC_N_MIN 1
+#define IRONLAKE_DAC_N_MAX 5
+#define IRONLAKE_DAC_M_MIN 79
+#define IRONLAKE_DAC_M_MAX 127
+#define IRONLAKE_DAC_P_MIN 5
+#define IRONLAKE_DAC_P_MAX 80
+#define IRONLAKE_DAC_P1_MIN 1
+#define IRONLAKE_DAC_P1_MAX 8
+#define IRONLAKE_DAC_P2_SLOW 10
+#define IRONLAKE_DAC_P2_FAST 5
+
+/* LVDS single-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_S_N_MIN 1
+#define IRONLAKE_LVDS_S_N_MAX 3
+#define IRONLAKE_LVDS_S_M_MIN 79
+#define IRONLAKE_LVDS_S_M_MAX 118
+#define IRONLAKE_LVDS_S_P_MIN 28
+#define IRONLAKE_LVDS_S_P_MAX 112
+#define IRONLAKE_LVDS_S_P1_MIN 2
+#define IRONLAKE_LVDS_S_P1_MAX 8
+#define IRONLAKE_LVDS_S_P2_SLOW 14
+#define IRONLAKE_LVDS_S_P2_FAST 14
+
+/* LVDS dual-channel 120Mhz refclk */
+#define IRONLAKE_LVDS_D_N_MIN 1
+#define IRONLAKE_LVDS_D_N_MAX 3
+#define IRONLAKE_LVDS_D_M_MIN 79
+#define IRONLAKE_LVDS_D_M_MAX 127
+#define IRONLAKE_LVDS_D_P_MIN 14
+#define IRONLAKE_LVDS_D_P_MAX 56
+#define IRONLAKE_LVDS_D_P1_MIN 2
+#define IRONLAKE_LVDS_D_P1_MAX 8
+#define IRONLAKE_LVDS_D_P2_SLOW 7
+#define IRONLAKE_LVDS_D_P2_FAST 7
+
+/* LVDS single-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_S_SSC_N_MIN 1
+#define IRONLAKE_LVDS_S_SSC_N_MAX 2
+#define IRONLAKE_LVDS_S_SSC_M_MIN 79
+#define IRONLAKE_LVDS_S_SSC_M_MAX 126
+#define IRONLAKE_LVDS_S_SSC_P_MIN 28
+#define IRONLAKE_LVDS_S_SSC_P_MAX 112
+#define IRONLAKE_LVDS_S_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_S_SSC_P1_MAX 8
+#define IRONLAKE_LVDS_S_SSC_P2_SLOW 14
+#define IRONLAKE_LVDS_S_SSC_P2_FAST 14
+
+/* LVDS dual-channel 100Mhz refclk */
+#define IRONLAKE_LVDS_D_SSC_N_MIN 1
+#define IRONLAKE_LVDS_D_SSC_N_MAX 3
+#define IRONLAKE_LVDS_D_SSC_M_MIN 79
+#define IRONLAKE_LVDS_D_SSC_M_MAX 126
+#define IRONLAKE_LVDS_D_SSC_P_MIN 14
+#define IRONLAKE_LVDS_D_SSC_P_MAX 42
+#define IRONLAKE_LVDS_D_SSC_P1_MIN 2
+#define IRONLAKE_LVDS_D_SSC_P1_MAX 6
+#define IRONLAKE_LVDS_D_SSC_P2_SLOW 7
+#define IRONLAKE_LVDS_D_SSC_P2_FAST 7
+
+/* DisplayPort */
+#define IRONLAKE_DP_N_MIN 1
+#define IRONLAKE_DP_N_MAX 2
+#define IRONLAKE_DP_M_MIN 81
+#define IRONLAKE_DP_M_MAX 90
+#define IRONLAKE_DP_P_MIN 10
+#define IRONLAKE_DP_P_MAX 20
+#define IRONLAKE_DP_P2_FAST 10
+#define IRONLAKE_DP_P2_SLOW 10
+#define IRONLAKE_DP_P2_LIMIT 0
+#define IRONLAKE_DP_P1_MIN 1
+#define IRONLAKE_DP_P1_MAX 2
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -474,33 +527,78 @@ static const intel_limit_t intel_limits_pineview_lvds = {
.find_pll = intel_find_best_PLL,
};
-static const intel_limit_t intel_limits_ironlake_sdvo = {
+static const intel_limit_t intel_limits_ironlake_dac = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_DAC_N_MIN, .max = IRONLAKE_DAC_N_MAX },
+ .m = { .min = IRONLAKE_DAC_M_MIN, .max = IRONLAKE_DAC_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_DAC_P_MIN, .max = IRONLAKE_DAC_P_MAX },
+ .p1 = { .min = IRONLAKE_DAC_P1_MIN, .max = IRONLAKE_DAC_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
- .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
+ .p2_slow = IRONLAKE_DAC_P2_SLOW,
+ .p2_fast = IRONLAKE_DAC_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
-static const intel_limit_t intel_limits_ironlake_lvds = {
+static const intel_limit_t intel_limits_ironlake_single_lvds = {
.dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
- .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_N_MIN, .max = IRONLAKE_LVDS_S_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_M_MIN, .max = IRONLAKE_LVDS_S_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
- .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_P_MIN, .max = IRONLAKE_LVDS_S_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_P1_MIN, .max = IRONLAKE_LVDS_S_P1_MAX },
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
- .p2_slow = IRONLAKE_P2_LVDS_SLOW,
- .p2_fast = IRONLAKE_P2_LVDS_FAST },
+ .p2_slow = IRONLAKE_LVDS_S_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_N_MIN, .max = IRONLAKE_LVDS_D_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_M_MIN, .max = IRONLAKE_LVDS_D_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_P_MIN, .max = IRONLAKE_LVDS_D_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_P1_MIN, .max = IRONLAKE_LVDS_D_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_D_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_S_SSC_N_MIN, .max = IRONLAKE_LVDS_S_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_S_SSC_M_MIN, .max = IRONLAKE_LVDS_S_SSC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_S_SSC_P_MIN, .max = IRONLAKE_LVDS_S_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_S_SSC_P1_MIN,.max = IRONLAKE_LVDS_S_SSC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_S_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_S_SSC_P2_FAST },
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
+ .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
+ .n = { .min = IRONLAKE_LVDS_D_SSC_N_MIN, .max = IRONLAKE_LVDS_D_SSC_N_MAX },
+ .m = { .min = IRONLAKE_LVDS_D_SSC_M_MIN, .max = IRONLAKE_LVDS_D_SSC_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_LVDS_D_SSC_P_MIN, .max = IRONLAKE_LVDS_D_SSC_P_MAX },
+ .p1 = { .min = IRONLAKE_LVDS_D_SSC_P1_MIN,.max = IRONLAKE_LVDS_D_SSC_P1_MAX },
+ .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
+ .p2_slow = IRONLAKE_LVDS_D_SSC_P2_SLOW,
+ .p2_fast = IRONLAKE_LVDS_D_SSC_P2_FAST },
.find_pll = intel_g4x_find_best_PLL,
};
@@ -509,34 +607,53 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.max = IRONLAKE_DOT_MAX },
.vco = { .min = IRONLAKE_VCO_MIN,
.max = IRONLAKE_VCO_MAX},
- .n = { .min = IRONLAKE_N_MIN,
- .max = IRONLAKE_N_MAX },
- .m = { .min = IRONLAKE_M_MIN,
- .max = IRONLAKE_M_MAX },
+ .n = { .min = IRONLAKE_DP_N_MIN,
+ .max = IRONLAKE_DP_N_MAX },
+ .m = { .min = IRONLAKE_DP_M_MIN,
+ .max = IRONLAKE_DP_M_MAX },
.m1 = { .min = IRONLAKE_M1_MIN,
.max = IRONLAKE_M1_MAX },
.m2 = { .min = IRONLAKE_M2_MIN,
.max = IRONLAKE_M2_MAX },
- .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
- .max = IRONLAKE_P_DISPLAY_PORT_MAX },
- .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
- .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
- .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
- .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
- .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+ .p = { .min = IRONLAKE_DP_P_MIN,
+ .max = IRONLAKE_DP_P_MAX },
+ .p1 = { .min = IRONLAKE_DP_P1_MIN,
+ .max = IRONLAKE_DP_P1_MAX},
+ .p2 = { .dot_limit = IRONLAKE_DP_P2_LIMIT,
+ .p2_slow = IRONLAKE_DP_P2_SLOW,
+ .p2_fast = IRONLAKE_DP_P2_FAST },
.find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
- limit = &intel_limits_ironlake_lvds;
- else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ int refclk = 120;
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
+ refclk = 100;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP) {
+ /* LVDS dual channel */
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_dual_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_dual_lvds;
+ } else {
+ if (refclk == 100)
+ limit = &intel_limits_ironlake_single_lvds_100m;
+ else
+ limit = &intel_limits_ironlake_single_lvds;
+ }
+ } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
HAS_eDP)
limit = &intel_limits_ironlake_display_port;
else
- limit = &intel_limits_ironlake_sdvo;
+ limit = &intel_limits_ironlake_dac;
return limit;
}
@@ -914,6 +1031,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
+ if (IS_I945GM(dev))
+ fbc_ctl |= FBC_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE)
@@ -1638,6 +1757,7 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
+ drm_vblank_off(dev, pipe);
/* Disable display plane */
temp = I915_READ(dspcntr_reg);
if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
@@ -2519,6 +2639,10 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
sr_entries = roundup(sr_entries / cacheline_size, 1);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
@@ -2562,6 +2686,10 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm = 1;
srwm &= 0x3f;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@@ -2630,6 +2758,10 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (srwm < 0)
srwm = 1;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+ } else {
+ /* Turn off self refresh if both pipes are enabled */
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3949,7 +4081,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *obj;
+ struct drm_gem_object *old_fb_obj;
+ struct drm_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
};
@@ -3960,8 +4093,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
container_of(__work, struct intel_unpin_work, work);
mutex_lock(&work->dev->struct_mutex);
- i915_gem_object_unpin(work->obj);
- drm_gem_object_unreference(work->obj);
+ i915_gem_object_unpin(work->old_fb_obj);
+ drm_gem_object_unreference(work->pending_flip_obj);
+ drm_gem_object_unreference(work->old_fb_obj);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -3984,6 +4118,12 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
+ if (work && !work->pending) {
+ obj_priv = work->pending_flip_obj->driver_private;
+ DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
+ obj_priv,
+ atomic_read(&obj_priv->pending_flip));
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
@@ -4004,8 +4144,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = work->obj->driver_private;
- if (atomic_dec_and_test(&obj_priv->pending_flip))
+ obj_priv = work->pending_flip_obj->driver_private;
+
+ /* Initial scanout buffer will have a 0 pending flip count */
+ if ((atomic_read(&obj_priv->pending_flip) == 0) ||
+ atomic_dec_and_test(&obj_priv->pending_flip))
DRM_WAKEUP(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
}
@@ -4018,8 +4161,11 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
- if (intel_crtc->unpin_work)
+ if (intel_crtc->unpin_work) {
intel_crtc->unpin_work->pending = 1;
+ } else {
+ DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
+ }
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -4035,7 +4181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
- int ret;
+ int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
+ int ret, pipesrc;
RING_LOCALS;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -4047,12 +4194,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
- work->obj = intel_fb->obj;
+ work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
+ DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
mutex_unlock(&dev->struct_mutex);
@@ -4066,19 +4214,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
+ DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
+ obj->driver_private);
kfree(work);
+ intel_crtc->unpin_work = NULL;
mutex_unlock(&dev->struct_mutex);
return ret;
}
- /* Reference the old fb object for the scheduled work. */
- drm_gem_object_reference(work->obj);
+ /* Reference the objects for the scheduled work. */
+ drm_gem_object_reference(work->old_fb_obj);
+ drm_gem_object_reference(obj);
crtc->fb = fb;
i915_gem_object_flush_write_domain(obj);
drm_vblank_get(dev, intel_crtc->pipe);
obj_priv = obj->driver_private;
atomic_inc(&obj_priv->pending_flip);
+ work->pending_flip_obj = obj;
BEGIN_LP_RING(4);
OUT_RING(MI_DISPLAY_FLIP |
@@ -4086,7 +4239,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch);
if (IS_I965G(dev)) {
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
- OUT_RING((fb->width << 16) | fb->height);
+ pipesrc = I915_READ(pipesrc_reg);
+ OUT_RING(pipesrc & 0x0fff0fff);
} else {
OUT_RING(obj_priv->gtt_offset);
OUT_RING(MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 371d753e362b..aaabbcbe5905 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -148,7 +148,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(fbo, PAGE_SIZE);
+ ret = i915_gem_object_pin(fbo, 64*1024);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index aa74e59bec61..c2e8a45780d5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -611,7 +611,7 @@ static const struct dmi_system_id bad_lid_status[] = {
{
.ident = "Samsung SX20S",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Phoenix Technologies LTD"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
DMI_MATCH(DMI_BOARD_NAME, "SX20S"),
},
},
@@ -623,12 +623,26 @@ static const struct dmi_system_id bad_lid_status[] = {
},
},
{
+ .ident = "Aspire 1810T",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1810T"),
+ },
+ },
+ {
.ident = "PC-81005",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
},
},
+ {
+ .ident = "Clevo M5x0N",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
+ DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
+ },
+ },
{ }
};
@@ -643,7 +657,7 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
{
enum drm_connector_status status = connector_status_connected;
- if (!acpi_lid_open() && !dmi_check_system(bad_lid_status))
+ if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
status = connector_status_disconnected;
return status;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index eaacfd0920df..82678d30ab06 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2345,6 +2345,14 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
connector->connector_type = DRM_MODE_CONNECTOR_VGA;
intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
+ } else if (flags & SDVO_OUTPUT_CVBS0) {
+
+ sdvo_priv->controlled_output = SDVO_OUTPUT_CVBS0;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+ sdvo_priv->is_tv = true;
+ intel_output->needs_tv_clock = true;
+ intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
} else if (flags & SDVO_OUTPUT_LVDS0) {
sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1cf488247a16..48227e744753 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -90,21 +90,21 @@ int nouveau_hybrid_setup(struct drm_device *dev)
{
int result;
- if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
+ if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
&result))
return -ENODEV;
NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
- if (result & 0x1) { /* Stamina mode - disable the external GPU */
+ if (result) { /* Ensure that the external GPU is enabled */
+ nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
+ nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
+ NULL);
+ } else { /* Stamina mode - disable the external GPU */
nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
NULL);
nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
NULL);
- } else { /* Ensure that the external GPU is enabled */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
- NULL);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index d7f8d8b4a4b8..0e9cd1d49130 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1865,7 +1865,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
- if (dev_priv->card_type >= NV_50)
+ if (dev_priv->card_type >= NV_40)
return 1;
/*
@@ -3765,7 +3765,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct init_exec iexec = {true, false};
struct nvbios *bios = &dev_priv->VBIOS;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
@@ -3845,8 +3844,6 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
}
- bios->display.output = dcbent;
-
if (pxclk == 0) {
script = ROM16(otable[6]);
if (!script) {
@@ -3855,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -1) {
script = ROM16(otable[8]);
@@ -3865,7 +3862,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk == -2) {
if (table[4] >= 12)
@@ -3878,7 +3875,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk > 0) {
script = ROM16(otable[table[4] + i*6 + 2]);
@@ -3890,7 +3887,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
} else
if (pxclk < 0) {
script = ROM16(otable[table[4] + i*6 + 4]);
@@ -3902,7 +3899,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
- parse_init_table(bios, script, &iexec);
+ nouveau_bios_run_init_table(dev, script, dcbent);
}
return 0;
@@ -5865,9 +5862,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->VBIOS;
struct init_exec iexec = { true, false };
+ mutex_lock(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
+ mutex_unlock(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -5876,6 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->VBIOS;
memset(bios, 0, sizeof(struct nvbios));
+ mutex_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 058e98c76d89..fd94bd6dc264 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -205,6 +205,8 @@ struct nvbios {
struct drm_device *dev;
struct nouveau_bios_info pub;
+ struct mutex lock;
+
uint8_t data[NV_PROM_SIZE];
unsigned int length;
bool execute;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db0ed4c13f98..028719fddf76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -65,8 +65,10 @@ nouveau_bo_fixup_align(struct drm_device *dev,
/*
* Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Overallocate memory to
- * avoid corruption of other buffer objects.
+ * align to that as well as the page size. Align the size to the
+ * appropriate boundaries. This does imply that sizes are rounded up by
+ * 3-7 pages, so be aware of this and do not waste memory by allocating
+ * many small buffers.
*/
if (dev_priv->card_type == NV_50) {
uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
@@ -77,22 +79,20 @@ nouveau_bo_fixup_align(struct drm_device *dev,
case 0x2800:
case 0x4800:
case 0x7a00:
- *size = roundup(*size, block_size);
if (is_power_of_2(block_size)) {
- *size += 3 * block_size;
for (i = 1; i < 10; i++) {
*align = 12 * i * block_size;
if (!(*align % 65536))
break;
}
} else {
- *size += 6 * block_size;
for (i = 1; i < 10; i++) {
*align = 8 * i * block_size;
if (!(*align % 65536))
break;
}
}
+ *size = roundup(*size, *align);
break;
default:
break;
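
The nouveau_bo hunk above replaces the fixed 3- or 6-block overallocation with rounding the buffer size up to the alignment that was just computed, which is what the rewritten comment means by sizes growing by a few pages. A tiny demonstration of the round-up with illustrative numbers (the real block_size depends on the amount of VRAM, and the chosen alignment is the first multiple of it that is also 64 KiB aligned):

#include <stdio.h>

/* Round v up to the next multiple of a (a must be non-zero). */
static unsigned long roundup_to(unsigned long v, unsigned long a)
{
    return ((v + a - 1) / a) * a;
}

int main(void)
{
    unsigned long align   = 4 * 4096;   /* pretend a four-page alignment was selected */
    unsigned long size    = 4097;       /* a buffer just over one page */
    unsigned long rounded = roundup_to(size, align);

    /* Worst-case overhead is align - 1 bytes, i.e. a few pages for page-multiple alignments. */
    printf("size %lu -> %lu (overhead %lu bytes)\n", size, rounded, rounded - size);
    return 0;
}
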
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 343d718a9667..2281f99da7fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -278,12 +278,11 @@ nouveau_channel_free(struct nouveau_channel *chan)
/* Ensure the channel is no longer active on the GPU */
pfifo->reassign(dev, false);
- if (pgraph->channel(dev) == chan) {
- pgraph->fifo_access(dev, false);
+ pgraph->fifo_access(dev, false);
+ if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
- pgraph->fifo_access(dev, true);
- }
pgraph->destroy_context(chan);
+ pgraph->fifo_access(dev, true);
if (pfifo->channel_id(dev) == chan->id) {
pfifo->disable(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7e6d673f3a23..d2f63353ea97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -88,13 +88,14 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
{
struct nouveau_connector *nv_connector =
nouveau_connector(drm_connector);
- struct drm_device *dev = nv_connector->base.dev;
-
- NV_DEBUG_KMS(dev, "\n");
+ struct drm_device *dev;
if (!nv_connector)
return;
+ dev = nv_connector->base.dev;
+ NV_DEBUG_KMS(dev, "\n");
+
kfree(nv_connector->edid);
drm_sysfs_connector_remove(drm_connector);
drm_connector_cleanup(drm_connector);
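
The connector-destroy hunk above is the classic dereference-before-NULL-check fix: nv_connector->base.dev was read before the `if (!nv_connector)` guard, so the guard could never protect anything. The safe ordering in a generic sketch (hypothetical names, not the nouveau code):

#include <stdio.h>
#include <stddef.h>

struct conn { const char *name; };

static void destroy(struct conn *c)
{
    const char *name;

    if (!c)              /* check first ... */
        return;

    name = c->name;      /* ... then dereference */
    printf("destroying %s\n", name);
}

int main(void)
{
    struct conn c = { "LVDS-1" };

    destroy(&c);
    destroy(NULL);       /* safe: the guard runs before any field access */
    return 0;
}
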
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index dd4937224220..f954ad93e81f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -502,12 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
break;
}
- if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
- ret = -EREMOTEIO;
- goto out;
- }
-
if (cmd & 1) {
+ if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
+
for (i = 0; i < 4; i++) {
data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 343ab7f17ccc..da3b93b84502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -56,7 +56,7 @@ int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify;
+int nouveau_vram_notify = 1;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -75,6 +75,14 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
+MODULE_PARM_DESC(noagp, "Disable all acceleration");
+int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
+MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 6b9690418bc7..1c15ef37b71c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
uint64_t vm_end;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
+ uint64_t vram_sys_base;
/* the mtrr covering the FB */
int fb_mtrr;
@@ -678,6 +679,8 @@ extern int nouveau_reg_debug;
extern char *nouveau_vbios;
extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
+extern int nouveau_nofbaccel;
+extern int nouveau_noaccel;
/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0b05c869e0e7..ea879a2efef3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -107,6 +107,34 @@ static struct fb_ops nouveau_fbcon_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
};
+static struct fb_ops nv04_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv04_fbcon_fillrect,
+ .fb_copyarea = nv04_fbcon_copyarea,
+ .fb_imageblit = nv04_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static struct fb_ops nv50_fbcon_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcolreg = drm_fb_helper_setcolreg,
+ .fb_fillrect = nv50_fbcon_fillrect,
+ .fb_copyarea = nv50_fbcon_copyarea,
+ .fb_imageblit = nv50_fbcon_imageblit,
+ .fb_sync = nouveau_fbcon_sync,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
@@ -267,8 +295,12 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
dev_priv->fbdev_info = info;
strcpy(info->fix.id, "nouveaufb");
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
+ if (nouveau_nofbaccel)
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
+ else
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
+ FBINFO_HWACCEL_FILLRECT |
+ FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
@@ -316,13 +348,15 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
par->nouveau_fb = nouveau_fb;
par->dev = dev;
- if (dev_priv->channel) {
+ if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
case NV_50:
nv50_fbcon_accel_init(info);
+ info->fbops = &nv50_fbcon_ops;
break;
default:
nv04_fbcon_accel_init(info);
+ info->fbops = &nv04_fbcon_ops;
break;
};
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 462e0b87b4bd..f9c34e1a8c11 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,7 +40,13 @@ int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void nouveau_fbcon_restore(void);
void nouveau_fbcon_zfill(struct drm_device *dev);
+void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv04_fbcon_accel_init(struct fb_info *info);
+void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
int nv50_fbcon_accel_init(struct fb_info *info);
void nouveau_fbcon_gpu_lockup(struct fb_info *info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6ac804b0c9f9..70cc30803e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -925,7 +925,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
}
if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
+ spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
+ spin_unlock(&nvbo->bo.lock);
} else {
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
if (ret == 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index 419f4c2b3b89..c7ebec696747 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -97,8 +97,8 @@ nouveau_grctx_prog_load(struct drm_device *dev)
}
pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
+ if (!pgraph->ctxvals) {
+ NV_ERROR(dev, "OOM copying ctxvals\n");
release_firmware(fw);
nouveau_grctx_fini(dev);
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 3b9bad66162a..447f9f69d6b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -211,6 +211,20 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
get + 4);
}
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
if (status) {
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
@@ -566,86 +580,99 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
- uint32_t status, nsource;
+ uint32_t status;
- status = nv_rd32(dev, NV03_PGRAPH_INTR);
- nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- if (status & 0x00000001) {
- nouveau_pgraph_intr_notify(dev, nsource);
- status &= ~0x00000001;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
- }
+ if (status & 0x00000001) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+ status &= ~0x00000001;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
+ }
- if (status & 0x00000010) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+ if (status & 0x00000010) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
- status &= ~0x00000010;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
- }
+ status &= ~0x00000010;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
+ }
- if (status & 0x00001000) {
- nv_wr32(dev, 0x400500, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
- NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
- nv_wr32(dev, 0x400500, 0x00010001);
+ if (status & 0x00001000) {
+ nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(dev, NV03_PGRAPH_INTR,
+ NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
+ NV40_PGRAPH_INTR_EN) &
+ ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ nv_wr32(dev, 0x400500, 0x00010001);
- nv50_graph_context_switch(dev);
+ nv50_graph_context_switch(dev);
- status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- }
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ }
- if (status & 0x00100000) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_DATA_ERROR);
+ if (status & 0x00100000) {
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_DATA_ERROR);
- status &= ~0x00100000;
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
- }
+ status &= ~0x00100000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
+ }
- if (status & 0x00200000) {
- int r;
-
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
- NV_ERROR(dev, "magic set 1:\n");
- for (r = 0x408900; r <= 0x408910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
- for (r = 0x408e08; r <= 0x408e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
-
- NV_ERROR(dev, "magic set 2:\n");
- for (r = 0x409900; r <= 0x409910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
- for (r = 0x409e08; r <= 0x409e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
- nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
-
- status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
- nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
- }
+ if (status & 0x00200000) {
+ int r;
+
+ nouveau_pgraph_intr_error(dev, nsource |
+ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+ NV_ERROR(dev, "magic set 1:\n");
+ for (r = 0x408900; r <= 0x408910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408900,
+ nv_rd32(dev, 0x408904) | 0xc0000000);
+ for (r = 0x408e08; r <= 0x408e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x408e08,
+ nv_rd32(dev, 0x408e08) | 0xc0000000);
+
+ NV_ERROR(dev, "magic set 2:\n");
+ for (r = 0x409900; r <= 0x409910; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409900,
+ nv_rd32(dev, 0x409904) | 0xc0000000);
+ for (r = 0x409e08; r <= 0x409e24; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ nv_wr32(dev, 0x409e08,
+ nv_rd32(dev, 0x409e08) | 0xc0000000);
+
+ status &= ~0x00200000;
+ nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
+ }
- if (status) {
- NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
- nv_wr32(dev, NV03_PGRAPH_INTR, status);
- }
+ if (status) {
+ NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
+ status);
+ nv_wr32(dev, NV03_PGRAPH_INTR, status);
+ }
- {
- const int isb = (1 << 16) | (1 << 0);
+ {
+ const int isb = (1 << 16) | (1 << 0);
- if ((nv_rd32(dev, 0x400500) & isb) != isb)
- nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ if ((nv_rd32(dev, 0x400500) & isb) != isb)
+ nv_wr32(dev, 0x400500,
+ nv_rd32(dev, 0x400500) | isb);
+ }
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 8f3a12f614ed..2dc09dbd817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
uint32_t flags, uint64_t phys)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj **pgt;
- unsigned psz, pfl, pages;
-
- if (virt >= dev_priv->vm_gart_base &&
- (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
- psz = 12;
- pgt = &dev_priv->gart_info.sg_ctxdma;
- pfl = 0x21;
- virt -= dev_priv->vm_gart_base;
- } else
- if (virt >= dev_priv->vm_vram_base &&
- (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
- psz = 16;
- pgt = dev_priv->vm_vram_pt;
- pfl = 0x01;
- virt -= dev_priv->vm_vram_base;
- } else {
- NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
- virt, virt + size - 1);
- return -EINVAL;
- }
+ struct nouveau_gpuobj *pgt;
+ unsigned block;
+ int i;
- pages = size >> psz;
+ virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+ size = (size >> 16) << 1;
+
+ phys |= ((uint64_t)flags << 32);
+ phys |= 1;
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ phys |= 0x30;
+ }
dev_priv->engine.instmem.prepare_access(dev, true);
- if (flags & 0x80000000) {
- while (pages--) {
- struct nouveau_gpuobj *pt = pgt[virt >> 29];
- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+ while (size) {
+ unsigned offset_h = upper_32_bits(phys);
+ unsigned offset_l = lower_32_bits(phys);
+ unsigned pte, end;
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 1);
+ if (size >= block && !(virt & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
- nv_wo32(dev, pt, pte++, 0x00000000);
- nv_wo32(dev, pt, pte++, 0x00000000);
+ phys += block << 15;
+ size -= block;
- virt += (1 << psz);
- }
- } else {
- while (pages--) {
- struct nouveau_gpuobj *pt = pgt[virt >> 29];
- unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
- unsigned offset_h = upper_32_bits(phys) & 0xff;
- unsigned offset_l = lower_32_bits(phys);
+ while (block) {
+ pgt = dev_priv->vm_vram_pt[virt >> 14];
+ pte = virt & 0x3ffe;
- nv_wo32(dev, pt, pte++, offset_l | pfl);
- nv_wo32(dev, pt, pte++, offset_h | flags);
+ end = pte + block;
+ if (end > 16384)
+ end = 16384;
+ block -= (end - pte);
+ virt += (end - pte);
- phys += (1 << psz);
- virt += (1 << psz);
+ while (pte < end) {
+ nv_wo32(dev, pgt, pte++, offset_l);
+ nv_wo32(dev, pgt, pte++, offset_h);
+ }
}
}
dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
void
nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
{
- nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *pgt;
+ unsigned pages, pte, end;
+
+ virt -= dev_priv->vm_vram_base;
+ pages = (size >> 16) << 1;
+
+ dev_priv->engine.instmem.prepare_access(dev, true);
+ while (pages) {
+ pgt = dev_priv->vm_vram_pt[virt >> 29];
+ pte = (virt & 0x1ffe0000ULL) >> 15;
+
+ end = pte + pages;
+ if (end > 16384)
+ end = 16384;
+ pages -= (end - pte);
+ virt += (end - pte) << 15;
+
+ while (pte < end)
+ nv_wo32(dev, pgt, pte++, 0);
+ }
+ dev_priv->engine.instmem.finish_access(dev);
+
+ nv_wr32(dev, 0x100c80, 0x00050001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ return;
+ }
+
+ nv_wr32(dev, 0x100c80, 0x00000001);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+ NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+ }
}
/*
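The rewritten nv50_mem_vm_bind_linear() above coalesces the mapping into the largest naturally aligned power-of-two block available at each step and stores the block size in bits 7:9 of the low PTE word (offset_l |= i << 7). A stand-alone sketch of just that block-selection step; the helper name is hypothetical, the patch open-codes the loop:

#include <stdint.h>

/* Illustrative only -- not part of the patch above.  virt and size are in
 * PTE-word units (two words per 64 KiB page), exactly as in the bind loop;
 * the caller guarantees size is a non-zero multiple of 2, so i never falls
 * below 0. */
static int nv50_vm_pick_block(uint32_t virt, uint32_t size)
{
	int i;

	for (i = 7; i >= 0; i--) {
		uint32_t block = 1 << (i + 1);	/* 256, 128, ..., 2 */

		if (size >= block && !(virt & (block - 1)))
			break;
	}
	return i;	/* the mapping then advances by 1 << (i + 1) words */
}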
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6c66a34b6345..d99dc087f9b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -34,15 +34,20 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct nouveau_bo *ntfy = NULL;
+ uint32_t flags;
int ret;
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
- TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
+ if (nouveau_vram_notify)
+ flags = TTM_PL_FLAG_VRAM;
+ else
+ flags = TTM_PL_FLAG_TT;
+
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
0, 0x0000, false, true, &ntfy);
if (ret)
return ret;
- ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(ntfy, flags);
if (ret)
goto out_err;
@@ -128,6 +133,8 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
target = NV_DMA_TARGET_PCI;
} else {
target = NV_DMA_TARGET_AGP;
+ if (dev_priv->card_type >= NV_50)
+ offset += dev_priv->vm_gart_base;
}
} else {
NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 6c2cf81716df..e7c100ba63a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -885,11 +885,12 @@ int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
struct nouveau_gpuobj **gpuobj_ret)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct drm_nouveau_private *dev_priv;
struct nouveau_gpuobj *gpuobj;
if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
return -EINVAL;
+ dev_priv = chan->dev->dev_private;
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
if (!gpuobj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 251f1b3b38b9..aa9b310e41be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -99,6 +99,7 @@
* the card will hang early on in the X init process.
*/
# define NV_PMC_ENABLE_UNK13 (1<<13)
+#define NV40_PMC_GRAPH_UNITS 0x00001540
#define NV40_PMC_BACKLIGHT 0x000015f0
# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
#define NV40_PMC_1700 0x00001700
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 4c7f1e403e80..ed1590577b6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -54,11 +54,12 @@ static void
nouveau_sgdma_clear(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
-
- NV_DEBUG(nvbe->dev, "\n");
+ struct drm_device *dev;
if (nvbe && nvbe->pages) {
+ dev = nvbe->dev;
+ NV_DEBUG(dev, "\n");
+
if (nvbe->bound)
be->func->unbind(be);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index f2d0187ba152..a4851af5b05e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -310,6 +310,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
static unsigned int
nouveau_vga_set_decode(void *priv, bool state)
{
+ struct drm_device *dev = priv;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset >= 0x40)
+ nv_wr32(dev, 0x88054, state);
+ else
+ nv_wr32(dev, 0x1854, state);
+
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -427,15 +435,19 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_timer;
- /* PGRAPH */
- ret = engine->graph.init(dev);
- if (ret)
- goto out_fb;
+ if (nouveau_noaccel)
+ engine->graph.accel_blocked = true;
+ else {
+ /* PGRAPH */
+ ret = engine->graph.init(dev);
+ if (ret)
+ goto out_fb;
- /* PFIFO */
- ret = engine->fifo.init(dev);
- if (ret)
- goto out_graph;
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret)
+ goto out_graph;
+ }
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
@@ -479,9 +491,11 @@ nouveau_card_init(struct drm_device *dev)
out_irq:
drm_irq_uninstall(dev);
out_fifo:
- engine->fifo.takedown(dev);
+ if (!nouveau_noaccel)
+ engine->fifo.takedown(dev);
out_graph:
- engine->graph.takedown(dev);
+ if (!nouveau_noaccel)
+ engine->graph.takedown(dev);
out_fb:
engine->fb.takedown(dev);
out_timer:
@@ -518,8 +532,10 @@ static void nouveau_card_takedown(struct drm_device *dev)
dev_priv->channel = NULL;
}
- engine->fifo.takedown(dev);
- engine->graph.takedown(dev);
+ if (!nouveau_noaccel) {
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ }
engine->fb.takedown(dev);
engine->timer.takedown(dev);
engine->mc.takedown(dev);
@@ -817,6 +833,15 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = dev_priv->vm_vram_base;
break;
+ case NOUVEAU_GETPARAM_GRAPH_UNITS:
+ /* NV40 and NV50 versions are quite different, but register
+ * address is the same. User is supposed to know the card
+ * family anyway... */
+ if (dev_priv->chipset >= 0x40) {
+ getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+ break;
+ }
+ /* FALLTHRU */
default:
NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
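The NOUVEAU_GETPARAM_GRAPH_UNITS comment above leaves interpretation of the register to userspace. A rough sketch of how a client might read it, assuming the usual libdrm drmCommandWriteRead() helper and the drm_nouveau_getparam structure plus the NOUVEAU_GETPARAM_GRAPH_UNITS define from nouveau_drm.h (the header path below is an assumption, not taken from this patch):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/nouveau_drm.h>	/* assumed install path */

/* Illustrative only -- not part of the patch above: fetch the raw
 * graph-units register; decoding it is card-family specific
 * (NV40 vs. NV50), as the comment in nouveau_ioctl_getparam() notes. */
static int get_graph_units(int fd, uint64_t *units)
{
	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_GRAPH_UNITS };
	int ret;

	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp));
	if (ret == 0)
		*units = gp.value;
	return ret;
}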
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d0e038d28948..1d73b15d70da 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- uint8_t saved_seq1, saved_pi, saved_rpc1;
+ uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
uint8_t saved_palette0[3], saved_palette_mask;
uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
/* only implemented for head A for now */
NVSetOwner(dev, 0);
+ saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
@@ -203,6 +206,7 @@ out:
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+ NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
NV_INFO(dev, "Load detected on head A\n");
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index d910873c1368..fd01caabd5c3 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -27,7 +27,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -54,7 +54,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
@@ -88,7 +88,7 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
FIRE_RING(chan);
}
-static void
+void
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -307,9 +307,6 @@ nv04_fbcon_accel_init(struct fb_info *info)
FIRE_RING(chan);
- info->fbops->fb_fillrect = nv04_fbcon_fillrect;
- info->fbops->fb_copyarea = nv04_fbcon_copyarea;
- info->fbops->fb_imageblit = nv04_fbcon_imageblit;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 58b917c3341b..21ac6e49b6ee 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
nouveau_encoder(encoder)->restore.output);
nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+ nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
}
static int nv17_tv_create_resources(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 40b7360841f8..d1a651e3400c 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -298,14 +298,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
static void
nv50_crtc_destroy(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-
- NV_DEBUG_KMS(dev, "\n");
+ struct drm_device *dev;
+ struct nouveau_crtc *nv_crtc;
if (!crtc)
return;
+ dev = crtc->dev;
+ nv_crtc = nouveau_crtc(crtc);
+
+ NV_DEBUG_KMS(dev, "\n");
+
drm_crtc_cleanup(&nv_crtc->base);
nv50_cursor_fini(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index e4f279ee61cf..0f57cdf7ccb2 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -3,7 +3,7 @@
#include "nouveau_dma.h"
#include "nouveau_fbcon.h"
-static void
+void
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbcon_par *par = info->par;
@@ -46,7 +46,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbcon_par *par = info->par;
@@ -81,7 +81,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
FIRE_RING(chan);
}
-static void
+void
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbcon_par *par = info->par;
@@ -262,9 +262,6 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
dev_priv->vm_vram_base);
- info->fbops->fb_fillrect = nv50_fbcon_fillrect;
- info->fbops->fb_copyarea = nv50_fbcon_copyarea;
- info->fbops->fb_imageblit = nv50_fbcon_imageblit;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 32b244bcb482..204a79ff10f4 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -317,17 +317,20 @@ void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj_ref *ramfc = chan->ramfc;
NV_DEBUG(dev, "ch%d\n", chan->id);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- nouveau_gpuobj_ref_del(dev, &chan->cache);
-
+ /* This will ensure the channel is seen as disabled. */
+ chan->ramfc = NULL;
nv50_fifo_channel_disable(dev, chan->id, false);
/* Dummy channel, also used on ch 127 */
if (chan->id == 0)
nv50_fifo_channel_disable(dev, 127, false);
+
+ nouveau_gpuobj_ref_del(dev, &ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->cache);
}
int
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 20319e59d368..6d504801b514 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -165,6 +165,12 @@ nv50_graph_channel(struct drm_device *dev)
uint32_t inst;
int i;
+ /* Be sure we're not in the middle of a context switch or bad things
+ * will happen, such as unloading the wrong pgraph context.
+ */
+ if (!nv_wait(0x400300, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "Ctxprog is still running\n");
+
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
return NULL;
@@ -275,7 +281,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)
int
nv50_graph_unload_context(struct drm_device *dev)
{
- uint32_t inst, fifo = nv_rd32(dev, 0x400500);
+ uint32_t inst;
inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
@@ -283,12 +289,10 @@ nv50_graph_unload_context(struct drm_device *dev)
inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400500, fifo & ~1);
nv_wr32(dev, 0x400784, inst);
nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
nouveau_wait_for_idle(dev);
- nv_wr32(dev, 0x400500, fifo);
nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7f..f0dc4e36ef05 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
for (i = 0x1700; i <= 0x1710; i += 4)
priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+ dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+ else
+ dev_priv->vram_sys_base = 0;
+
/* Reserve the last MiB of VRAM, we should probably try to avoid
* setting up the below tables over the top of the VBIOS image at
* some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
* We map the entire fake channel into the start of the PRAMIN BAR
*/
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
- 0, &priv->pramin_pt);
+ 0, &priv->pramin_pt);
if (ret)
return ret;
- for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
- if (v < (c_offset + c_size))
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
- else
- BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+ v = c_offset | 1;
+ if (dev_priv->vram_sys_base) {
+ v += dev_priv->vram_sys_base;
+ v |= 0x30;
+ }
+
+ i = 0;
+ while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ v += 0x1000;
+ i += 8;
+ }
+
+ while (i < pt_size) {
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ i += 8;
}
BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- uint32_t pte, pte_end, vram;
+ struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+ uint32_t pte, pte_end;
+ uint64_t vram;
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
gpuobj->im_pramin->start, gpuobj->im_pramin->size);
- pte = (gpuobj->im_pramin->start >> 12) << 3;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ pte = (gpuobj->im_pramin->start >> 12) << 1;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
vram = gpuobj->im_backing_start;
NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+ vram |= 1;
+ if (dev_priv->vram_sys_base) {
+ vram += dev_priv->vram_sys_base;
+ vram |= 0x30;
+ }
+
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
- pte += 8;
+ nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+ nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
vram += NV50_INSTMEM_PAGE_SIZE;
}
dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (gpuobj->im_bound == 0)
return -EINVAL;
- pte = (gpuobj->im_pramin->start >> 12) << 3;
- pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ pte = (gpuobj->im_pramin->start >> 12) << 1;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
- nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
- pte += 8;
+ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+ nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
}
dev_priv->engine.instmem.finish_access(dev);
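Both the instmem changes above and nv50_mem_vm_bind_linear() earlier in this series build page-table entries the same way once vram_sys_base is known: on the 0xaa/0xac IGPs, whose VRAM is carved out of system memory, the entry must carry the real system address plus the 0x30 target bits. A small helper-style sketch of that encoding; the function is illustrative, the patch open-codes it at each site:

#include <stdint.h>

/* Illustrative only -- not part of the patch above.  Bit 0 marks the entry
 * present; on IGPs (vram_sys_base != 0) the address is rebased into system
 * memory and the 0x30 target bits are set. */
static uint64_t nv50_encode_pte(uint64_t vram_offset, uint64_t vram_sys_base)
{
	uint64_t pte = vram_offset | 1;

	if (vram_sys_base) {
		pte += vram_sys_base;
		pte |= 0x30;
	}
	return pte;	/* low word then high word are written via nv_wo32() */
}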
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index ecf1936b8224..c2fff543b06f 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -101,6 +101,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
struct nouveau_encoder *nvenc = nouveau_encoder(enc);
if (nvenc == nv_encoder ||
+ nvenc->disconnect != nv50_sor_disconnect ||
nvenc->dcb->or != nv_encoder->dcb->or)
continue;
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 5982321be4d5..1c02d23f6fcc 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,10 +1,14 @@
config DRM_RADEON_KMS
- bool "Enable modesetting on radeon by default"
+ bool "Enable modesetting on radeon by default - NEW DRIVER"
depends on DRM_RADEON
help
- Choose this option if you want kernel modesetting enabled by default,
- and you have a new enough userspace to support this. Running old
- userspaces with this enabled will cause pain.
+ Choose this option if you want kernel modesetting enabled by default.
+
+ This is a completely new driver. It's only part of the existing drm
+ for compatibility reasons. It requires an entirely different graphics
+ stack above it and works very differently from the old drm stack.
+ i.e. don't enable this unless you know what you are doing; it may
+ cause issues or bugs compared to the previous userspace driver stack.
When kernel modesetting is enabled the IOCTL of radeon/drm
driver are considered as invalid and an error message is printed
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index e3b44562d265..7f152f66f196 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/sched.h>
+#include <asm/unaligned.h>
#define ATOM_DEBUG
@@ -212,7 +213,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
case ATOM_ARG_PS:
idx = U8(*ptr);
(*ptr)++;
- val = le32_to_cpu(ctx->ps[idx]);
+ /* get_unaligned_le32 avoids unaligned accesses from atombios
+ * tables, noticed on a DEC Alpha. */
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -640,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
uint8_t count = U8((*ptr)++);
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
- schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+ udelay(count);
else
schedule_timeout_uninterruptible(msecs_to_jiffies(count));
}
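The comment added above explains why the PS-block read now goes through get_unaligned_le32(): atombios tables are packed, so a plain 32-bit dereference can fault on architectures without unaligned load support (the Alpha case mentioned). A minimal sketch of the same access pattern; the wrapper name is illustrative:

#include <asm/unaligned.h>
#include <linux/types.h>

/* Illustrative only -- not part of the patch above: get_unaligned_le32()
 * does a little-endian 32-bit load that is safe even when the pointer is
 * not 4-byte aligned, unlike a direct le32_to_cpu(*(u32 *)p) dereference. */
static u32 read_packed_le32(const void *table, unsigned int idx)
{
	const u8 *p = table;

	return get_unaligned_le32(p + idx * 4);
}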
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 71060114d5de..99915a682d59 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -332,11 +332,13 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
+ int retry_count = 0;
memset(&args, 0, sizeof(args));
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+retry:
memcpy(base, req_bytes, num_bytes);
args.lpAuxRequest = 0;
@@ -347,10 +349,12 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus) {
- DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+ if (args.ucReplyStatus && !args.ucDataOutLen) {
+ if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+ goto retry;
+ DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus);
+ chan->rec.i2c_id, args.ucReplyStatus, retry_count);
return false;
}
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 11c9a3fe6810..c0d4650cdb79 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -354,11 +354,17 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
return RREG32(RADEON_CRTC2_CRNT_FRAME);
}
+/* Whoever calls radeon_fence_emit should call ring_lock and ask
+ * for enough space (today the callers are ib schedule and buffer move) */
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
- /* Who ever call radeon_fence_emit should call ring_lock and ask
- * for enough space (today caller are ib schedule and buffer move) */
+ /* We have to make sure that caches are flushed before
+ * CPU might read something from VRAM. */
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
@@ -3369,7 +3375,6 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -3481,13 +3486,12 @@ int r100_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r100_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 0051d11b907c..43b55a030b4d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -506,11 +506,14 @@ void r300_vram_info(struct radeon_device *rdev)
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
+
tmp = RREG32(RADEON_MEM_CNTL);
- if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
- rdev->mc.vram_width = 128;
- } else {
- rdev->mc.vram_width = 64;
+ tmp &= R300_MEM_NUM_CHANNELS_MASK;
+ switch (tmp) {
+ case 0: rdev->mc.vram_width = 64; break;
+ case 1: rdev->mc.vram_width = 128; break;
+ case 2: rdev->mc.vram_width = 256; break;
+ default: rdev->mc.vram_width = 128; break;
}
r100_vram_init_sizes(rdev);
@@ -1327,7 +1330,6 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1418,15 +1420,15 @@ int r300_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r300_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
- radeon_irq_kms_fini(rdev);
+ radeon_agp_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4526faaacca8..d9373246c97f 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -389,16 +389,15 @@ int r420_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- r420_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 9a189072f2b9..ddf5731eba0d 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -294,13 +294,12 @@ int r520_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 1b6d0001b20e..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1654,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.align_mask = 16 - 1;
}
+void r600_cp_fini(struct radeon_device *rdev)
+{
+ r600_cp_stop(rdev);
+ radeon_ring_fini(rdev);
+}
+
/*
* GPU scratch registers helpers function.
@@ -1861,6 +1867,12 @@ int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
/* pin copy shader into vram */
if (rdev->r600_blit.shader_obj) {
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1938,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio resume failed\n");
+ return r;
+ }
+
return r;
}
@@ -1945,6 +1964,7 @@ int r600_suspend(struct radeon_device *rdev)
{
int r;
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
rdev->cp.ready = false;
@@ -2045,19 +2065,15 @@ int r600_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
return r;
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
- r600_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
@@ -2083,20 +2099,17 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
- /* Suspend operations */
- r600_suspend(rdev);
-
r600_audio_fini(rdev);
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
r600_pcie_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2900,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
return 0;
#endif
}
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * rdev: radeon device structure
+ * bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX don't seem to take into account an HDP flush performed
+ * through the ring buffer; this leads to corruption in rendering (see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186). To avoid this we
+ * perform the HDP flush directly by writing the register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 99e2c3891a7d..0dcb6904c4ff 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return rdev->family >= CHIP_R600
+ return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -261,7 +261,6 @@ void r600_audio_fini(struct radeon_device *rdev)
if (!r600_audio_chipset_supported(rdev))
return;
- WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
-
del_timer(&rdev->audio_timer);
+ WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index af1c3ca8a4cb..446b765ac72a 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
- mutex_unlock(&rdev->ib_pool.mutex);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 6d5a711c2e91..75bcf35a0931 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1428,9 +1428,12 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+ dev_priv->r600_max_backends,
+ (0xff << dev_priv->r600_max_backends) & 0xff);
gb_tiling_config |= R600_BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2d5f2bfa7201..c0356bb193e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -96,6 +96,7 @@ extern int radeon_audio;
* symbol;
*/
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEONFB_CONN_LIMIT 4
@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
*/
struct radeon_ib {
struct list_head list;
- unsigned long idx;
+ unsigned idx;
uint64_t gpu_addr;
struct radeon_fence *fence;
- uint32_t *ptr;
+ uint32_t *ptr;
uint32_t length_dw;
+ bool free;
};
/*
@@ -377,10 +379,9 @@ struct radeon_ib {
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
- struct list_head scheduled_ibs;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
- DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+ unsigned head_id;
};
struct radeon_cp {
@@ -661,6 +662,13 @@ struct radeon_asic {
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+ /* ioctl hw specific callback. Some hw might want to perform a special
+ * operation on a specific ioctl. For instance, on wait idle some hw
+ * might want to perform an HDP flush through MMIO, as it seems that
+ * some R6XX/R7XX hw doesn't take an HDP flush into account if programmed
+ * through the ring.
+ */
+ void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
};
/*
@@ -1143,6 +1151,7 @@ extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
+extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
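The new comment in radeon.h requires RADEON_IB_POOL_SIZE to be a power of two because the reworked pool (head_id plus a per-IB free flag, replacing the allocation bitmap) is walked as a ring whose index wraps with a mask rather than a modulo. A self-contained sketch of that indexing pattern; the structures and function below are illustrative stand-ins, not the actual radeon_ib_get() implementation:

#include <linux/types.h>

#define EXAMPLE_IB_POOL_SIZE 16	/* must be a power of 2, as noted above */

struct example_ib { bool free; };
struct example_ib_pool {
	struct example_ib ibs[EXAMPLE_IB_POOL_SIZE];
	unsigned head_id;
};

/* Illustrative only -- not part of the patch above: scan the ring starting
 * at head_id and wrap with a mask; the mask only equals "index mod size"
 * when the size is a power of two. */
static struct example_ib *example_ib_find_free(struct example_ib_pool *pool)
{
	unsigned i, idx;

	for (i = 0; i < EXAMPLE_IB_POOL_SIZE; i++) {
		idx = (pool->head_id + i) & (EXAMPLE_IB_POOL_SIZE - 1);
		if (pool->ibs[idx].free)
			return &pool->ibs[idx];
	}
	return NULL;	/* every IB is still in flight */
}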
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f2fbd2e4e9df..05ee1aeac3fd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -117,6 +117,7 @@ static struct radeon_asic r100_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -176,6 +177,7 @@ static struct radeon_asic r300_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -219,6 +221,7 @@ static struct radeon_asic r420_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -267,6 +270,7 @@ static struct radeon_asic rs400_asic = {
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -323,6 +327,7 @@ static struct radeon_asic rs600_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -370,6 +375,7 @@ static struct radeon_asic rs690_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -421,6 +427,7 @@ static struct radeon_asic rv515_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
@@ -463,6 +470,7 @@ static struct radeon_asic r520_asic = {
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
};
/*
@@ -504,6 +512,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
+extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
static struct radeon_asic r600_asic = {
.init = &r600_init,
@@ -538,6 +547,7 @@ static struct radeon_asic r600_asic = {
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
/*
@@ -582,6 +592,7 @@ static struct radeon_asic rv770_asic = {
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
};
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index fa82ca74324e..4d8831548a5f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -206,6 +206,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* Asrock RS600 board lists the DVI port as HDMI */
+ if ((dev->pdev->device == 0x7941) &&
+ (dev->pdev->subsystem_vendor == 0x1849) &&
+ (dev->pdev->subsystem_device == 0x7941)) {
+ if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+ *connector_type = DRM_MODE_CONNECTOR_DVID;
+ }
+
/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
if ((dev->pdev->device == 0x7941) &&
(dev->pdev->subsystem_vendor == 0x147b) &&
@@ -287,6 +296,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*connector_type = DRM_MODE_CONNECTOR_DVID;
}
+ /* XFX Pine Group device rv730 reports no VGA DDC lines
+ * even though they are wired up to record 0x93
+ */
+ if ((dev->pdev->device == 0x9498) &&
+ (dev->pdev->subsystem_vendor == 0x1682) &&
+ (dev->pdev->subsystem_device == 0x2452)) {
+ struct radeon_device *rdev = dev->dev_private;
+ *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
+ }
return true;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 4ddfd4b5bc51..7932dc4d6b90 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -65,31 +65,42 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- start_jiffies = jiffies;
- for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
- if (r) {
- goto out_cleanup;
+
+ /* r100 doesn't have dma engine so skip the test */
+ if (rdev->asic->copy_dma) {
+
+ start_jiffies = jiffies;
+ for (i = 0; i < n; i++) {
+ r = radeon_fence_create(rdev, &fence);
+ if (r) {
+ goto out_cleanup;
+ }
+
+ r = radeon_copy_dma(rdev, saddr, daddr,
+ size / RADEON_GPU_PAGE_SIZE, fence);
+
+ if (r) {
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ goto out_cleanup;
+ }
+ radeon_fence_unref(&fence);
}
- r = radeon_copy_dma(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
- if (r) {
- goto out_cleanup;
+ end_jiffies = jiffies;
+ time = end_jiffies - start_jiffies;
+ time = jiffies_to_msecs(time);
+ if (time > 0) {
+ i = ((n * size) >> 10) / time;
+ printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
+ " %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
+ n, size >> 10,
+ sdomain, ddomain, time,
+ i, i * 1000, (i * 1000) / 1024);
}
- r = radeon_fence_wait(fence, false);
- if (r) {
- goto out_cleanup;
- }
- radeon_fence_unref(&fence);
- }
- end_jiffies = jiffies;
- time = end_jiffies - start_jiffies;
- time = jiffies_to_msecs(time);
- if (time > 0) {
- i = ((n * size) >> 10) / time;
- printk(KERN_INFO "radeon: dma %u bo moves of %ukb from %d to %d"
- " in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
- sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
}
+
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 579c8920e081..e7b19440102e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -971,8 +971,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
lvds->native_mode.vdisplay);
lvds->panel_vcc_delay = RBIOS16(lcd_info + 0x2c);
- if (lvds->panel_vcc_delay > 2000 || lvds->panel_vcc_delay < 0)
- lvds->panel_vcc_delay = 2000;
+ lvds->panel_vcc_delay = min_t(u16, lvds->panel_vcc_delay, 2000);
lvds->panel_pwr_delay = RBIOS8(lcd_info + 0x24);
lvds->panel_digon_delay = RBIOS16(lcd_info + 0x38) & 0xf;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 55266416fa47..65f81942f399 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -580,16 +580,18 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
struct drm_encoder_helper_funcs *encoder_funcs;
- bool dret;
+ bool dret = false;
enum drm_connector_status ret = connector_status_disconnected;
encoder = radeon_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -740,11 +742,13 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
struct drm_mode_object *obj;
int i;
enum drm_connector_status ret = connector_status_disconnected;
- bool dret;
+ bool dret = false;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
- dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ if (radeon_connector->ddc_bus) {
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ dret = radeon_ddc_probe(radeon_connector);
+ radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+ }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
@@ -776,7 +780,7 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
* connected and the DVI port disconnected. If the edid doesn't
* say HDMI, vice versa.
*/
- if (radeon_connector->shared_ddc && connector_status_connected) {
+ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
struct drm_device *dev = connector->dev;
struct drm_connector *list_connector;
struct radeon_connector *list_radeon_connector;
@@ -1056,8 +1060,7 @@ radeon_add_atom_connector(struct drm_device *dev,
return;
}
if (radeon_connector->ddc_bus && i2c_bus->valid) {
- if (memcmp(&radeon_connector->ddc_bus->rec, i2c_bus,
- sizeof(struct radeon_i2c_bus_rec)) == 0) {
+ if (radeon_connector->ddc_bus->rec.i2c_id == i2c_bus->i2c_id) {
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
@@ -1343,7 +1346,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_connector->dac_load_detect = false;
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
- 1);
+ radeon_connector->dac_load_detect);
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1190148cf5e6..e9d085021c1f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
&p->validated);
}
}
- return radeon_bo_list_validate(&p->validated, p->ib->fence);
+ return radeon_bo_list_validate(&p->validated);
}
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
unsigned i;
- if (error && parser->ib) {
- radeon_bo_list_unvalidate(&parser->validated,
- parser->ib->fence);
- } else {
- radeon_bo_list_unreserve(&parser->validated);
+ if (!error && parser->ib) {
+ radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
+ radeon_bo_list_unreserve(&parser->validated);
for (i = 0; i < parser->nrelocs; i++) {
if (parser->relocs[i].gobj) {
mutex_lock(&parser->rdev->ddev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a92f994cc26..7e17a362b54b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -278,7 +278,7 @@ static void radeon_print_display_setup(struct drm_device *dev)
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
- if (radeon_connector->ddc_bus)
+ if (radeon_connector->ddc_bus) {
DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
radeon_connector->ddc_bus->rec.mask_clk_reg,
radeon_connector->ddc_bus->rec.mask_data_reg,
@@ -288,6 +288,15 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
+ } else {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ DRM_INFO(" DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
+ }
DRM_INFO(" Encoders:\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e13785282a82..c57ad606504d 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -106,9 +106,10 @@
* 1.29- R500 3D cmd buffer support
* 1.30- Add support for occlusion queries
* 1.31- Add support for num Z pipes from GET_PARAM
+ * 1.32- fixes for rv740 setup
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 31
+#define DRIVER_MINOR 32
#define DRIVER_PATCHLEVEL 0
enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 3ba213d1b06c..d71e346e9ab5 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -248,7 +248,7 @@ int radeonfb_create(struct drm_device *dev,
if (ret)
goto out_unref;
- memset_io(fbptr, 0xff, aligned_size);
+ memset_io(fbptr, 0x0, aligned_size);
strcpy(info->fix.id, "radeondrmfb");
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0e1325e18534..db8e9a355a01 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -308,6 +308,9 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, NULL, false);
+ /* callback hw specific functions if any */
+ if (robj->rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
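
The hunk above adds an optional per-ASIC hook: after the generic wait, the ioctl calls rdev->asic->ioctl_wait_idle only when the chip family installs one. A minimal userspace sketch of that optional-callback pattern; the names (asic_ops, r600_wait_idle_workaround) are illustrative stand-ins, not the driver's real tables:

    #include <stdio.h>

    struct device;   /* opaque in this sketch */

    /* Per-chip operations table; optional hooks are simply left NULL. */
    struct asic_ops {
        void (*ioctl_wait_idle)(struct device *dev, void *obj);
    };

    static void r600_wait_idle_workaround(struct device *dev, void *obj)
    {
        (void)dev;
        (void)obj;
        printf("chip-specific wait-idle fixup ran\n");
    }

    static const struct asic_ops r600_ops = {
        .ioctl_wait_idle = r600_wait_idle_workaround,
    };

    static const struct asic_ops r100_ops = {
        .ioctl_wait_idle = NULL,   /* this family needs no fixup */
    };

    static void wait_idle_ioctl(const struct asic_ops *ops, struct device *dev, void *obj)
    {
        /* ...generic wait on the buffer object would happen here... */
        if (ops->ioctl_wait_idle)              /* call back only if the ASIC provides it */
            ops->ioctl_wait_idle(dev, obj);
    }

    int main(void)
    {
        wait_idle_ioctl(&r600_ops, NULL, NULL);
        wait_idle_ioctl(&r100_ops, NULL, NULL);   /* silently skips the hook */
        return 0;
    }
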
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d72a71bff218..f1da370928eb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
}
}
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
- struct radeon_fence *old_fence = NULL;
int r;
r = radeon_bo_list_reserve(head);
@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
}
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
- if (fence) {
- old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
- bo->tbo.sync_obj = radeon_fence_ref(fence);
- bo->tbo.sync_obj_arg = NULL;
- }
- if (old_fence) {
- radeon_fence_unref(&old_fence);
- }
}
return 0;
}
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
{
struct radeon_bo_list *lobj;
- struct radeon_fence *old_fence;
-
- if (fence)
- list_for_each_entry(lobj, head, list) {
- old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
- if (old_fence == fence) {
- lobj->bo->tbo.sync_obj = NULL;
- radeon_fence_unref(&old_fence);
- }
+ struct radeon_bo *bo;
+ struct radeon_fence *old_fence = NULL;
+
+ list_for_each_entry(lobj, head, list) {
+ bo = lobj->bo;
+ spin_lock(&bo->tbo.lock);
+ old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+ bo->tbo.sync_obj = radeon_fence_ref(fence);
+ bo->tbo.sync_obj_arg = NULL;
+ spin_unlock(&bo->tbo.lock);
+ if (old_fence) {
+ radeon_fence_unref(&old_fence);
}
- radeon_bo_list_unreserve(head);
+ }
}
int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
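
radeon_bo_list_unvalidate() is replaced by radeon_bo_list_fence(): fencing is no longer folded into validation, and each buffer's sync object is swapped under the per-BO spinlock before the displaced fence is unreferenced. Below is a minimal userspace sketch of that swap-under-lock pattern; the fence type, the plain-int refcount and bo_set_fence() are illustrative stand-ins, not the kernel objects (the kernel uses atomic refcounts):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fence { int refcount; unsigned seq; };

    static struct fence *fence_ref(struct fence *f) { f->refcount++; return f; }
    static void fence_unref(struct fence **f)
    {
        if (*f && --(*f)->refcount == 0)
            free(*f);
        *f = NULL;
    }

    struct bo {
        pthread_spinlock_t lock;
        struct fence *sync_obj;   /* fence protecting this buffer, may be NULL */
    };

    /* Attach @new_fence to @bo, releasing whatever fence was there before.
     * The swap happens under the lock; the old reference is dropped after
     * unlocking so the free path never runs with the lock held. */
    static void bo_set_fence(struct bo *bo, struct fence *new_fence)
    {
        struct fence *old;

        pthread_spin_lock(&bo->lock);
        old = bo->sync_obj;
        bo->sync_obj = fence_ref(new_fence);
        pthread_spin_unlock(&bo->lock);

        if (old)
            fence_unref(&old);
    }

    int main(void)
    {
        struct bo bo = { .sync_obj = NULL };
        struct fence *f = calloc(1, sizeof(*f));

        pthread_spin_init(&bo.lock, PTHREAD_PROCESS_PRIVATE);
        f->refcount = 1;
        f->seq = 42;

        bo_set_fence(&bo, f);   /* bo now holds its own reference */
        fence_unref(&f);        /* drop the creator's reference */
        printf("bo fence seq = %u\n", bo.sync_obj->seq);

        fence_unref(&bo.sync_obj);
        return 0;
    }
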
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index a02f18011ad1..7ab43de1e244 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
struct list_head *head);
extern int radeon_bo_list_reserve(struct list_head *head);
extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
struct vm_area_struct *vma);
extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 4d12b2d17b4d..6579eb4c1f28 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
struct radeon_fence *fence;
struct radeon_ib *nib;
- unsigned long i;
- int r = 0;
+ int r = 0, i, c;
*ib = NULL;
r = radeon_fence_create(rdev, &fence);
if (r) {
- DRM_ERROR("failed to create fence for new IB\n");
+ dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
- i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
- if (i < RADEON_IB_POOL_SIZE) {
- set_bit(i, rdev->ib_pool.alloc_bm);
- rdev->ib_pool.ibs[i].length_dw = 0;
- *ib = &rdev->ib_pool.ibs[i];
- mutex_unlock(&rdev->ib_pool.mutex);
- goto out;
+ for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+ i &= (RADEON_IB_POOL_SIZE - 1);
+ if (rdev->ib_pool.ibs[i].free) {
+ nib = &rdev->ib_pool.ibs[i];
+ break;
+ }
}
- if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
- /* we go do nothings here */
+ if (nib == NULL) {
+ /* This should never happen, it means we allocated all
+ * IB and haven't scheduled one yet, return EBUSY to
+ * userspace hoping that on ioctl recall we get better
+ * luck
+ */
+ dev_err(rdev->dev, "no free indirect buffer !\n");
mutex_unlock(&rdev->ib_pool.mutex);
- DRM_ERROR("all IB allocated none scheduled.\n");
- r = -EINVAL;
- goto out;
+ radeon_fence_unref(&fence);
+ return -EBUSY;
}
- /* get the first ib on the scheduled list */
- nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
- struct radeon_ib, list);
- if (nib->fence == NULL) {
- /* we go do nothings here */
+ rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ nib->free = false;
+ if (nib->fence) {
mutex_unlock(&rdev->ib_pool.mutex);
- DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
- r = -EINVAL;
- goto out;
- }
- mutex_unlock(&rdev->ib_pool.mutex);
-
- r = radeon_fence_wait(nib->fence, false);
- if (r) {
- DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
- (unsigned long)nib->gpu_addr, nib->length_dw);
- DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
- goto out;
+ r = radeon_fence_wait(nib->fence, false);
+ if (r) {
+ dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+ nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+ mutex_lock(&rdev->ib_pool.mutex);
+ nib->free = true;
+ mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_fence_unref(&fence);
+ return r;
+ }
+ mutex_lock(&rdev->ib_pool.mutex);
}
radeon_fence_unref(&nib->fence);
-
+ nib->fence = fence;
nib->length_dw = 0;
-
- /* scheduled list is accessed here */
- mutex_lock(&rdev->ib_pool.mutex);
- list_del(&nib->list);
- INIT_LIST_HEAD(&nib->list);
mutex_unlock(&rdev->ib_pool.mutex);
-
*ib = nib;
-out:
- if (r) {
- radeon_fence_unref(&fence);
- } else {
- (*ib)->fence = fence;
- }
- return r;
+ return 0;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -113,19 +100,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- mutex_lock(&rdev->ib_pool.mutex);
- if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
- /* IB is scheduled & not signaled don't do anythings */
- mutex_unlock(&rdev->ib_pool.mutex);
- return;
- }
- list_del(&tmp->list);
- INIT_LIST_HEAD(&tmp->list);
- if (tmp->fence)
+ if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
-
- tmp->length_dw = 0;
- clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+ mutex_lock(&rdev->ib_pool.mutex);
+ tmp->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
}
@@ -135,7 +113,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
if (!ib->length_dw || !rdev->cp.ready) {
/* TODO: Nothings in the ib we should report. */
- DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+ DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
@@ -148,7 +126,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+ /* once scheduled IB is considered free and protected by the fence */
+ ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
return 0;
@@ -164,7 +143,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
/* Allocate 1M object buffer */
- INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
@@ -195,9 +173,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+ rdev->ib_pool.ibs[i].free = true;
}
- bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+ rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) {
@@ -214,7 +192,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return;
}
mutex_lock(&rdev->ib_pool.mutex);
- bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
@@ -363,7 +340,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
if (ib == NULL) {
return 0;
}
- seq_printf(m, "IB %04lu\n", ib->idx);
+ seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
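
This rework drops the alloc_bm bitmap and the scheduled_ibs list in favour of a per-IB free flag plus a head_id cursor: allocation scans the power-of-two-sized pool circularly, wrapping with a mask rather than a modulo, and returns -EBUSY if every slot is still in flight. A small standalone sketch of that scan, with illustrative names (pool_get, slot) rather than the driver's:

    #include <stdbool.h>
    #include <stdio.h>

    #define POOL_SIZE 8   /* must be a power of two for the & mask to work */

    struct slot { bool free; unsigned idx; };

    static struct slot pool[POOL_SIZE];
    static unsigned head_id;

    /* Scan at most POOL_SIZE entries, starting from head_id and wrapping with
     * a mask, the same shape as the loop in the new radeon_ib_get(). */
    static struct slot *pool_get(void)
    {
        unsigned i, c;

        for (i = head_id, c = 0; c < POOL_SIZE; c++, i++) {
            i &= POOL_SIZE - 1;
            if (pool[i].free) {
                pool[i].free = false;
                head_id = (i + 1) & (POOL_SIZE - 1);
                return &pool[i];
            }
        }
        return NULL;   /* every slot is busy: the caller reports -EBUSY */
    }

    int main(void)
    {
        unsigned i;

        for (i = 0; i < POOL_SIZE; i++) {
            pool[i].free = true;
            pool[i].idx = i;
        }
        for (i = 0; i < 3; i++) {
            struct slot *s = pool_get();
            if (s)
                printf("got slot %u\n", s->idx);
            else
                printf("pool exhausted\n");
        }
        return 0;
    }
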
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 9f5418983e2a..287fcebfb4e6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -223,15 +223,31 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
return 0;
}
+int rs400_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ uint32_t tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(0x0150);
+ if (tmp & (1 << 2)) {
+ return 0;
+ }
+ DRM_UDELAY(1);
+ }
+ return -1;
+}
+
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: HDP same place on rs400 ? */
r100_hdp_reset(rdev);
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
- if (r300_mc_wait_for_idle(rdev)) {
- printk(KERN_WARNING "Failed to wait MC idle while "
- "programming pipes. Bad things might happen.\n");
+ if (rs400_mc_wait_for_idle(rdev)) {
+ printk(KERN_WARNING "rs400: Failed to wait MC idle while "
+ "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
}
}
@@ -370,8 +386,8 @@ void rs400_mc_program(struct radeon_device *rdev)
r100_mc_stop(rdev, &save);
/* Wait for mc idle */
- if (r300_mc_wait_for_idle(rdev))
- dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
+ if (rs400_mc_wait_for_idle(rdev))
+ dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
WREG32(R_000148_MC_FB_LOCATION,
S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
@@ -448,7 +464,6 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -527,7 +542,6 @@ int rs400_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs400_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index d5255751e7b3..c3818562a13e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -610,7 +610,6 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -689,7 +688,6 @@ int rs600_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs600_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index cd31da913771..06e2771aee5a 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -676,7 +676,6 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -756,7 +755,6 @@ int rs690_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rs690_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 62756717b044..0e1e6b8632b8 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -537,7 +537,6 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -615,13 +614,12 @@ int rv515_init(struct radeon_device *rdev)
if (r) {
/* Somethings want wront with the accel init stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
- rv515_suspend(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv370_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
- radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index afd9e8213c29..03021674d097 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -549,9 +549,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1);
- backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
- rdev->config.rv770.max_backends,
- (0xff << rdev->config.rv770.max_backends) & 0xff);
+ if (rdev->family == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
+ rdev->config.rv770.max_backends,
+ (0xff << rdev->config.rv770.max_backends) & 0xff);
gb_tiling_config |= BACKEND_MAP(backend_map);
cc_gc_shader_pipe_config =
@@ -887,6 +890,12 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
rv770_gpu_init(rdev);
+ r = r600_blit_init(rdev);
+ if (r) {
+ r600_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
/* pin copy shader into vram */
if (rdev->r600_blit.shader_obj) {
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
@@ -1055,19 +1064,15 @@ int rv770_init(struct radeon_device *rdev)
r = r600_pcie_gart_init(rdev);
if (r)
return r;
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
- rv770_suspend(rdev);
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ r600_cp_fini(rdev);
r600_wb_fini(rdev);
- radeon_ring_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
@@ -1089,13 +1094,11 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- rv770_suspend(rdev);
-
r600_blit_fini(rdev);
+ r600_cp_fini(rdev);
+ r600_wb_fini(rdev);
r600_irq_fini(rdev);
radeon_irq_kms_fini(rdev);
- radeon_ring_fini(rdev);
- r600_wb_fini(rdev);
rv770_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a3e909b7bba..c7320ce4567d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,6 +1020,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
int i;
+ struct drm_mm_node *node = mem->mm_node;
+
+ if (node && placement->lpfn != 0 &&
+ (node->start < placement->fpfn ||
+ node->start + node->size > placement->lpfn))
+ return -1;
for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
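
ttm_bo_mem_compat() gains a range test: even when the placement flags match, memory whose drm_mm node lies outside the requested [fpfn, lpfn) page window is no longer considered compatible. A standalone sketch of just that range check, with simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the drm_mm node and the placement range. */
    struct mm_node { unsigned long start, size; };      /* in pages */
    struct placement { unsigned long fpfn, lpfn; };     /* first/last page frame, lpfn 0 = no limit */

    /* Memory is only "compatible" if the node sits entirely inside the
     * requested page-frame window; this is the extra check the TTM patch
     * adds before the placement-flag comparison. */
    static bool range_compat(const struct placement *p, const struct mm_node *n)
    {
        if (p->lpfn != 0 &&
            (n->start < p->fpfn || n->start + n->size > p->lpfn))
            return false;
        return true;
    }

    int main(void)
    {
        struct placement p = { .fpfn = 0, .lpfn = 256 };     /* e.g. first 1 MiB of VRAM with 4 KiB pages */
        struct mm_node ok  = { .start = 16,  .size = 64 };
        struct mm_node bad = { .start = 200, .size = 128 };  /* spills past lpfn */

        printf("ok:  %d\n", range_compat(&p, &ok));
        printf("bad: %d\n", range_compat(&p, &bad));
        return 0;
    }
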
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e2123af7775a..3d47a2c12322 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -196,14 +196,15 @@ EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_state)
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
int ret = 0;
if (PageHighMem(p))
return 0;
- if (get_page_memtype(p) != -1) {
+ if (c_old != tt_cached) {
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
@@ -212,16 +213,17 @@ static inline int ttm_tt_set_page_caching(struct page *p,
return ret;
}
- if (c_state == tt_wc)
+ if (c_new == tt_wc)
ret = set_memory_wc((unsigned long) page_address(p), 1);
- else if (c_state == tt_uncached)
+ else if (c_new == tt_uncached)
ret = set_pages_uc(p, 1);
return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_state)
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
return 0;
}
@@ -254,7 +256,9 @@ static int ttm_tt_set_caching(struct ttm_tt *ttm,
for (i = 0; i < ttm->num_pages; ++i) {
cur_page = ttm->pages[i];
if (likely(cur_page != NULL)) {
- ret = ttm_tt_set_page_caching(cur_page, c_state);
+ ret = ttm_tt_set_page_caching(cur_page,
+ ttm->caching_state,
+ c_state);
if (unlikely(ret != 0))
goto out_err;
}
@@ -268,7 +272,7 @@ out_err:
for (j = 0; j < i; ++j) {
cur_page = ttm->pages[j];
if (likely(cur_page != NULL)) {
- (void)ttm_tt_set_page_caching(cur_page,
+ (void)ttm_tt_set_page_caching(cur_page, c_state,
ttm->caching_state);
}
}
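
The interface change above is the point: ttm_tt_set_page_caching() no longer asks the page for its memtype but takes the caching state it is leaving (c_old) alongside the one it is entering (c_new), and only resets the page to writeback when it is not already in the default cached state. A toy sketch of a transition function with that shape (states and printfs are illustrative, not the x86 set_memory_* calls):

    #include <stdio.h>

    enum caching_state { TT_CACHED, TT_WC, TT_UNCACHED };

    /* Decide the steps for one page purely from the (old, new) pair passed
     * by the caller, instead of querying the page's current state. */
    static int set_page_caching(enum caching_state c_old, enum caching_state c_new)
    {
        if (c_old != TT_CACHED)
            printf("  reset page to writeback first\n");

        switch (c_new) {
        case TT_WC:
            printf("  mark page write-combined\n");
            break;
        case TT_UNCACHED:
            printf("  mark page uncached\n");
            break;
        case TT_CACHED:
            /* the writeback reset above already put it in the default state */
            break;
        }
        return 0;
    }

    int main(void)
    {
        printf("cached -> wc:\n");
        set_page_caching(TT_CACHED, TT_WC);
        printf("wc -> uncached:\n");
        set_page_caching(TT_WC, TT_UNCACHED);
        return 0;
    }
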
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index a6e8f687fa64..0c9c0811f42d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
*/
DRM_INFO("It appears like vesafb is loaded. "
- "Ignore above error if any. Entering stealth mode.\n");
+ "Ignore above error if any.\n");
ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
if (unlikely(ret != 0)) {
DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
goto out_no_device;
}
- vmw_kms_init(dev_priv);
- vmw_overlay_init(dev_priv);
- } else {
- ret = vmw_request_device(dev_priv);
- if (unlikely(ret != 0))
- goto out_no_device;
- vmw_kms_init(dev_priv);
- vmw_overlay_init(dev_priv);
- vmw_fb_init(dev_priv);
}
+ ret = vmw_request_device(dev_priv);
+ if (unlikely(ret != 0))
+ goto out_no_device;
+ vmw_kms_init(dev_priv);
+ vmw_overlay_init(dev_priv);
+ vmw_fb_init(dev_priv);
dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
register_pm_notifier(&dev_priv->pm_nb);
@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
unregister_pm_notifier(&dev_priv->pm_nb);
- if (!dev_priv->stealth) {
- vmw_fb_close(dev_priv);
- vmw_kms_close(dev_priv);
- vmw_overlay_close(dev_priv);
- vmw_release_device(dev_priv);
- pci_release_regions(dev->pdev);
- } else {
- vmw_kms_close(dev_priv);
- vmw_overlay_close(dev_priv);
+ vmw_fb_close(dev_priv);
+ vmw_kms_close(dev_priv);
+ vmw_overlay_close(dev_priv);
+ vmw_release_device(dev_priv);
+ if (dev_priv->stealth)
pci_release_region(dev->pdev, 2);
- }
+ else
+ pci_release_regions(dev->pdev);
+
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
if (dev->devname == vmw_devname)
@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
int ret = 0;
DRM_INFO("Master set.\n");
- if (dev_priv->stealth) {
- ret = vmw_request_device(dev_priv);
- if (unlikely(ret != 0))
- return ret;
- }
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
@@ -649,18 +639,11 @@ static void vmw_master_drop(struct drm_device *dev,
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
- if (dev_priv->stealth) {
- ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
- if (unlikely(ret != 0))
- DRM_ERROR("Unable to clean VRAM on master drop.\n");
- vmw_release_device(dev_priv);
- }
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
- if (!dev_priv->stealth)
- vmw_fb_on(dev_priv);
+ vmw_fb_on(dev_priv);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 135be9688c90..356dc935ec13 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,10 +39,10 @@
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
-#define VMWGFX_DRIVER_DATE "20090724"
-#define VMWGFX_DRIVER_MAJOR 0
-#define VMWGFX_DRIVER_MINOR 1
-#define VMWGFX_DRIVER_PATCHLEVEL 2
+#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_MAJOR 1
+#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
+ struct mutex fifo_mutex;
struct rw_semaphore rwsem;
};
@@ -213,7 +214,7 @@ struct vmw_private {
* Fencing and IRQs.
*/
- uint32_t fence_seq;
+ atomic_t fence_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
atomic_t fence_queue_waiters;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe7..0897359b3e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
-static int vmw_cmd_dma(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGAGuestPtr *ptr,
+ struct vmw_dma_buffer **vmw_bo_p)
{
- uint32_t handle;
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
- struct vmw_surface *srf = NULL;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
+ uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
- int ret;
uint32_t cur_validate_node;
struct ttm_validate_buffer *val_buf;
+ int ret;
- cmd = container_of(header, struct vmw_dma_cmd, header);
- handle = cmd->dma.guest.ptr.gmrId;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
- DRM_ERROR("Max number of DMA commands per submission"
+ DRM_ERROR("Max number relocations per submission"
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
reloc = &sw_context->relocs[sw_context->cur_reloc++];
- reloc->location = &cmd->dma.guest.ptr;
+ reloc->location = ptr;
cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
++sw_context->cur_val_buf;
}
+ *vmw_bo_p = vmw_bo;
+ return 0;
+
+out_no_reloc:
+ vmw_dmabuf_unreference(&vmw_bo);
+ vmw_bo_p = NULL;
+ return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdEndQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->q.guestResult,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo;
+ struct vmw_query_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdWaitForQuery q;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_query_cmd, header);
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->q.guestResult,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ vmw_dmabuf_unreference(&vmw_bo);
+ return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_dma_buffer *vmw_bo = NULL;
+ struct ttm_buffer_object *bo;
+ struct vmw_surface *srf = NULL;
+ struct vmw_dma_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSurfaceDMA dma;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_dma_cmd, header);
+ ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+ &cmd->dma.guest.ptr,
+ &vmw_bo);
+ if (unlikely(ret != 0))
+ return ret;
+
+ bo = &vmw_bo->base;
ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
cmd->dma.host.sid, &srf);
if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+ VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
&vmw_cmd_blt_surf_screen_check)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 4f4f6432be8b..a93367041cdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
info->pixmap.scan_align = 1;
#endif
+ info->aperture_base = vmw_priv->vram_start;
+ info->aperture_size = vmw_priv->vram_size;
+
/*
* Dirty & Deferred IO
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 4157547cc6e4..39d43a01d846 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
+ mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
/*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) min,
(unsigned int) fifo->capabilities);
- dev_priv->fence_seq = dev_priv->last_read_sequence;
+ atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
int ret;
- down_write(&fifo_state->rwsem);
+ mutex_lock(&fifo_state->fifo_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
}
out_err:
fifo_state->reserved_size = 0;
- up_write(&fifo_state->rwsem);
+ mutex_unlock(&fifo_state->fifo_mutex);
return NULL;
}
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
}
+ down_write(&fifo_state->rwsem);
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
if (reserveable)
iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
mb();
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
up_write(&fifo_state->rwsem);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+ mutex_unlock(&fifo_state->fifo_mutex);
}
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
- down_write(&fifo_state->rwsem);
- *sequence = dev_priv->fence_seq;
- up_write(&fifo_state->rwsem);
+ *sequence = atomic_read(&dev_priv->fence_seq);
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
}
do {
- *sequence = dev_priv->fence_seq++;
+ *sequence = atomic_add_return(1, &dev_priv->fence_seq);
} while (*sequence == 0);
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
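
With fence_seq now an atomic_t, sequence numbers are handed out with atomic_add_return() and zero is skipped so it can keep serving as a "no fence" value, which is what removes the need to hold the fifo rwsem around the increment. A standalone C11 sketch of the same allocation loop, assuming a 32-bit unsigned int:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint fence_seq;   /* stands in for dev_priv->fence_seq */

    /* Hand out the next fence sequence number without any external lock;
     * 0 is skipped so it keeps meaning "no fence", mirroring the
     * do { ... } while (*sequence == 0) loop in vmw_fifo_send_fence(). */
    static uint32_t next_fence_sequence(void)
    {
        uint32_t seq;

        do {
            seq = atomic_fetch_add(&fence_seq, 1) + 1;   /* like atomic_add_return() */
        } while (seq == 0);

        return seq;
    }

    int main(void)
    {
        atomic_init(&fence_seq, UINT32_MAX - 1);   /* start near the wrap to show 0 being skipped */
        for (int i = 0; i < 4; i++)
            printf("sequence %u\n", next_fence_sequence());
        return 0;
    }
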
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 778851f9f1d6..1c7a316454d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -48,6 +48,12 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_OFFSET:
param->value = dev_priv->mmio_start;
break;
+ case DRM_VMW_PARAM_HW_CAPS:
+ param->value = dev_priv->capabilities;
+ break;
+ case DRM_VMW_PARAM_FIFO_CAPS:
+ param->value = dev_priv->fifo.capabilities;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc8647..4d7cb5393860 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
return true;
/**
- * Below is to signal stale fences that have wrapped.
- * First, block fence submission.
- */
-
- down_read(&fifo_state->rwsem);
-
- /**
* Then check if the sequence is higher than what we've actually
* emitted. Then the fence is stale and signaled.
*/
- ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
- up_read(&fifo_state->rwsem);
+ ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+ > VMW_FENCE_WRAP);
return ret;
}
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
if (fifo_idle)
down_read(&fifo_state->rwsem);
- signal_seq = dev_priv->fence_seq;
+ signal_seq = atomic_read(&dev_priv->fence_seq);
ret = 0;
for (;;) {
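
The stale-fence test now only needs an atomic_read(): unsigned wrap-around arithmetic makes emitted - sequence the age of the fence even across a 32-bit rollover, and anything older than the wrap window is treated as already signaled. A sketch of that comparison; FENCE_WRAP here is an illustrative window, not necessarily the driver's constant:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FENCE_WRAP (1u << 24)

    /* Unsigned subtraction wraps, so "emitted - sequence" is the age of the
     * fence even after the counter rolls over; anything older than the
     * window is considered stale and therefore signaled. */
    static bool fence_is_stale(uint32_t emitted_seq, uint32_t fence_seq)
    {
        return (uint32_t)(emitted_seq - fence_seq) > FENCE_WRAP;
    }

    int main(void)
    {
        printf("recent fence stale?      %d\n", fence_is_stale(1000, 990));
        printf("ancient fence stale?     %d\n", fence_is_stale(0x80000000u, 5));
        printf("across rollover stale?   %d\n", fence_is_stale(10, 0xFFFFFFF0u));
        return 0;
    }
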
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index eeba6d1d06e4..31f9afed0a63 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -769,10 +769,10 @@ int vmw_kms_init(struct vmw_private *dev_priv)
drm_mode_config_init(dev);
dev->mode_config.funcs = &vmw_kms_funcs;
- dev->mode_config.min_width = 640;
- dev->mode_config.min_height = 480;
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.min_width = 1;
+ dev->mode_config.min_height = 1;
+ dev->mode_config.max_width = dev_priv->fb_max_width;
+ dev->mode_config.max_height = dev_priv->fb_max_height;
ret = vmw_kms_init_legacy_display_system(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c7efbd47ab84..f8fbbc67a406 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -35,11 +35,6 @@
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
-/* XXX: This isn't a real hardware flag, but just a hack for kernel to
- * know about primary surfaces. Find a better way to accomplish this.
- */
-#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
-
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
@@ -579,6 +574,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
srf->flags = req->flags;
srf->format = req->format;
+ srf->scanout = req->scanout;
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
srf->num_sizes = 0;
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
@@ -604,16 +600,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_err1;
- if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
- /* we should not send this flag down to hardware since
- * its not a official one
- */
- srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
- srf->scanout = true;
- } else {
- srf->scanout = false;
- }
-
if (srf->scanout &&
srf->num_sizes == 1 &&
srf->sizes[0].width == 64 &&
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index 1ac0c93603c9..2f6cf69ecb39 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -961,7 +961,7 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
remaining -= 7;
pr_devel("client 0x%p called 'target'\n", priv);
/* if target is default */
- if (!strncmp(buf, "default", 7))
+ if (!strncmp(curr_pos, "default", 7))
pdev = pci_dev_get(vga_default_device());
else {
if (!vga_pci_str_to_vars(curr_pos, remaining,
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index a31e77c776ae..b8156b4893bb 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -179,7 +179,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
*
* Some, but not all, of these voltages have low/high limits.
*/
-#define ADT7462_VOLT_COUNT 12
+#define ADT7462_VOLT_COUNT 13
#define ADT7462_VENDOR 0x41
#define ADT7462_DEVICE 0x62
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index cadcbd90ff3b..72ff2c4e757d 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -851,17 +851,16 @@ static struct lm78_data *lm78_update_device(struct device *dev)
static int __init lm78_isa_found(unsigned short address)
{
int val, save, found = 0;
-
- /* We have to request the region in two parts because some
- boards declare base+4 to base+7 as a PNP device */
- if (!request_region(address, 4, "lm78")) {
- pr_debug("lm78: Failed to request low part of region\n");
- return 0;
- }
- if (!request_region(address + 4, 4, "lm78")) {
- pr_debug("lm78: Failed to request high part of region\n");
- release_region(address, 4);
- return 0;
+ int port;
+
+ /* Some boards declare base+0 to base+7 as a PNP device, some base+4
+ * to base+7 and some base+5 to base+6. So we better request each port
+ * individually for the probing phase. */
+ for (port = address; port < address + LM78_EXTENT; port++) {
+ if (!request_region(port, 1, "lm78")) {
+ pr_debug("lm78: Failed to request port 0x%x\n", port);
+ goto release;
+ }
}
#define REALLY_SLOW_IO
@@ -925,8 +924,8 @@ static int __init lm78_isa_found(unsigned short address)
val & 0x80 ? "LM79" : "LM78", (int)address);
release:
- release_region(address + 4, 4);
- release_region(address, 4);
+ for (port--; port >= address; port--)
+ release_region(port, 1);
return found;
}
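
Requesting the probe window one port at a time means the failure path must release exactly the ports that were acquired, which is what the backwards for (port--; ...) loop does. A userspace sketch of that acquire-with-rollback shape, with a fake request_port() standing in for request_region():

    #include <stdio.h>

    #define EXTENT 8   /* number of consecutive ports a chip like the LM78 decodes */

    /* Fake single-port request/release so only the rollback shape matters. */
    static int request_port(int port) { return port != 0x295 + 5; }  /* pretend base+5 is taken */
    static void release_port(int port) { printf("released 0x%x\n", port); }

    static int probe_region(int base)
    {
        int port, found = 0;

        for (port = base; port < base + EXTENT; port++) {
            if (!request_port(port)) {
                printf("port 0x%x busy, rolling back\n", port);
                goto release;
            }
        }

        found = 1;   /* the real driver probes the chip's registers at this point */

    release:
        /* Walk back over exactly the ports we did get: 'port' stops at the
         * first failure, so nothing that was never requested gets released. */
        for (port--; port >= base; port--)
            release_port(port);
        return found;
    }

    int main(void)
    {
        printf("probe found chip: %d\n", probe_region(0x295));
        return 0;
    }
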
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 05f9225b6f94..32d4adee73db 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1793,17 +1793,17 @@ static int __init
w83781d_isa_found(unsigned short address)
{
int val, save, found = 0;
-
- /* We have to request the region in two parts because some
- boards declare base+4 to base+7 as a PNP device */
- if (!request_region(address, 4, "w83781d")) {
- pr_debug("w83781d: Failed to request low part of region\n");
- return 0;
- }
- if (!request_region(address + 4, 4, "w83781d")) {
- pr_debug("w83781d: Failed to request high part of region\n");
- release_region(address, 4);
- return 0;
+ int port;
+
+ /* Some boards declare base+0 to base+7 as a PNP device, some base+4
+ * to base+7 and some base+5 to base+6. So we better request each port
+ * individually for the probing phase. */
+ for (port = address; port < address + W83781D_EXTENT; port++) {
+ if (!request_region(port, 1, "w83781d")) {
+ pr_debug("w83781d: Failed to request port 0x%x\n",
+ port);
+ goto release;
+ }
}
#define REALLY_SLOW_IO
@@ -1877,8 +1877,8 @@ w83781d_isa_found(unsigned short address)
val == 0x30 ? "W83782D" : "W83781D", (int)address);
release:
- release_region(address + 4, 4);
- release_region(address, 4);
+ for (port--; port >= address; port--)
+ release_region(port, 1);
return found;
}
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index b1c050ff311d..e29b6d5ba8ef 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/types.h>
/* include interfaces to usb layer */
#include <linux/usb.h>
@@ -31,8 +32,8 @@
#define CMD_I2C_IO_END (1<<1)
/* i2c bit delay, default is 10us -> 100kHz */
-static int delay = 10;
-module_param(delay, int, 0);
+static unsigned short delay = 10;
+module_param(delay, ushort, 0);
MODULE_PARM_DESC(delay, "bit delay in microseconds, "
"e.g. 10 for 100kHz (default is 100kHz)");
@@ -109,7 +110,7 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
static u32 usb_func(struct i2c_adapter *adapter)
{
- u32 func;
+ __le32 func;
/* get functionality from adapter */
if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
@@ -118,7 +119,7 @@ static u32 usb_func(struct i2c_adapter *adapter)
return 0;
}
- return func;
+ return le32_to_cpu(func);
}
/* This is the actual algorithm we define */
@@ -216,8 +217,7 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
"i2c-tiny-usb at bus %03d device %03d",
dev->usb_dev->bus->busnum, dev->usb_dev->devnum);
- if (usb_write(&dev->adapter, CMD_SET_DELAY,
- cpu_to_le16(delay), 0, NULL, 0) != 0) {
+ if (usb_write(&dev->adapter, CMD_SET_DELAY, delay, 0, NULL, 0) != 0) {
dev_err(&dev->adapter.dev,
"failure setting delay to %dus\n", delay);
retval = -EIO;
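
The functionality word arrives from the USB adapter in little-endian byte order, so reading it straight into a u32 only happened to work on little-endian hosts; declaring it __le32 and converting with le32_to_cpu() fixes big-endian machines. A tiny sketch of the equivalent portable decode in plain C:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a 32-bit little-endian value byte by byte; this works on any
     * host endianness, which is what le32_to_cpu() guarantees in the kernel. */
    static uint32_t le32_decode(const unsigned char *p)
    {
        return (uint32_t)p[0]
             | (uint32_t)p[1] << 8
             | (uint32_t)p[2] << 16
             | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        /* Bytes as they arrive on the wire from the adapter (LSB first). */
        unsigned char wire[4] = { 0x0d, 0x00, 0xff, 0xee };

        printf("functionality word: 0x%08x\n", le32_decode(wire));
        return 0;
    }
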
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index cc9b5940fa97..875e34e0b235 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2115,9 +2115,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
- if (cma_loopback_addr(addr)) {
- ret = cma_bind_loopback(id_priv);
- } else if (!cma_zero_addr(addr)) {
+ if (!cma_any_addr(addr)) {
ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
if (ret)
goto err1;
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index aa6713b4a988..291d9393d359 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -100,6 +100,12 @@ static void input_close_polled_device(struct input_dev *input)
struct input_polled_dev *dev = input_get_drvdata(input);
cancel_delayed_work_sync(&dev->work);
+ /*
+ * Clean up work struct to remove references to the workqueue.
+ * It may be destroyed by the next call. This causes problems
+ * at next device open-close in case of poll_interval == 0.
+ */
+ INIT_DELAYED_WORK(&dev->work, dev->work.work.func);
input_polldev_stop_workqueue();
if (dev->close)
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 9774bdfaa482..d8c0c8d6992c 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1141,7 +1141,14 @@ static void psmouse_cleanup(struct serio *serio)
psmouse_deactivate(parent);
}
- psmouse_deactivate(psmouse);
+ psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
+
+ /*
+ * Disable stream mode so cleanup routine can proceed undisturbed.
+ */
+ if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
+ printk(KERN_WARNING "psmouse.c: Failed to disable mouse on %s\n",
+ psmouse->ps2dev.serio->phys);
if (psmouse->cleanup)
psmouse->cleanup(psmouse);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d84a36e545f6..b54aee7cd9e3 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
return 0;
}
+static int i8042_pm_thaw(struct device *dev)
+{
+ i8042_interrupt(0, NULL);
+
+ return 0;
+}
+
static const struct dev_pm_ops i8042_pm_ops = {
.suspend = i8042_pm_reset,
.resume = i8042_pm_restore,
+ .thaw = i8042_pm_thaw,
.poweroff = i8042_pm_reset,
.restore = i8042_pm_restore,
};
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 09a5e7341bd5..5256123a5228 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -618,8 +618,8 @@ static int idealtek_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
static int general_touch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
{
- dev->x = ((pkt[2] & 0x0F) << 8) | pkt[1] ;
- dev->y = ((pkt[4] & 0x0F) << 8) | pkt[3] ;
+ dev->x = (pkt[2] << 8) | pkt[1];
+ dev->y = (pkt[4] << 8) | pkt[3];
dev->press = pkt[5] & 0xff;
dev->touch = pkt[0] & 0x01;
@@ -809,9 +809,9 @@ static struct usbtouch_device_info usbtouch_dev_info[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_GENERAL_TOUCH
[DEVTYPE_GENERAL_TOUCH] = {
.min_xc = 0x0,
- .max_xc = 0x0500,
+ .max_xc = 0x7fff,
.min_yc = 0x0,
- .max_yc = 0x0500,
+ .max_yc = 0x7fff,
.rept_size = 7,
.read_data = general_touch_read_data,
},
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 54abf9e303b7..f1c8cae70b4b 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -172,11 +172,15 @@ int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
{
int r = 0;
size_t dummy = 0;
- int overhead_size =
- sizeof(struct dm_ulog_request *) + sizeof(struct cn_msg);
+ int overhead_size = sizeof(struct dm_ulog_request) + sizeof(struct cn_msg);
struct dm_ulog_request *tfr = prealloced_ulog_tfr;
struct receiving_pkg pkg;
+ /*
+ * Given the space needed to hold the 'struct cn_msg' and
+ * 'struct dm_ulog_request' - do we have enough payload
+ * space remaining?
+ */
if (data_size > (DM_ULOG_PREALLOCED_SIZE - overhead_size)) {
DMINFO("Size of tfr exceeds preallocated size");
return -EINVAL;
@@ -191,7 +195,7 @@ resend:
*/
mutex_lock(&dm_ulog_lock);
- memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
+ memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - sizeof(struct cn_msg));
memcpy(tfr->uuid, uuid, DM_UUID_LEN);
tfr->luid = luid;
tfr->seq = dm_ulog_seq++;
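
The overhead fix above is the classic sizeof slip: sizeof(struct dm_ulog_request *) measures a pointer, so the preallocated buffer looked far roomier than it really was. A short demonstration (the struct layout here is only illustrative, not the real dm_ulog_request):

    #include <stdio.h>

    /* Layout is only illustrative; the point is the pointer-vs-struct size. */
    struct ulog_request_like {
        char uuid[129];
        unsigned long long luid;
        unsigned seq, request_type, data_size;
        char data[];
    };

    int main(void)
    {
        printf("sizeof(struct ulog_request_like *) = %zu\n",
               sizeof(struct ulog_request_like *));
        printf("sizeof(struct ulog_request_like)   = %zu\n",
               sizeof(struct ulog_request_like));
        return 0;
    }
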
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ad779bd13aec..6c1046df81f6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -724,7 +724,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
/*
* Dispatch io.
*/
- if (unlikely(ms->log_failure)) {
+ if (unlikely(ms->log_failure) && errors_handled(ms)) {
spin_lock_irq(&ms->lock);
bio_list_merge(&ms->failures, &sync);
spin_unlock_irq(&ms->lock);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 5f19ceb6fe91..168bd38f5006 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -660,10 +660,9 @@ void dm_rh_recovery_end(struct dm_region *reg, int success)
spin_lock_irq(&rh->region_lock);
if (success)
list_add(&reg->list, &reg->rh->recovered_regions);
- else {
- reg->state = DM_RH_NOSYNC;
+ else
list_add(&reg->list, &reg->rh->failed_recovered_regions);
- }
+
spin_unlock_irq(&rh->region_lock);
rh->wakeup_workers(rh->context);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 7d08879689ac..c097d8a4823d 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -254,7 +254,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
* Issue the synchronous I/O from a different thread
* to avoid generic_make_request recursion.
*/
- INIT_WORK(&req.work, do_metadata);
+ INIT_WORK_ON_STACK(&req.work, do_metadata);
queue_work(ps->metadata_wq, &req.work);
flush_workqueue(ps->metadata_wq);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index e0efc1adcaff..bd58703ee8f6 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -110,7 +110,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
stripes = simple_strtoul(argv[0], &end, 10);
- if (*end) {
+ if (!stripes || *end) {
ti->error = "Invalid stripe count";
return -EINVAL;
}
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index f53392df7b97..f91b40942e07 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -80,20 +80,12 @@ static struct sysfs_ops dm_sysfs_ops = {
};
/*
- * The sysfs structure is embedded in md struct, nothing to do here
- */
-static void dm_sysfs_release(struct kobject *kobj)
-{
-}
-
-/*
* dm kobject is embedded in mapped_device structure
* no need to define release function here
*/
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
- .release = dm_sysfs_release
};
/*
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 3167480b532c..aa4e2aa86d49 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1595,10 +1595,15 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq)
return BLKPREP_OK;
}
-static void map_request(struct dm_target *ti, struct request *clone,
- struct mapped_device *md)
+/*
+ * Returns:
+ * 0 : the request has been processed (not requeued)
+ * !0 : the request has been requeued
+ */
+static int map_request(struct dm_target *ti, struct request *clone,
+ struct mapped_device *md)
{
- int r;
+ int r, requeued = 0;
struct dm_rq_target_io *tio = clone->end_io_data;
/*
@@ -1625,6 +1630,7 @@ static void map_request(struct dm_target *ti, struct request *clone,
case DM_MAPIO_REQUEUE:
/* The target wants to requeue the I/O */
dm_requeue_unmapped_request(clone);
+ requeued = 1;
break;
default:
if (r > 0) {
@@ -1636,6 +1642,8 @@ static void map_request(struct dm_target *ti, struct request *clone,
dm_kill_unmapped_request(clone, r);
break;
}
+
+ return requeued;
}
/*
@@ -1677,12 +1685,17 @@ static void dm_request_fn(struct request_queue *q)
atomic_inc(&md->pending[rq_data_dir(clone)]);
spin_unlock(q->queue_lock);
- map_request(ti, clone, md);
+ if (map_request(ti, clone, md))
+ goto requeued;
+
spin_lock_irq(q->queue_lock);
}
goto out;
+requeued:
+ spin_lock_irq(q->queue_lock);
+
plug_and_out:
if (!elv_queue_empty(q))
/* Some requests still remain, retry later */
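
map_request() now tells its caller whether the clone was requeued, because dm_request_fn() drops the queue lock around the dispatch and must retake it before it may touch the queue again. A small pthread sketch of that drop-dispatch-retake loop; the queue contents and dispatch() are stand-ins, not the block-layer API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending[] = { 1, 2, 3 };
    static unsigned head;

    /* Returns true when the request had to be requeued (cf. DM_MAPIO_REQUEUE). */
    static bool dispatch(int req)
    {
        printf("dispatching request %d\n", req);
        return req == 2;   /* pretend the target bounced this one */
    }

    int main(void)
    {
        pthread_mutex_lock(&queue_lock);
        while (head < sizeof(pending) / sizeof(pending[0])) {
            int req = pending[head++];

            /* The queue lock is dropped across the actual dispatch... */
            pthread_mutex_unlock(&queue_lock);

            if (dispatch(req)) {
                /* ...so on a requeue the lock must be retaken before the
                 * queue is touched again -- the "goto requeued" path above. */
                pthread_mutex_lock(&queue_lock);
                printf("request %d requeued\n", req);
                break;
            }

            pthread_mutex_lock(&queue_lock);
        }
        pthread_mutex_unlock(&queue_lock);
        return 0;
    }
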
diff --git a/drivers/md/md.c b/drivers/md/md.c
index dd3dfe42d5a9..a20a71e5efd3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4075,8 +4075,10 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
mddev_t *mddev = container_of(ws, mddev_t, del_work);
- if (mddev->private == &md_redundancy_group) {
+ if (mddev->private) {
sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
+ if (mddev->private != (void*)1)
+ sysfs_remove_group(&mddev->kobj, mddev->private);
if (mddev->sysfs_action)
sysfs_put(mddev->sysfs_action);
mddev->sysfs_action = NULL;
@@ -4287,10 +4289,7 @@ static int do_md_run(mddev_t * mddev)
sysfs_notify_dirent(rdev->sysfs_state);
}
- md_probe(mddev->unit, NULL, NULL);
disk = mddev->gendisk;
- if (!disk)
- return -ENOMEM;
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -4530,8 +4529,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
module_put(mddev->pers->owner);
- if (mddev->pers->sync_request)
- mddev->private = &md_redundancy_group;
+ if (mddev->pers->sync_request && mddev->private == NULL)
+ mddev->private = (void*)1;
mddev->pers = NULL;
/* tell userspace to handle 'inactive' */
sysfs_notify_dirent(mddev->sysfs_state);
@@ -4578,9 +4577,6 @@ out:
}
mddev->bitmap_info.offset = 0;
- /* make sure all md_delayed_delete calls have finished */
- flush_scheduled_work();
-
export_array(mddev);
mddev->array_sectors = 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e84204eb12df..ceb24afdc147 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5136,9 +5136,8 @@ static int stop(mddev_t *mddev)
mddev->thread = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
free_conf(conf);
- mddev->private = NULL;
+ mddev->private = &raid5_attrs_group;
return 0;
}
@@ -5464,11 +5463,11 @@ static int raid5_start_reshape(mddev_t *mddev)
!test_bit(Faulty, &rdev->flags)) {
if (raid5_add_disk(mddev, rdev) == 0) {
char nm[20];
- if (rdev->raid_disk >= conf->previous_raid_disks)
+ if (rdev->raid_disk >= conf->previous_raid_disks) {
set_bit(In_sync, &rdev->flags);
- else
+ added_devices++;
+ } else
rdev->recovery_offset = 0;
- added_devices++;
sprintf(nm, "rd%d", rdev->raid_disk);
if (sysfs_create_link(&mddev->kobj,
&rdev->kobj, nm))
@@ -5480,9 +5479,12 @@ static int raid5_start_reshape(mddev_t *mddev)
break;
}
+ /* When a reshape changes the number of devices, ->degraded
+ * is measured against the larger of the pre and post number of
+ * devices. */
if (mddev->delta_disks > 0) {
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
+ mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
- added_devices;
spin_unlock_irqrestore(&conf->device_lock, flags);
}
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index becbaadb3b77..5ed75263340a 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -1333,9 +1333,9 @@ static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
DEB_CAP(("vbuf:%p\n",vb));
- release_all_pagetables(dev, buf);
-
saa7146_dma_free(dev,q,buf);
+
+ release_all_pagetables(dev, buf);
}
static struct videobuf_queue_ops video_qops = {
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index c37790ad92d0..9ddc57909d49 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *inode, struct file *file)
dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
dmxdevfilter->type = DMXDEV_TYPE_NONE;
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
- INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
init_timer(&dmxdevfilter->timer);
dvbdev->users++;
@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev,
dmxdevfilter->type = DMXDEV_TYPE_PES;
memcpy(&dmxdevfilter->params, params,
sizeof(struct dmx_pes_filter_params));
+ INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index b78cfb7d1897..67f189b7aa1f 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -426,16 +426,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
};
};
- if (dvb_demux_tscheck) {
- if (!demux->cnt_storage)
- demux->cnt_storage = vmalloc(MAX_PID + 1);
-
- if (!demux->cnt_storage) {
- printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
- dvb_demux_tscheck = 0;
- goto no_dvb_demux_tscheck;
- }
-
+ if (demux->cnt_storage) {
/* check pkt counter */
if (pid < MAX_PID) {
if (buf[1] & 0x80)
@@ -454,7 +445,6 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
};
/* end check */
};
-no_dvb_demux_tscheck:
list_for_each_entry(feed, &demux->feed_list, list_head) {
if ((feed->pid != pid) && (feed->pid != 0x2000))
@@ -1246,6 +1236,7 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->feed = vmalloc(dvbdemux->feednum * sizeof(struct dvb_demux_feed));
if (!dvbdemux->feed) {
vfree(dvbdemux->filter);
+ dvbdemux->filter = NULL;
return -ENOMEM;
}
for (i = 0; i < dvbdemux->filternum; i++) {
@@ -1257,6 +1248,13 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->feed[i].index = i;
}
+ if (dvb_demux_tscheck) {
+ dvbdemux->cnt_storage = vmalloc(MAX_PID + 1);
+
+ if (!dvbdemux->cnt_storage)
+ printk(KERN_WARNING "Couldn't allocate memory for TS/TEI check. Disabling it\n");
+ }
+
INIT_LIST_HEAD(&dvbdemux->frontend_list);
for (i = 0; i < DMX_TS_PES_OTHER; i++) {
diff --git a/drivers/media/dvb/dvb-usb/Kconfig b/drivers/media/dvb/dvb-usb/Kconfig
index 1b249897c9fb..465295b1d14b 100644
--- a/drivers/media/dvb/dvb-usb/Kconfig
+++ b/drivers/media/dvb/dvb-usb/Kconfig
@@ -112,11 +112,13 @@ config DVB_USB_CXUSB
select DVB_MT352 if !DVB_FE_CUSTOMISE
select DVB_ZL10353 if !DVB_FE_CUSTOMISE
select DVB_DIB7000P if !DVB_FE_CUSTOMISE
- select DVB_LGS8GL5 if !DVB_FE_CUSTOMISE
select DVB_TUNER_DIB0070 if !DVB_FE_CUSTOMISE
+ select DVB_ATBM8830 if !DVB_FE_CUSTOMISE
+ select DVB_LGS8GXX if !DVB_FE_CUSTOMISE
select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMISE
select MEDIA_TUNER_MXL5005S if !MEDIA_TUNER_CUSTOMISE
+ select MEDIA_TUNER_MAX2165 if !MEDIA_TUNER_CUSTOMISE
help
Say Y here to support the Conexant USB2.0 hybrid reference design.
Currently, only DVB and ATSC modes are supported, analog mode
diff --git a/drivers/media/dvb/frontends/l64781.c b/drivers/media/dvb/frontends/l64781.c
index 3051b64aa17c..445fa1068064 100644
--- a/drivers/media/dvb/frontends/l64781.c
+++ b/drivers/media/dvb/frontends/l64781.c
@@ -192,8 +192,8 @@ static int apply_frontend_param (struct dvb_frontend* fe, struct dvb_frontend_pa
spi_bias *= qam_tab[p->constellation];
spi_bias /= p->code_rate_HP + 1;
spi_bias /= (guard_tab[p->guard_interval] + 32);
- spi_bias *= 1000ULL;
- spi_bias /= 1000ULL + ppm/1000;
+ spi_bias *= 1000;
+ spi_bias /= 1000 + ppm/1000;
spi_bias *= p->code_rate_HP;
val0x04 = (p->transmission_mode << 2) | p->guard_interval;
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index 3182a406bdd1..ae08b077fd04 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -4461,6 +4461,7 @@ static int __devinit bttv_probe(struct pci_dev *dev,
request_modules(btv);
}
+ init_bttv_i2c_ir(btv);
bttv_input_init(btv);
/* everything is fine */
diff --git a/drivers/media/video/bt8xx/bttv-i2c.c b/drivers/media/video/bt8xx/bttv-i2c.c
index 63aa31a041e8..407fa61e4cda 100644
--- a/drivers/media/video/bt8xx/bttv-i2c.c
+++ b/drivers/media/video/bt8xx/bttv-i2c.c
@@ -388,7 +388,12 @@ int __devinit init_bttv_i2c(struct bttv *btv)
if (0 == btv->i2c_rc && i2c_scan)
do_i2c_scan(btv->c.v4l2_dev.name, &btv->i2c_client);
- /* Instantiate the IR receiver device, if present */
+ return btv->i2c_rc;
+}
+
+/* Instantiate the I2C IR receiver device, if present */
+void __devinit init_bttv_i2c_ir(struct bttv *btv)
+{
if (0 == btv->i2c_rc) {
struct i2c_board_info info;
/* The external IR receiver is at i2c address 0x34 (0x35 for
@@ -408,7 +413,6 @@ int __devinit init_bttv_i2c(struct bttv *btv)
strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
i2c_new_probed_device(&btv->c.i2c_adap, &info, addr_list);
}
- return btv->i2c_rc;
}
int __devexit fini_bttv_i2c(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index a1d0e9c9f286..6cccc2a17eee 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -279,6 +279,7 @@ extern unsigned int bttv_debug;
extern unsigned int bttv_gpio;
extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
extern int init_bttv_i2c(struct bttv *btv);
+extern void init_bttv_i2c_ir(struct bttv *btv);
extern int fini_bttv_i2c(struct bttv *btv);
#define bttv_printk if (bttv_verbose) printk
diff --git a/drivers/media/video/mt9t112.c b/drivers/media/video/mt9t112.c
index fc4dd6045720..7438f8d775ba 100644
--- a/drivers/media/video/mt9t112.c
+++ b/drivers/media/video/mt9t112.c
@@ -514,7 +514,7 @@ static int mt9t112_init_pll(const struct i2c_client *client)
/* poll to verify out of standby. Must Poll this bit */
for (i = 0; i < 100; i++) {
mt9t112_reg_read(data, client, 0x0018);
- if (0x4000 & data)
+ if (!(0x4000 & data))
break;
mdelay(10);
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 50b415e07eda..f7f7e04cf485 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -753,7 +753,7 @@ int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
buf[0] = 0xff; /* fixed */
ret = send_control_msg(pdev,
- SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, sizeof(buf));
+ SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
if (!mode && ret >= 0) {
if (value < 0)
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index 7dfecfc6017c..ee5bff02a92c 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -93,9 +93,9 @@ static int ts_open(struct file *file)
dprintk("open dev=%s\n", video_device_node_name(vdev));
err = -EBUSY;
if (!mutex_trylock(&dev->empress_tsq.vb_lock))
- goto done;
+ return err;
if (atomic_read(&dev->empress_users))
- goto done_up;
+ goto done;
/* Unmute audio */
saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
@@ -105,10 +105,8 @@ static int ts_open(struct file *file)
file->private_data = dev;
err = 0;
-done_up:
- mutex_unlock(&dev->empress_tsq.vb_lock);
done:
- unlock_kernel();
+ mutex_unlock(&dev->empress_tsq.vb_lock);
return err;
}
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 57752751712b..81279b3d694c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1796,7 +1796,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
"Command not in the active list! (sc=%p)\n", ioc->name,
SCpnt));
- retval = 0;
+ retval = SUCCESS;
goto out;
}
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b9f1e84897cc..e7f8027165e6 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -74,6 +74,9 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
}
mrq->cmd->arg = dev_addr;
+ if (!mmc_card_blockaddr(test->card))
+ mrq->cmd->arg <<= 9;
+
mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
if (blocks == 1)
@@ -190,7 +193,7 @@ static int __mmc_test_prepare(struct mmc_test_card *test, int write)
}
for (i = 0;i < BUFFER_SIZE / 512;i++) {
- ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+ ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
}
@@ -219,7 +222,7 @@ static int mmc_test_cleanup(struct mmc_test_card *test)
memset(test->buffer, 0, 512);
for (i = 0;i < BUFFER_SIZE / 512;i++) {
- ret = mmc_test_buffer_transfer(test, test->buffer, i * 512, 512, 1);
+ ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
if (ret)
return ret;
}
@@ -426,7 +429,7 @@ static int mmc_test_transfer(struct mmc_test_card *test,
for (i = 0;i < sectors;i++) {
ret = mmc_test_buffer_transfer(test,
test->buffer + i * 512,
- dev_addr + i * 512, 512, 0);
+ dev_addr + i, 512, 0);
if (ret)
return ret;
}
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 62d9c9cc5671..1dd4403247ca 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -921,7 +921,7 @@ static int ax_probe(struct platform_device *pdev)
size = (res->end - res->start) + 1;
ax->mem2 = request_mem_region(res->start, size, pdev->name);
- if (ax->mem == NULL) {
+ if (ax->mem2 == NULL) {
dev_err(&pdev->dev, "cannot reserve registers\n");
ret = -ENXIO;
goto exit_mem1;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index fee6eee7ae5b..006cb2efcd22 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -296,6 +296,7 @@ static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
+ req_hdr->version = 0;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..318a018ca7c5 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -2079,6 +2079,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
struct sge_fl *fl, int len, int complete)
{
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+ struct port_info *pi = netdev_priv(qs->netdev);
struct sk_buff *skb = NULL;
struct cpl_rx_pkt *cpl;
struct skb_frag_struct *rx_frag;
@@ -2116,11 +2117,18 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
if (!nr_frags) {
offset = 2 + sizeof(struct cpl_rx_pkt);
- qs->lro_va = sd->pg_chunk.va + 2;
- }
- len -= offset;
+ cpl = qs->lro_va = sd->pg_chunk.va + 2;
- prefetch(qs->lro_va);
+ if ((pi->rx_offload & T3_RX_CSUM) &&
+ cpl->csum_valid && cpl->csum == htons(0xffff)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+ } else
+ cpl = qs->lro_va;
+
+ len -= offset;
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
@@ -2136,12 +2144,8 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
return;
skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- cpl = qs->lro_va;
if (unlikely(cpl->vlan_valid)) {
- struct net_device *dev = qs->netdev;
- struct port_info *pi = netdev_priv(dev);
struct vlan_group *grp = pi->vlan_grp;
if (likely(grp != NULL)) {
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index d29bb532eccf..765543663a4f 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4006,11 +4006,21 @@ check_page:
}
}
- if (!buffer_info->dma)
+ if (!buffer_info->dma) {
buffer_info->dma = pci_map_page(pdev,
buffer_info->page, 0,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ put_page(buffer_info->page);
+ dev_kfree_skb(skb);
+ buffer_info->page = NULL;
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
+ }
rx_desc = E1000_RX_DESC(*rx_ring, i);
rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4101,6 +4111,13 @@ map_skb:
skb->data,
buffer_info->length,
PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
+ dev_kfree_skb(skb);
+ buffer_info->skb = NULL;
+ buffer_info->dma = 0;
+ adapter->alloc_rx_buff_failed++;
+ break; /* while !buffer_info->skb */
+ }
/*
* XXX if it was allocated cleanly it will never map to a
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 997124d2992a..c881347cb26d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -421,6 +421,8 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
if (tx_queue > IGB_N0_QUEUE)
msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+ if (!adapter->msix_entries && msix_vector == 0)
+ msixbm |= E1000_EIMS_OTHER;
array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
q_vector->eims_value = msixbm;
break;
@@ -877,7 +879,6 @@ static int igb_request_irq(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
- struct e1000_hw *hw = &adapter->hw;
int err = 0;
if (adapter->msix_entries) {
@@ -909,20 +910,7 @@ static int igb_request_irq(struct igb_adapter *adapter)
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
} else {
- switch (hw->mac.type) {
- case e1000_82575:
- wr32(E1000_MSIXBM(0),
- (E1000_EICR_RX_QUEUE0 |
- E1000_EICR_TX_QUEUE0 |
- E1000_EIMS_OTHER));
- break;
- case e1000_82580:
- case e1000_82576:
- wr32(E1000_IVAR0, E1000_IVAR_VALID);
- break;
- default:
- break;
- }
+ igb_assign_vector(adapter->q_vector[0], 0);
}
if (adapter->flags & IGB_FLAG_HAS_MSI) {
@@ -1140,6 +1128,8 @@ int igb_up(struct igb_adapter *adapter)
}
if (adapter->msix_entries)
igb_configure_msix(adapter);
+ else
+ igb_assign_vector(adapter->q_vector[0], 0);
/* Clear any pending interrupts. */
rd32(E1000_ICR);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 3103f4165311..35a06b47587b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -357,12 +357,34 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
u32 fctrl_reg;
u32 rmcs_reg;
u32 reg;
+ u32 link_speed = 0;
+ bool link_up;
#ifdef CONFIG_DCB
if (hw->fc.requested_mode == ixgbe_fc_pfc)
goto out;
#endif /* CONFIG_DCB */
+ /*
+ * On 82598, having Rx FC on causes resets while doing 1G,
+ * so if it's on, turn it off once we know link_speed. For
+ * more details, see the 82598 Specification Update.
+ */
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ hw->fc.requested_mode = ixgbe_fc_tx_pause;
+ break;
+ case ixgbe_fc_rx_pause:
+ hw->fc.requested_mode = ixgbe_fc_none;
+ break;
+ default:
+ /* no change */
+ break;
+ }
+ }
+
/* Negotiate the fc mode to use */
ret_val = ixgbe_fc_autoneg(hw);
if (ret_val)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index b5f64ad67975..951b73cf5ca2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -5179,7 +5179,7 @@ dma_error:
ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
}
- return count;
+ return 0;
}
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
@@ -5329,8 +5329,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+ while (unlikely(txq >= dev->real_num_tx_queues))
+ txq -= dev->real_num_tx_queues;
return txq;
+ }
#ifdef IXGBE_FCOE
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
@@ -5760,6 +5763,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /* Make it possible for the adapter to be woken up via WOL */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
/*
* If there is a fan on this device and it has failed log the
* failure.
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9f9d6081959b..24279e6e55f5 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1941,7 +1941,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
netif_wake_queue(adapter->netdev);
clear_bit(__NX_RESETTING, &adapter->state);
-
+ return;
} else {
clear_bit(__NX_RESETTING, &adapter->state);
if (!netxen_nic_reset_context(adapter)) {
@@ -2240,7 +2240,9 @@ netxen_detach_work(struct work_struct *work)
netxen_nic_down(adapter, netdev);
+ rtnl_lock();
netxen_nic_detach(adapter);
+ rtnl_unlock();
status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 103e8b0e2a0d..46997e177ee3 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -2284,6 +2284,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
fail2:
efx_fini_struct(efx);
fail1:
+ WARN_ON(rc > 0);
EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
free_netdev(net_dev);
return rc;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index bf0b96af5334..5712fddd72f2 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -29,6 +29,15 @@
#define FALCON_BOARD_SFN4111T 0x51
#define FALCON_BOARD_SFN4112F 0x52
+/* Board temperature is about 15°C above ambient when air flow is
+ * limited. */
+#define FALCON_BOARD_TEMP_BIAS 15
+
+/* SFC4000 datasheet says: 'The maximum permitted junction temperature
+ * is 125°C; the thermal design of the environment for the SFC4000
+ * should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MAX 90
+
/*****************************************************************************
* Support for LM87 sensor chip used on several boards
*/
@@ -548,16 +557,16 @@ fail_hwmon:
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfe4002_lm87_regs[] = {
- LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
- LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
- LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
- LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
- LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
- LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
- LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
- LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
- LM87_TEMP_INT_LIMITS(10, 60), /* board */
- LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
@@ -619,14 +628,14 @@ static int sfe4002_init(struct efx_nic *efx)
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
static const u8 sfn4112f_lm87_regs[] = {
- LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
- LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
- LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
- LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
- LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
- LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
- LM87_TEMP_INT_LIMITS(10, 60), /* board */
- LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
+ LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */
+ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */
+ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */
+ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */
+ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */
+ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */
+ LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS),
+ LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX),
0
};
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 9f035b9f0350..f66b3da6ddff 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -127,7 +127,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
efx_dword_t reg;
/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
- rc = efx_mcdi_poll_reboot(efx);
+ rc = -efx_mcdi_poll_reboot(efx);
if (rc)
goto out;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index e0d13a451019..67eec7a6e487 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -320,7 +320,7 @@ static int qt202x_reset_phy(struct efx_nic *efx)
falcon_board(efx)->type->init_phy(efx);
- return rc;
+ return 0;
fail:
EFX_ERR(efx, "PHY reset timed out\n");
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d760650c5c04..67249c3c9f50 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1025,11 +1025,8 @@ static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2, u16 *slot)
{
struct sky2_tx_le *le = sky2->tx_le + *slot;
- struct tx_ring_info *re = sky2->tx_ring + *slot;
*slot = RING_NEXT(*slot, sky2->tx_ring_size);
- re->flags = 0;
- re->skb = NULL;
le->ctrl = 0;
return le;
}
@@ -1622,8 +1619,7 @@ static unsigned tx_le_req(const struct sk_buff *skb)
return count;
}
-static void sky2_tx_unmap(struct pci_dev *pdev,
- const struct tx_ring_info *re)
+static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
{
if (re->flags & TX_MAP_SINGLE)
pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
@@ -1633,6 +1629,7 @@ static void sky2_tx_unmap(struct pci_dev *pdev,
pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr),
pci_unmap_len(re, maplen),
PCI_DMA_TODEVICE);
+ re->flags = 0;
}
/*
@@ -1839,6 +1836,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
+ re->skb = NULL;
dev_kfree_skb_any(skb);
sky2->tx_next = RING_NEXT(idx, sky2->tx_ring_size);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..d71c1976072e 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1437,7 +1437,6 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
/* Transmit complete. */
lp->lstats.tx_ints++;
tc35815_txdone(dev);
- netif_wake_queue(dev);
if (ret < 0)
ret = 0;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 4f27f022fbf7..5f3b9eaeb04f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -584,6 +584,11 @@ static const struct usb_device_id products [] = {
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &mbm_info,
}, {
+ /* Ericsson C3607w ver 2 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &mbm_info,
+}, {
/* Toshiba F3507g */
USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index c93f58f5c6f2..317aa34b21cf 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1877,13 +1877,12 @@ static void velocity_error(struct velocity_info *vptr, int status)
/**
* tx_srv - transmit interrupt service
* @vptr; Velocity
- * @status:
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
* necessary/
*/
-static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
+static int velocity_tx_srv(struct velocity_info *vptr)
{
struct tx_desc *td;
int qnum;
@@ -2090,14 +2089,12 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
- * @status: adapter status (unused)
*
* Walk the receive ring of the velocity adapter and remove
* any received packets from the receive queue. Hand the ring
* slots back to the adapter for reuse.
*/
-static int velocity_rx_srv(struct velocity_info *vptr, int status,
- int budget_left)
+static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
{
struct net_device_stats *stats = &vptr->dev->stats;
int rd_curr = vptr->rx.curr;
@@ -2151,32 +2148,24 @@ static int velocity_poll(struct napi_struct *napi, int budget)
struct velocity_info *vptr = container_of(napi,
struct velocity_info, napi);
unsigned int rx_done;
- u32 isr_status;
-
- spin_lock(&vptr->lock);
- isr_status = mac_read_isr(vptr->mac_regs);
-
- /* Ack the interrupt */
- mac_write_isr(vptr->mac_regs, isr_status);
- if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
- velocity_error(vptr, isr_status);
+ unsigned long flags;
+ spin_lock_irqsave(&vptr->lock, flags);
/*
* Do rx and tx twice for performance (taken from the VIA
* out-of-tree driver).
*/
- rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
- velocity_tx_srv(vptr, isr_status);
- rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
- velocity_tx_srv(vptr, isr_status);
-
- spin_unlock(&vptr->lock);
+ rx_done = velocity_rx_srv(vptr, budget / 2);
+ velocity_tx_srv(vptr);
+ rx_done += velocity_rx_srv(vptr, budget - rx_done);
+ velocity_tx_srv(vptr);
/* If budget not fully consumed, exit the polling mode */
if (rx_done < budget) {
napi_complete(napi);
mac_enable_int(vptr->mac_regs);
}
+ spin_unlock_irqrestore(&vptr->lock, flags);
return rx_done;
}
@@ -2206,10 +2195,17 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
return IRQ_NONE;
}
+ /* Ack the interrupt */
+ mac_write_isr(vptr->mac_regs, isr_status);
+
if (likely(napi_schedule_prep(&vptr->napi))) {
mac_disable_int(vptr->mac_regs);
__napi_schedule(&vptr->napi);
}
+
+ if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
+ velocity_error(vptr, isr_status);
+
spin_unlock(&vptr->lock);
return IRQ_HANDLED;
@@ -3100,7 +3096,7 @@ static int velocity_resume(struct pci_dev *pdev)
velocity_init_registers(vptr, VELOCITY_INIT_WOL);
mac_disable_int(vptr->mac_regs);
- velocity_tx_srv(vptr, 0);
+ velocity_tx_srv(vptr);
for (i = 0; i < vptr->tx.numq; i++) {
if (vptr->tx.used[i])
@@ -3344,6 +3340,7 @@ static int velocity_set_coalesce(struct net_device *dev,
{
struct velocity_info *vptr = netdev_priv(dev);
int max_us = 0x3f * 64;
+ unsigned long flags;
/* 6 bits of */
if (ecmd->tx_coalesce_usecs > max_us)
@@ -3365,6 +3362,7 @@ static int velocity_set_coalesce(struct net_device *dev,
ecmd->tx_coalesce_usecs);
/* Setup the interrupt suppression and queue timers */
+ spin_lock_irqsave(&vptr->lock, flags);
mac_disable_int(vptr->mac_regs);
setup_adaptive_interrupts(vptr);
setup_queue_timers(vptr);
@@ -3372,6 +3370,7 @@ static int velocity_set_coalesce(struct net_device *dev,
mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
mac_clear_isr(vptr->mac_regs);
mac_enable_int(vptr->mac_regs);
+ spin_unlock_irqrestore(&vptr->lock, flags);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0b..29bf33692f71 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1615,7 +1615,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
bf->bf_frmlen -= padsize;
}
- if (conf_is_ht(&hw->conf) && !is_pae(skb))
+ if (conf_is_ht(&hw->conf))
bf->bf_state.bf_type |= BUF_HT;
bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1701,7 +1701,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
goto tx_done;
}
- if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && !is_pae(skb)) {
/*
* Try aggregation if it's a unicast data frame
* and the destination is HT capable.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf9491997..c484cc253892 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -115,6 +115,7 @@
#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
#define B43_MMIO_RNG 0x65A
+#define B43_MMIO_IFSSLOT 0x684 /* Interframe slot time */
#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
#define B43_MMIO_POWERUP_DELAY 0x6A8
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f26..490fb45d1d05 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -628,10 +628,17 @@ static void b43_upload_card_macaddress(struct b43_wldev *dev)
static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
{
/* slot_time is in usec. */
- if (dev->phy.type != B43_PHYTYPE_G)
+ /* This test used to exit for all but a G PHY. */
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
return;
- b43_write16(dev, 0x684, 510 + slot_time);
- b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
+ /* Shared memory location 0x0010 is the slot time and should be
+ * set to slot_time; however, this register is initially 0 and changing
+ * the value adversely affects the transmit rate for BCM4311
+ * devices. Until this behavior is understood, delete this step
+ *
+ * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
+ */
}
static void b43_short_slot_timing_enable(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 9b4b8b5c7574..31462813bac0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2008,7 +2008,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de45f308b744..cffaae772d51 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1125,7 +1125,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
scd_ssn , index, txq_id, txq->swq_id);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
@@ -1153,16 +1153,14 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
tx_resp->failure_frame);
freed = iwl_tx_queue_reclaim(priv, txq_id, index);
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
- if (ieee80211_is_data_qos(tx_resp->frame_ctrl))
- iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+ iwl_txq_check_empty(priv, sta_id, tid, txq_id);
if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 5461f105bd2d..f36f804804fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2745,6 +2745,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
priv->staging_rxon.flags = 0;
iwl_set_rxon_channel(priv, conf->channel);
+ iwl_set_rxon_ht(priv, ht_conf);
iwl_set_flags_for_band(priv, conf->channel->band);
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e7453..b69e972671b2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -446,6 +446,8 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 6f36b6e79f5e..2dbce85404aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -928,7 +928,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
if (ieee80211_is_mgmt(fc) ||
ieee80211_has_protected(fc) ||
ieee80211_has_morefrags(fc) ||
- le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+ le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
+ (ieee80211_is_data_qos(fc) &&
+ *ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
ret = skb_linearize(skb);
else
ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 87ce2bd292c7..8f4071562857 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -120,6 +120,20 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+ int sta_id, int tid, int freed)
+{
+ if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ IWL_ERR(priv, "free more than tfds_in_queue (%u:%d)\n",
+ priv->stations[sta_id].tid[tid].tfds_in_queue,
+ freed);
+ priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
/**
* iwl_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
@@ -1131,6 +1145,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
struct iwl_queue *q = &txq->q;
struct iwl_tx_info *tx_info;
int nfreed = 0;
+ struct ieee80211_hdr *hdr;
if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1145,13 +1160,16 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
tx_info = &txq->txb[txq->q.read_ptr];
iwl_tx_status(priv, tx_info->skb[0]);
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+ if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
tx_info->skb[0] = NULL;
if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- nfreed++;
}
return nfreed;
}
@@ -1559,7 +1577,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
/* calculate mac80211 ampdu sw queue to wake */
int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
priv->mac80211_registered &&
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 6d6ed7485175..f727b4a83196 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -794,7 +794,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
}
bss->bss = kzalloc(bss_len, GFP_KERNEL);
- if (!bss) {
+ if (!bss->bss) {
kfree(bss);
IWM_ERR(iwm, "Couldn't allocate bss\n");
return -ENOMEM;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe4..7ba3052b0708 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -65,6 +65,7 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
/* Sitecom */
{USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
{USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+ {USB_DEVICE(0x0df6, 0x0029), .driver_info = DEVICE_RTL8187B},
/* Sphairon Access Systems GmbH */
{USB_DEVICE(0x114B, 0x0150), .driver_info = DEVICE_RTL8187},
/* Dick Smith Electronics */
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 8e952fdab764..cb2fd01eddae 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
-ret_val);
goto acpiphp_bus_add_out;
}
- /*
- * try to start anyway. We could have failed to add
- * simply because this bus had previously been added
- * on another add. Don't bother with the return value
- * we just keep going.
- */
ret_val = acpi_bus_start(device);
acpiphp_bus_add_out:
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c74694345b6e..d58b94030ef3 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -338,6 +338,23 @@ static void __devinit quirk_s3_64M(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
+/*
+ * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
+ * ver. 1.33 20070103) don't set the correct ISA PCI region header info.
+ * BAR0 should be 8 bytes; instead, it may be set to something like 8k
+ * (which conflicts w/ BAR1's memory range).
+ */
+static void __devinit quirk_cs5536_vsa(struct pci_dev *dev)
+{
+ if (pci_resource_len(dev, 0) != 8) {
+ struct resource *res = &dev->resource[0];
+ res->end = res->start + 8 - 1;
+ dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
+ "(incorrect header); workaround applied.\n");
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
+
static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
unsigned size, int nr, const char *name)
{
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 07d14dfdf0b4..226b3e93498c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -934,7 +934,7 @@ static int __devinit acer_backlight_init(struct device *dev)
acer_backlight_device = bd;
bd->props.power = FB_BLANK_UNBLANK;
- bd->props.brightness = max_brightness;
+ bd->props.brightness = read_brightness(bd);
bd->props.max_brightness = max_brightness;
backlight_update_status(bd);
return 0;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index e67e4feb35cb..eb603f1d55ca 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
case TPACPI_THERMAL_ACPI_TMP07:
case TPACPI_THERMAL_ACPI_UPDT:
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
- &thermal_temp_input16_group);
+ &thermal_temp_input8_group);
break;
case TPACPI_THERMAL_NONE:
default:
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index fa39e759a275..6ea3cb5837c7 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -175,8 +175,14 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
dev_err(&dev->dev, "Do not pass platform_data through "
"wm97xx_bat_set_pdata!\n");
return -EINVAL;
- } else
- pdata = wmdata->batt_pdata;
+ }
+
+ if (!wmdata) {
+ dev_err(&dev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ pdata = wmdata->batt_pdata;
if (dev->id != -1)
return -EINVAL;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 686ef270ecf7..b60a4c9f8f16 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -661,7 +661,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
static void print_constraints(struct regulator_dev *rdev)
{
struct regulation_constraints *constraints = rdev->constraints;
- char buf[80];
+ char buf[80] = "";
int count = 0;
int ret;
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 76d08c282f9c..4f33a0f4a179 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -183,7 +183,7 @@ static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
if (vol_map[val] >= min_vol)
break;
- if (vol_map[val] > max_vol)
+ if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
@@ -272,7 +272,7 @@ static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
if (vol_map[val] >= min_vol)
break;
- if (vol_map[val] > max_vol)
+ if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
return -EINVAL;
ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 999fe80c4051..62b654af9237 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -531,7 +531,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0);
- if (state == SLSB_P_INPUT_PRIMED)
+ if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
return 0;
@@ -960,6 +960,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_handle_activate_check(cdev, intparm, cstat,
dstat);
break;
+ case QDIO_IRQ_STATE_STOPPED:
+ break;
default:
WARN_ON(1);
}
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 0f7b493fb105..271399f62f1b 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -671,12 +671,11 @@ static void zfcp_fc_ct_els_job_handler(void *data)
{
struct fc_bsg_job *job = data;
struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
- int status = zfcp_ct_els->status;
- int reply_status;
+ struct fc_bsg_reply *jr = job->reply;
- reply_status = status ? FC_CTELS_STATUS_REJECT : FC_CTELS_STATUS_OK;
- job->reply->reply_data.ctels_reply.status = reply_status;
- job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ jr->reply_payload_rcv_len = job->reply_payload.payload_len;
+ jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ jr->result = zfcp_ct_els->status ? -EIO : 0;
job->job_done(job);
}
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 477542602284..9e71ac611146 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -2516,7 +2516,7 @@ int fas216_eh_device_reset(struct scsi_cmnd *SCpnt)
if (info->scsi.phase == PHASE_IDLE)
fas216_kick(info);
- mod_timer(&info->eh_timer, 30 * HZ);
+ mod_timer(&info->eh_timer, jiffies + 30 * HZ);
spin_unlock_irqrestore(&info->host_lock, flags);
/*
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 10be9f36a4cc..2f47ae7cce91 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -2009,6 +2009,8 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
fcoe_interface_cleanup(fcoe);
rtnl_unlock();
fcoe_if_destroy(fcoe->ctlr.lp);
+ module_put(THIS_MODULE);
+
out_putdev:
dev_put(netdev);
out_nodev:
@@ -2059,6 +2061,11 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
}
#endif
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -EINVAL;
+ goto out_nomod;
+ }
+
rtnl_lock();
netdev = fcoe_if_to_netdev(buffer);
if (!netdev) {
@@ -2099,17 +2106,24 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
if (!fcoe_link_ok(lport))
fcoe_ctlr_link_up(&fcoe->ctlr);
- rc = 0;
-out_free:
/*
* Release from init in fcoe_interface_create(), on success lport
* should be holding a reference taken in fcoe_if_create().
*/
fcoe_interface_put(fcoe);
+ dev_put(netdev);
+ rtnl_unlock();
+ mutex_unlock(&fcoe_config_mutex);
+
+ return 0;
+out_free:
+ fcoe_interface_put(fcoe);
out_putdev:
dev_put(netdev);
out_nodev:
rtnl_unlock();
+ module_put(THIS_MODULE);
+out_nomod:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index 9823291395ad..511cb6b371ee 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -1187,7 +1187,7 @@ static void fcoe_ctlr_timeout(unsigned long arg)
next_timer = fip->ctlr_ka_time;
if (time_after_eq(jiffies, fip->port_ka_time)) {
- fip->port_ka_time += jiffies +
+ fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_KA_PERIOD);
fip->send_port_ka = 1;
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 19d711cb938c..7f4364770e4a 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1890,7 +1890,7 @@ static struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
fc_exch_setup_hdr(ep, fp, ep->f_ctl);
sp->cnt++;
- if (ep->xid <= lport->lro_xid)
+ if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD)
fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
if (unlikely(lport->tt.frame_send(lport, fp)))
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 881d5dfe8c74..6fde2fabfd9b 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -298,9 +298,6 @@ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
struct fc_lport *lport;
- if (!fsp)
- return;
-
lport = fsp->lp;
if ((fsp->req_flags & FC_SRB_READ) &&
(lport->lro_enabled) && (lport->tt.ddp_setup)) {
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 0b165024a219..7ec8ce75007c 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1800,7 +1800,8 @@ int fc_lport_bsg_request(struct fc_bsg_job *job)
u32 did;
job->reply->reply_payload_rcv_len = 0;
- rsp->resid_len = job->reply_payload.payload_len;
+ if (rsp)
+ rsp->resid_len = job->reply_payload.payload_len;
mutex_lock(&lport->lp_mutex);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 02300523b234..97923bb07765 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -623,7 +623,7 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
tov = ntohl(plp->fl_csp.sp_e_d_tov);
if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
- tov /= 1000;
+ tov /= 1000000;
if (tov > rdata->e_d_tov)
rdata->e_d_tov = tov;
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index db6856c138fc..4ad87fd74ddd 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -992,12 +992,10 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)
if (r2t == NULL) {
if (kfifo_out(&tcp_task->r2tqueue,
(void *)&tcp_task->r2t, sizeof(void *)) !=
- sizeof(void *)) {
- WARN_ONCE(1, "unexpected fifo state");
+ sizeof(void *))
r2t = NULL;
- }
-
- r2t = tcp_task->r2t;
+ else
+ r2t = tcp_task->r2t;
}
spin_unlock_bh(&session->lock);
}
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index 708ea3157b60..d9b8ca5116bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -3781,6 +3781,7 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
compat_alloc_user_space(sizeof(struct megasas_iocpacket));
int i;
int error = 0;
+ compat_uptr_t ptr;
if (clear_user(ioc, sizeof(*ioc)))
return -EFAULT;
@@ -3793,9 +3794,22 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
return -EFAULT;
- for (i = 0; i < MAX_IOCTL_SGE; i++) {
- compat_uptr_t ptr;
+ /*
+ * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
+ * sense_len is non-zero, so prepare the 64-bit value under
+ * the same condition.
+ */
+ if (ioc->sense_len) {
+ void __user **sense_ioc_ptr =
+ (void __user **)(ioc->frame.raw + ioc->sense_off);
+ compat_uptr_t *sense_cioc_ptr =
+ (compat_uptr_t *)(cioc->frame.raw + cioc->sense_off);
+ if (get_user(ptr, sense_cioc_ptr) ||
+ put_user(compat_ptr(ptr), sense_ioc_ptr))
+ return -EFAULT;
+ }
+ for (i = 0; i < MAX_IOCTL_SGE; i++) {
if (get_user(ptr, &cioc->sgl[i].iov_base) ||
put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
copy_in_user(&ioc->sgl[i].iov_len,
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index f61fb8d01330..8bc6f53691e9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -453,6 +453,5 @@ extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
-extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ffd0efdff40e..6fc63b98818c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1917,6 +1917,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
struct rsp_que *rsp;
struct device_reg_24xx __iomem *reg;
struct scsi_qla_host *vha;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -1927,15 +1928,15 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
ha = rsp->hw;
reg = &ha->iobase->isp24;
- spin_lock_irq(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
- vha = qla25xx_get_host(rsp);
+ vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
if (!ha->flags.disable_msix_handshake) {
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
}
- spin_unlock_irq(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
return IRQ_HANDLED;
}
@@ -1946,6 +1947,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
struct qla_hw_data *ha;
struct rsp_que *rsp;
struct device_reg_24xx __iomem *reg;
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -1958,10 +1960,10 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
/* Clear the interrupt, if enabled, for this response queue */
if (rsp->options & ~BIT_6) {
reg = &ha->iobase->isp24;
- spin_lock_irq(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
RD_REG_DWORD_RELAXED(&reg->hccr);
- spin_unlock_irq(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
@@ -1979,6 +1981,7 @@ qla24xx_msix_default(int irq, void *dev_id)
uint32_t stat;
uint32_t hccr;
uint16_t mb[4];
+ unsigned long flags;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -1990,7 +1993,7 @@ qla24xx_msix_default(int irq, void *dev_id)
reg = &ha->iobase->isp24;
status = 0;
- spin_lock_irq(&ha->hardware_lock);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
do {
stat = RD_REG_DWORD(&reg->host_status);
@@ -2039,7 +2042,7 @@ qla24xx_msix_default(int irq, void *dev_id)
}
WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
} while (0);
- spin_unlock_irq(&ha->hardware_lock);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
@@ -2277,30 +2280,3 @@ int qla25xx_request_irq(struct rsp_que *rsp)
msix->rsp = rsp;
return ret;
}
-
-struct scsi_qla_host *
-qla25xx_get_host(struct rsp_que *rsp)
-{
- srb_t *sp;
- struct qla_hw_data *ha = rsp->hw;
- struct scsi_qla_host *vha = NULL;
- struct sts_entry_24xx *pkt;
- struct req_que *req;
- uint16_t que;
- uint32_t handle;
-
- pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
- que = MSW(pkt->handle);
- handle = (uint32_t) LSW(pkt->handle);
- req = ha->req_q_map[que];
- if (handle < MAX_OUTSTANDING_COMMANDS) {
- sp = req->outstanding_cmds[handle];
- if (sp)
- return sp->fcport->vha;
- else
- goto base_que;
- }
-base_que:
- vha = pci_get_drvdata(ha->pdev);
- return vha;
-}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index b901aa267e7d..ff17dee28613 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -636,13 +636,15 @@ failed:
static void qla_do_work(struct work_struct *work)
{
+ unsigned long flags;
struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
struct scsi_qla_host *vha;
+ struct qla_hw_data *ha = rsp->hw;
- spin_lock_irq(&rsp->hw->hardware_lock);
- vha = qla25xx_get_host(rsp);
+ spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
+ vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, rsp);
- spin_unlock_irq(&rsp->hw->hardware_lock);
+ spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}
/* create response queue */
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index c3e37c8e7e26..e9b15c3746fa 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -83,6 +83,9 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
#define PASS_LIMIT 256
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+
/*
* We default to IRQ0 for the "no irq" hack. Some
* machine types want others as well - they're free
@@ -1792,7 +1795,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port)
up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
spin_unlock_irqrestore(&up->port.lock, flags);
- return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
+ return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0;
}
static unsigned int serial8250_get_mctrl(struct uart_port *port)
@@ -1850,8 +1853,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
/*
* Wait for transmitter & holding register to empty
*/
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 5681ebed9c65..03dfd27c4bfb 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -494,8 +494,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
#endif
break;
case SSB_BUSTYPE_SDIO:
-#ifdef CONFIG_SSB_SDIO
- sdev->irq = bus->host_sdio->dev.irq;
+#ifdef CONFIG_SSB_SDIOHOST
dev->parent = &bus->host_sdio->dev;
#endif
break;
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6e8bcdfd23b4..a678186f218f 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1312,9 +1312,9 @@ static int processcompl(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer)
+ if (as->userbuffer && urb->actual_length)
if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->transfer_buffer_length))
+ urb->actual_length))
goto err_out;
if (put_user(as->status, &userurb->status))
goto err_out;
@@ -1334,14 +1334,11 @@ static int processcompl(struct async *as, void __user * __user *arg)
}
}
- free_async(as);
-
if (put_user(addr, (void __user * __user *)arg))
return -EFAULT;
return 0;
err_out:
- free_async(as);
return -EFAULT;
}
@@ -1371,8 +1368,11 @@ static struct async *reap_as(struct dev_state *ps)
static int proc_reapurb(struct dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
- if (as)
- return processcompl(as, (void __user * __user *)arg);
+ if (as) {
+ int retval = processcompl(as, (void __user * __user *)arg);
+ free_async(as);
+ return retval;
+ }
if (signal_pending(current))
return -EINTR;
return -EIO;
@@ -1380,11 +1380,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg)
static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
{
+ int retval;
struct async *as;
- if (!(as = async_getcompleted(ps)))
- return -EAGAIN;
- return processcompl(as, (void __user * __user *)arg);
+ as = async_getcompleted(ps);
+ retval = -EAGAIN;
+ if (as) {
+ retval = processcompl(as, (void __user * __user *)arg);
+ free_async(as);
+ }
+ return retval;
}
#ifdef CONFIG_COMPAT
@@ -1475,9 +1480,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer)
+ if (as->userbuffer && urb->actual_length)
if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->transfer_buffer_length))
+ urb->actual_length))
return -EFAULT;
if (put_user(as->status, &userurb->status))
return -EFAULT;
@@ -1497,7 +1502,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
}
}
- free_async(as);
if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
return -EFAULT;
return 0;
@@ -1506,8 +1510,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
{
struct async *as = reap_as(ps);
- if (as)
- return processcompl_compat(as, (void __user * __user *)arg);
+ if (as) {
+ int retval = processcompl_compat(as, (void __user * __user *)arg);
+ free_async(as);
+ return retval;
+ }
if (signal_pending(current))
return -EINTR;
return -EIO;
@@ -1515,11 +1522,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
{
+ int retval;
struct async *as;
- if (!(as = async_getcompleted(ps)))
- return -EAGAIN;
- return processcompl_compat(as, (void __user * __user *)arg);
+ retval = -EAGAIN;
+ as = async_getcompleted(ps);
+ if (as) {
+ retval = processcompl_compat(as, (void __user * __user *)arg);
+ free_async(as);
+ }
+ return retval;
}
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 0a577d5694fd..d4f0db58a8ad 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -358,7 +358,7 @@ done:
* b15: bmType (0 == data)
*/
len = skb->len;
- put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2));
+ put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
/* add a zero-length EEM packet, if needed */
if (padlen)
@@ -464,7 +464,6 @@ static int eem_unwrap(struct gether *port,
}
/* validate CRC */
- crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
if (header & BIT(14)) {
crc = get_unaligned_le32(skb->data + len
- ETH_FCS_LEN);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 429560100b10..76496f5d272c 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -29,7 +29,7 @@
#if defined USB_ETH_RNDIS
# undef USB_ETH_RNDIS
#endif
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef CONFIG_USB_G_MULTI_RNDIS
# define USB_ETH_RNDIS y
#endif
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index e220fb8091a3..8b45145b9136 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 4b5dbd0127f5..5fc80a104150 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2582,6 +2582,7 @@ err:
hsotg->gadget.dev.driver = NULL;
return ret;
}
+EXPORT_SYMBOL(usb_gadget_register_driver);
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c75d9270c752..19372673bf09 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -196,7 +196,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
if (hostpc_reg) {
u32 t3;
+ spin_unlock_irq(&ehci->lock);
msleep(5);/* 5ms for HCD enter low pwr mode */
+ spin_lock_irq(&ehci->lock);
t3 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
t3 = ehci_readl(ehci, hostpc_reg);
@@ -904,17 +906,18 @@ static int ehci_hub_control (
if ((temp & PORT_PE) == 0
|| (temp & PORT_RESET) != 0)
goto error;
- ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
/* After above check the port must be connected.
* Set appropriate bit thus could put phy into low power
* mode if we have hostpc feature
*/
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
if (hostpc_reg) {
- temp &= ~PORT_WKCONN_E;
- temp |= (PORT_WKDISC_E | PORT_WKOC_E);
- ehci_writel(ehci, temp | PORT_SUSPEND,
- status_reg);
+ spin_unlock_irqrestore(&ehci->lock, flags);
msleep(5);/* 5ms for HCD enter low pwr mode */
+ spin_lock_irqsave(&ehci->lock, flags);
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 | HOSTPC_PHCD,
hostpc_reg);
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index d224ab467a40..e1232890c78b 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -105,7 +105,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
if (ep->td_base)
cpm_muram_free(cpm_muram_offset(ep->td_base));
- if (ep->conf_frame_Q) {
+ if (kfifo_initialized(&ep->conf_frame_Q)) {
size = cq_howmany(&ep->conf_frame_Q);
for (; size; size--) {
struct packet *pkt = cq_get(&ep->conf_frame_Q);
@@ -115,7 +115,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
cq_delete(&ep->conf_frame_Q);
}
- if (ep->empty_frame_Q) {
+ if (kfifo_initialized(&ep->empty_frame_Q)) {
size = cq_howmany(&ep->empty_frame_Q);
for (; size; size--) {
struct packet *pkt = cq_get(&ep->empty_frame_Q);
@@ -125,7 +125,7 @@ void fhci_ep0_free(struct fhci_usb *usb)
cq_delete(&ep->empty_frame_Q);
}
- if (ep->dummy_packets_Q) {
+ if (kfifo_initialized(&ep->dummy_packets_Q)) {
size = cq_howmany(&ep->dummy_packets_Q);
for (; size; size--) {
u8 *buff = cq_get(&ep->dummy_packets_Q);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 0ceec123ddfd..bee558aed427 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -35,7 +35,9 @@
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/irq.h>
+#include <asm/cacheflush.h>
#include "../core/hcd.h"
#include "r8a66597.h"
@@ -820,6 +822,26 @@ static void enable_r8a66597_pipe(struct r8a66597 *r8a66597, struct urb *urb,
enable_r8a66597_pipe_dma(r8a66597, dev, pipe, urb);
}
+static void r8a66597_urb_done(struct r8a66597 *r8a66597, struct urb *urb,
+ int status)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
+{
+ if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+ void *ptr;
+
+ for (ptr = urb->transfer_buffer;
+ ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+ ptr += PAGE_SIZE)
+ flush_dcache_page(virt_to_page(ptr));
+ }
+
+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
+ spin_unlock(&r8a66597->lock);
+ usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb, status);
+ spin_lock(&r8a66597->lock);
+}
+
/* this function must be called with interrupt disabled */
static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
{
@@ -838,15 +860,9 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
list_del(&td->queue);
kfree(td);
- if (urb) {
- usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
- urb);
+ if (urb)
+ r8a66597_urb_done(r8a66597, urb, -ENODEV);
- spin_unlock(&r8a66597->lock);
- usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb,
- -ENODEV);
- spin_lock(&r8a66597->lock);
- }
break;
}
}
@@ -1006,6 +1022,8 @@ static void start_root_hub_sampling(struct r8a66597 *r8a66597, int port,
/* this function must be called with interrupt disabled */
static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
u16 syssts)
+__releases(r8a66597->lock)
+__acquires(r8a66597->lock)
{
if (syssts == SE0) {
r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port));
@@ -1023,7 +1041,9 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port,
usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597));
}
+ spin_unlock(&r8a66597->lock);
usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597));
+ spin_lock(&r8a66597->lock);
}
/* this function must be called with interrupt disabled */
@@ -1283,10 +1303,7 @@ __releases(r8a66597->lock) __acquires(r8a66597->lock)
if (usb_pipeisoc(urb->pipe))
urb->start_frame = r8a66597_get_frame(hcd);
- usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
- spin_unlock(&r8a66597->lock);
- usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&r8a66597->lock);
+ r8a66597_urb_done(r8a66597, urb, status);
}
if (restart) {
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 0025847743f3..8b37a4b9839e 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3245,6 +3245,7 @@ static struct usb_device_id sisusb_table [] = {
{ USB_DEVICE(0x0711, 0x0902) },
{ USB_DEVICE(0x0711, 0x0903) },
{ USB_DEVICE(0x0711, 0x0918) },
+ { USB_DEVICE(0x0711, 0x0920) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index de56b3d743d7..3d2d3e549bd1 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -44,6 +44,7 @@ config ISP1301_OMAP
config USB_ULPI
bool "Generic ULPI Transceiver Driver"
depends on ARM
+ select USB_OTG_UTILS
help
Enable this to support ULPI connected USB OTG transceivers which
are likely found on embedded boards.
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 216f187582ab..7638828e7317 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -50,7 +50,7 @@
* Version Information
*/
#define DRIVER_VERSION "v1.5.0"
-#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>"
+#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
static int debug;
@@ -145,10 +145,15 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = {
+/*
+ * Device ID not listed? Test via module params product/vendor or
+ * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
+ */
static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -552,9 +557,16 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PEDO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_IBS_PROD_PID) },
/*
- * Due to many user requests for multiple ELV devices we enable
- * them by default.
+ * ELV devices:
*/
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_USR_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_MSM1_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_KL100_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS550_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_EC3000_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS888_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_TWS550_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_FEM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_CLI7000_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PPS7330_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFM100_PID) },
@@ -571,11 +583,17 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_ELV_PCK100_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_RFP500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FS20SIG_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_UTP8_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS300PC_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS444PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_FHZ1300PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_EM1010PC_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS500_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELV_HS485_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_UMS100_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
@@ -697,6 +715,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
{ USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) },
+ { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) },
{ USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index da92b4952ffb..c8951aeed983 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -38,6 +38,8 @@
/* www.candapter.com Ewert Energy Systems CANdapter device */
#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+
/* OOCDlink by Joern Kaipf <joernk@web.de>
* (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */
#define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */
@@ -161,22 +163,37 @@
/*
* ELV USB devices submitted by Christian Abt of ELV (www.elv.de).
* All of these devices use FTDI's vendor ID (0x0403).
+ * Further IDs taken from ELV Windows .inf file.
*
* The previously included PID for the UO 100 module was incorrect.
* In fact, that PID was for ELV's UR 100 USB-RS232 converter (0xFB58).
*
* Armin Laeuger originally sent the PID for the UM 100 module.
*/
+#define FTDI_ELV_USR_PID 0xE000 /* ELV Universal-Sound-Recorder */
+#define FTDI_ELV_MSM1_PID 0xE001 /* ELV Mini-Sound-Modul */
+#define FTDI_ELV_KL100_PID 0xE002 /* ELV Kfz-Leistungsmesser KL 100 */
+#define FTDI_ELV_WS550_PID 0xE004 /* WS 550 */
+#define FTDI_ELV_EC3000_PID 0xE006 /* ENERGY CONTROL 3000 USB */
+#define FTDI_ELV_WS888_PID 0xE008 /* WS 888 */
+#define FTDI_ELV_TWS550_PID 0xE009 /* Technoline WS 550 */
+#define FTDI_ELV_FEM_PID 0xE00A /* Funk Energie Monitor */
#define FTDI_ELV_FHZ1300PC_PID 0xE0E8 /* FHZ 1300 PC */
#define FTDI_ELV_WS500_PID 0xE0E9 /* PC-Wetterstation (WS 500) */
#define FTDI_ELV_HS485_PID 0xE0EA /* USB to RS-485 adapter */
+#define FTDI_ELV_UMS100_PID 0xE0EB /* ELV USB Master-Slave Schaltsteckdose UMS 100 */
+#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
+#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
+#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
 #define FTDI_ELV_EM1010PC_PID	0xE0EF	/* Energy monitor EM 1010 PC */
#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
#define FTDI_ELV_RFP500_PID 0xE0F3 /* HF-Leistungsmesser (RFP 500) */
#define FTDI_ELV_FS20SIG_PID 0xE0F4 /* Signalgeber (FS 20 SIG) */
+#define FTDI_ELV_UTP8_PID 0xE0F5 /* ELV UTP 8 */
#define FTDI_ELV_WS300PC_PID 0xE0F6 /* PC-Wetterstation (WS 300 PC) */
+#define FTDI_ELV_WS444PC_PID 0xE0F7 /* Conrad WS 444 PC */
#define FTDI_PHI_FISCO_PID 0xE40B /* PHI Fisco USB to Serial cable */
#define FTDI_ELV_UAD8_PID 0xF068 /* USB-AD-Wandler (UAD 8) */
#define FTDI_ELV_UDA7_PID 0xF069 /* USB-DA-Wandler (UDA 7) */
@@ -968,6 +985,7 @@
#define PAPOUCH_VID 0x5050 /* Vendor ID */
#define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */
#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */
+#define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */
/*
* Marvell SheevaPlug
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ac1b6449fb6a..3eb6143bb646 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -298,6 +298,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
{ }
};
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c932f9053188..49575fba3756 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -941,7 +941,7 @@ UNUSUAL_DEV( 0x07ab, 0xfccd, 0x0000, 0x9999,
UNUSUAL_DEV( 0x07af, 0x0004, 0x0100, 0x0133,
"Microtech",
"USB-SCSI-DB25",
- US_SC_SCSI, US_PR_BULK, usb_stor_euscsi_init,
+ US_SC_DEVICE, US_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
UNUSUAL_DEV( 0x07af, 0x0005, 0x0100, 0x0100,
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index eb12182b2059..d25df51bb0d2 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -161,8 +161,17 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
return 0;
}
+static void efifb_destroy(struct fb_info *info)
+{
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(info->aperture_base, info->aperture_size);
+ framebuffer_release(info);
+}
+
static struct fb_ops efifb_ops = {
.owner = THIS_MODULE,
+ .fb_destroy = efifb_destroy,
.fb_setcolreg = efifb_setcolreg,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
@@ -281,7 +290,7 @@ static int __init efifb_probe(struct platform_device *dev)
info->par = NULL;
info->aperture_base = efifb_fix.smem_start;
- info->aperture_size = size_total;
+ info->aperture_size = size_remap;
info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
if (!info->screen_base) {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 505be88c82ae..369f2eebbad1 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -28,7 +28,7 @@
struct virtio_balloon
{
struct virtio_device *vdev;
- struct virtqueue *inflate_vq, *deflate_vq;
+ struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
/* Where the ballooning thread waits for config to change. */
wait_queue_head_t config_change;
@@ -49,6 +49,10 @@ struct virtio_balloon
/* The array of pfns we tell the Host about. */
unsigned int num_pfns;
u32 pfns[256];
+
+ /* Memory statistics */
+ int need_stats_update;
+ struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
};
static struct virtio_device_id id_table[] = {
@@ -154,6 +158,72 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
}
}
+static inline void update_stat(struct virtio_balloon *vb, int idx,
+ u16 tag, u64 val)
+{
+ BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
+ vb->stats[idx].tag = tag;
+ vb->stats[idx].val = val;
+}
+
+#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
+
+static void update_balloon_stats(struct virtio_balloon *vb)
+{
+ unsigned long events[NR_VM_EVENT_ITEMS];
+ struct sysinfo i;
+ int idx = 0;
+
+ all_vm_events(events);
+ si_meminfo(&i);
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
+ pages_to_bytes(events[PSWPIN]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
+ pages_to_bytes(events[PSWPOUT]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
+ pages_to_bytes(i.freeram));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
+ pages_to_bytes(i.totalram));
+}
+
+/*
+ * While most virtqueues communicate guest-initiated requests to the hypervisor,
+ * the stats queue operates in reverse. The driver initializes the virtqueue
+ * with a single buffer. From that point forward, all conversations consist of
+ * a hypervisor request (a call to this function) which directs us to refill
+ * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
+ * we notify our kthread which does the actual work via stats_handle_request().
+ */
+static void stats_request(struct virtqueue *vq)
+{
+ struct virtio_balloon *vb;
+ unsigned int len;
+
+ vb = vq->vq_ops->get_buf(vq, &len);
+ if (!vb)
+ return;
+ vb->need_stats_update = 1;
+ wake_up(&vb->config_change);
+}
+
+static void stats_handle_request(struct virtio_balloon *vb)
+{
+ struct virtqueue *vq;
+ struct scatterlist sg;
+
+ vb->need_stats_update = 0;
+ update_balloon_stats(vb);
+
+ vq = vb->stats_vq;
+ sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+ if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ BUG();
+ vq->vq_ops->kick(vq);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -190,8 +260,11 @@ static int balloon(void *_vballoon)
try_to_freeze();
wait_event_interruptible(vb->config_change,
(diff = towards_target(vb)) != 0
+ || vb->need_stats_update
|| kthread_should_stop()
|| freezing(current));
+ if (vb->need_stats_update)
+ stats_handle_request(vb);
if (diff > 0)
fill_balloon(vb, diff);
else if (diff < 0)
@@ -204,10 +277,10 @@ static int balloon(void *_vballoon)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
- struct virtqueue *vqs[2];
- vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
- const char *names[] = { "inflate", "deflate" };
- int err;
+ struct virtqueue *vqs[3];
+ vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
+ const char *names[] = { "inflate", "deflate", "stats" };
+ int err, nvqs;
vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
if (!vb) {
@@ -219,14 +292,31 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->num_pages = 0;
init_waitqueue_head(&vb->config_change);
vb->vdev = vdev;
+ vb->need_stats_update = 0;
- /* We expect two virtqueues. */
- err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+	/* We expect at least two virtqueues: inflate and deflate,
+	 * and optionally a stats queue. */
+ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
+ err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
if (err)
goto out_free_vb;
vb->inflate_vq = vqs[0];
vb->deflate_vq = vqs[1];
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+ struct scatterlist sg;
+ vb->stats_vq = vqs[2];
+
+ /*
+ * Prime this virtqueue with one buffer so the hypervisor can
+ * use it to signal us later.
+ */
+ sg_init_one(&sg, vb->stats, sizeof vb->stats);
+ if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
+ &sg, 1, 0, vb) < 0)
+ BUG();
+ vb->stats_vq->vq_ops->kick(vb->stats_vq);
+ }
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
@@ -264,7 +354,10 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
-static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
+static unsigned int features[] = {
+ VIRTIO_BALLOON_F_MUST_TELL_HOST,
+ VIRTIO_BALLOON_F_STATS_VQ,
+};
static struct virtio_driver virtio_balloon_driver = {
.feature_table = features,
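The stats virtqueue added above runs in the opposite direction from the inflate/deflate queues: the driver posts a single buffer at probe time, and from then on the host hands that buffer back whenever it wants fresh numbers; the virtqueue callback only wakes the balloon kthread because collecting the statistics can sleep. A condensed view of that cycle, reusing the structures and vq_ops calls shown in the hunks (the function names here are illustrative and error handling is simplified):

/* Post the stats buffer so the host has something to return later. */
static void stats_vq_post_buffer(struct virtio_balloon *vb)
{
	struct scatterlist sg;

	sg_init_one(&sg, vb->stats, sizeof(vb->stats));
	if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
		BUG();
	vb->stats_vq->vq_ops->kick(vb->stats_vq);
}

/* Virtqueue callback: the host consumed the buffer, meaning it wants an
 * update.  Gathering stats can sleep, so defer to the balloon kthread. */
static void stats_vq_cb(struct virtqueue *vq)
{
	struct virtio_balloon *vb;
	unsigned int len;

	vb = vq->vq_ops->get_buf(vq, &len);
	if (!vb)
		return;
	vb->need_stats_update = 1;
	wake_up(&vb->config_change);
}

/* Balloon kthread: refresh the array and repost the buffer to the host. */
static void stats_vq_refill(struct virtio_balloon *vb)
{
	vb->need_stats_update = 0;
	update_balloon_stats(vb);
	stats_vq_post_buffer(vb);
}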
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 28d9cf7cf72f..1d5191fab62e 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -702,7 +702,7 @@ static struct pci_driver virtio_pci_driver = {
.name = "virtio-pci",
.id_table = virtio_pci_id_table,
.probe = virtio_pci_probe,
- .remove = virtio_pci_remove,
+ .remove = __devexit_p(virtio_pci_remove),
#ifdef CONFIG_PM
.suspend = virtio_pci_suspend,
.resume = virtio_pci_resume,
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index fbd2ecde93e4..0db906b3c95d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -21,6 +21,24 @@
#include <linux/virtio_config.h>
#include <linux/device.h>
+/* virtio guest is communicating with a virtual "device" that actually runs on
+ * a host processor. Memory barriers are used to control SMP effects. */
+#ifdef CONFIG_SMP
+/* Where possible, use SMP barriers which are more lightweight than mandatory
+ * barriers, because mandatory barriers control MMIO effects on accesses
+ * through relaxed memory I/O windows (which virtio does not use). */
+#define virtio_mb() smp_mb()
+#define virtio_rmb() smp_rmb()
+#define virtio_wmb() smp_wmb()
+#else
+/* We must force memory ordering even if guest is UP since host could be
+ * running on another CPU, but SMP barriers are defined to barrier() in that
+ * configuration. So fall back to mandatory barriers instead. */
+#define virtio_mb() mb()
+#define virtio_rmb() rmb()
+#define virtio_wmb() wmb()
+#endif
+
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
@@ -36,10 +54,9 @@
panic("%s:in_use = %i\n", \
(_vq)->vq.name, (_vq)->in_use); \
(_vq)->in_use = __LINE__; \
- mb(); \
} while (0)
#define END_USE(_vq) \
- do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0)
+ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...) \
do { \
@@ -221,13 +238,13 @@ static void vring_kick(struct virtqueue *_vq)
START_USE(vq);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
- wmb();
+ virtio_wmb();
vq->vring.avail->idx += vq->num_added;
vq->num_added = 0;
/* Need to update avail index before checking if we should notify */
- mb();
+ virtio_mb();
if (!(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
/* Prod other side to tell it about changes. */
@@ -286,7 +303,7 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
}
/* Only get used array entries after they have been exposed by host. */
- rmb();
+ virtio_rmb();
i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
*len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
@@ -324,7 +341,7 @@ static bool vring_enable_cb(struct virtqueue *_vq)
/* We optimistically turn back on interrupts, then check if there was
* more to do. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
- mb();
+ virtio_mb();
if (unlikely(more_used(vq))) {
END_USE(vq);
return false;
@@ -334,6 +351,30 @@ static bool vring_enable_cb(struct virtqueue *_vq)
return true;
}
+static void *vring_detach_unused_buf(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ unsigned int i;
+ void *buf;
+
+ START_USE(vq);
+
+ for (i = 0; i < vq->vring.num; i++) {
+ if (!vq->data[i])
+ continue;
+ /* detach_buf clears data, so grab it now. */
+ buf = vq->data[i];
+ detach_buf(vq, i);
+ END_USE(vq);
+ return buf;
+ }
+ /* That should have freed everything. */
+ BUG_ON(vq->num_free != vq->vring.num);
+
+ END_USE(vq);
+ return NULL;
+}
+
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -360,6 +401,7 @@ static struct virtqueue_ops vring_vq_ops = {
.kick = vring_kick,
.disable_cb = vring_disable_cb,
.enable_cb = vring_enable_cb,
+ .detach_unused_buf = vring_detach_unused_buf,
};
struct virtqueue *vring_new_virtqueue(unsigned int num,
@@ -406,8 +448,11 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
/* Put everything in free lists. */
vq->num_free = num;
vq->free_head = 0;
- for (i = 0; i < num-1; i++)
+ for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1;
+ vq->data[i] = NULL;
+ }
+ vq->data[i] = NULL;
return &vq->vq;
}
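The virtio_mb()/virtio_rmb()/virtio_wmb() macros introduced above encode the rule in the new comment: a UP guest still needs real barriers because the "device" runs on another host CPU, while SMP builds can use the cheaper smp_*() forms. The call sites converted in vring_kick() and vring_get_buf() form a standard publish/consume pairing; a generic sketch of that pairing, with illustrative names that are not virtio's own:

/* Illustrative ring, not the vring layout. */
struct demo_ring {
	unsigned int data[16];
	unsigned int idx;	/* number of published entries */
};

static void demo_publish(struct demo_ring *r, unsigned int val)
{
	r->data[r->idx] = val;
	virtio_wmb();		/* entry must be visible before the index moves */
	r->idx++;
}

static int demo_consume(struct demo_ring *r, unsigned int last,
			unsigned int *val)
{
	if (last == r->idx)	/* nothing new published yet */
		return 0;
	virtio_rmb();		/* read the index before the entries it covers */
	*val = r->data[last];
	return 1;
}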
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index c7b3f9df2317..2159e668751c 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -1,9 +1,8 @@
/*
* Blackfin On-Chip Watchdog Driver
- * Supports BF53[123]/BF53[467]/BF54[2489]/BF561
*
* Originally based on softdog.c
- * Copyright 2006-2007 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
* Copyright 2006-2007 Michele d'Amico
* Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>
*
@@ -137,13 +136,15 @@ static int bfin_wdt_running(void)
*/
static int bfin_wdt_set_timeout(unsigned long t)
{
- u32 cnt;
+ u32 cnt, max_t, sclk;
unsigned long flags;
- stampit();
+ sclk = get_sclk();
+ max_t = -1 / sclk;
+ cnt = t * sclk;
+ stamp("maxtimeout=%us newtimeout=%lus (cnt=%#x)", max_t, t, cnt);
- cnt = t * get_sclk();
- if (cnt < get_sclk()) {
+ if (t > max_t) {
printk(KERN_WARNING PFX "timeout value is too large\n");
return -EINVAL;
}
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296a..7d6c2139891d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
{
- char *options;
+ char *options, *tmp_options;
substring_t args[MAX_OPT_ARGS];
char *p;
int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
if (!opts)
return 0;
- options = kstrdup(opts, GFP_KERNEL);
- if (!options)
+ tmp_options = kstrdup(opts, GFP_KERNEL);
+ if (!tmp_options) {
+ ret = -ENOMEM;
goto fail_option_alloc;
+ }
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
break;
case Opt_cache:
s = match_strdup(&args[0]);
- if (!s)
- goto fail_option_alloc;
+ if (!s) {
+ ret = -ENOMEM;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "problem allocating copy of cache arg\n");
+ goto free_and_return;
+ }
if (strcmp(s, "loose") == 0)
v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_access:
s = match_strdup(&args[0]);
- if (!s)
- goto fail_option_alloc;
+ if (!s) {
+ ret = -ENOMEM;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "problem allocating copy of access arg\n");
+ goto free_and_return;
+ }
v9ses->flags &= ~V9FS_ACCESS_MASK;
if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
continue;
}
}
- kfree(options);
- return ret;
+free_and_return:
+ kfree(tmp_options);
fail_option_alloc:
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy of option argument\n");
- return -ENOMEM;
+ return ret;
}
/**
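The reworked v9fs_parse_options() above exists because strsep() advances the pointer it is handed: by the time the loop finishes, options is NULL, so the old kfree(options) freed nothing and the kstrdup() buffer leaked, while the -ENOMEM paths returned inconsistently. Keeping the original pointer in tmp_options and freeing that is the usual idiom; a minimal sketch with a made-up option key:

#include <linux/slab.h>
#include <linux/string.h>

static int parse_opts_sketch(const char *opts)
{
	char *tmp, *cur, *p;
	int ret = 0;

	tmp = kstrdup(opts, GFP_KERNEL);	/* keep this pointer for kfree() */
	if (!tmp)
		return -ENOMEM;

	cur = tmp;				/* strsep() consumes this copy */
	while ((p = strsep(&cur, ",")) != NULL) {
		if (!*p)
			continue;
		if (strcmp(p, "debug") == 0)	/* hypothetical key */
			ret = 1;
	}

	kfree(tmp);	/* not kfree(cur): cur has been advanced to NULL */
	return ret;
}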
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e35865..ed835836e0dc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
int v9fs_uflags2omode(int uflags, int extended);
ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
+void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a088..74a0461a9ac0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
return total;
}
+static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
+ int datasync)
+{
+ struct p9_fid *fid;
+ struct p9_wstat wstat;
+ int retval;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
+ dentry, datasync);
+
+ fid = filp->private_data;
+ v9fs_blank_wstat(&wstat);
+
+ retval = p9_client_wstat(fid, &wstat);
+ return retval;
+}
+
static const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync,
};
const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 9d03d1ebca6f..a407fa3388c0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
*
*/
-static void
+void
v9fs_blank_wstat(struct p9_wstat *wstat)
{
wstat->type = ~0;
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 33baf27fac78..34ddda888e63 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
unacquire_priv_sbp:
+ kfree(befs_sb->mount_opts.iocharset);
kfree(sb->s_fs_info);
unacquire_none:
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 73d6a735b8f3..d11d0289f3d2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
if (!sb)
goto out;
if (sb->s_flags & MS_RDONLY) {
- deactivate_locked_super(sb);
+ sb->s_frozen = SB_FREEZE_TRANS;
+ up_write(&sb->s_umount);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return sb;
}
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
BUG_ON(sb->s_bdev != bdev);
down_write(&sb->s_umount);
if (sb->s_flags & MS_RDONLY)
- goto out_deactivate;
+ goto out_unfrozen;
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
}
}
+out_unfrozen:
sb->s_frozen = SB_UNFROZEN;
smp_wmb();
wake_up(&sb->s_wait_unfrozen);
-out_deactivate:
if (sb)
deactivate_locked_super(sb);
out_unlock:
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 87b25543d7d1..2b59201b955c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_recover_relocation(tree_root);
- BUG_ON(ret);
+ if (ret < 0) {
+ printk(KERN_WARNING
+ "btrfs: failed to recover relocation\n");
+ err = -EINVAL;
+ goto fail_trans_kthread;
+ }
}
location.objectid = BTRFS_FS_TREE_OBJECTID;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 432a2da4641e..559f72489b3b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5402,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
int ret;
while (level >= 0) {
- if (path->slots[level] >=
- btrfs_header_nritems(path->nodes[level]))
- break;
-
ret = walk_down_proc(trans, root, path, wc, lookup_info);
if (ret > 0)
break;
@@ -5413,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
if (level == 0)
break;
+ if (path->slots[level] >=
+ btrfs_header_nritems(path->nodes[level]))
+ break;
+
ret = do_walk_down(trans, root, path, wc, &lookup_info);
if (ret > 0) {
path->slots[level]++;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96577e8bf9fd..b177ed319612 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
spin_unlock(&tree->buffer_lock);
goto free_eb;
}
- spin_unlock(&tree->buffer_lock);
-
/* add one reference for the tree */
atomic_inc(&eb->refs);
+ spin_unlock(&tree->buffer_lock);
return eb;
free_eb:
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c02033596f02..6ed434ac037f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -720,13 +720,15 @@ again:
inode->i_ino, orig_offset);
BUG_ON(ret);
}
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
if (del_nr == 0) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_mark_buffer_dirty(leaf);
} else {
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_set_file_extent_num_bytes(leaf, fi,
@@ -1133,7 +1135,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
}
mutex_lock(&dentry->d_inode->i_mutex);
out:
- return ret > 0 ? EIO : ret;
+ return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8cd109972fa6..4deb280f8969 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1681,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
* before we start the transaction. It limits the amount of btree
* reads required while inside the transaction.
*/
-static noinline void reada_csum(struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_ordered_extent *ordered_extent)
-{
- struct btrfs_ordered_sum *sum;
- u64 bytenr;
-
- sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
- list);
- bytenr = sum->sums[0].bytenr;
-
- /*
- * we don't care about the results, the point of this search is
- * just to get the btree leaves into ram
- */
- btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
-}
-
/* as ordered data IO finishes, this gets called so we can finish
* an ordered extent if the range of bytes in the file it covers are
* fully written.
@@ -1709,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered_extent = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_path *path;
int compressed = 0;
int ret;
@@ -1717,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
if (!ret)
return 0;
- /*
- * before we join the transaction, try to do some of our IO.
- * This will limit the amount of IO that we have to do with
- * the transaction running. We're unlikely to need to do any
- * IO if the file extents are new, the disk_i_size checks
- * covers the most common case.
- */
- if (start < BTRFS_I(inode)->disk_i_size) {
- path = btrfs_alloc_path();
- if (path) {
- ret = btrfs_lookup_file_extent(NULL, root, path,
- inode->i_ino,
- start, 0);
- ordered_extent = btrfs_lookup_ordered_extent(inode,
- start);
- if (!list_empty(&ordered_extent->list)) {
- btrfs_release_path(root, path);
- reada_csum(root, path, ordered_extent);
- }
- btrfs_free_path(path);
- }
- }
-
- if (!ordered_extent)
- ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+ ordered_extent = btrfs_lookup_ordered_extent(inode, start);
BUG_ON(!ordered_extent);
+
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list));
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -5841,7 +5799,9 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- cur_offset > inode->i_size) {
+ (actual_len > inode->i_size) &&
+ (cur_offset > inode->i_size)) {
+
if (cur_offset > actual_len)
i_size = actual_len;
else
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ed3e4a2ec2c8..ab7ab5318745 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3764,7 +3764,8 @@ out:
BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
- btrfs_orphan_cleanup(fs_root);
+ else
+ btrfs_orphan_cleanup(fs_root);
}
return err;
}
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 14ac4806e291..eeb4986ea7db 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -348,7 +348,17 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
dir = dget_parent(object->dentry);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- ret = cachefiles_bury_object(cache, dir, object->dentry);
+
+ /* we need to check that our parent is _still_ our parent - it may have
+ * been renamed */
+ if (dir == object->dentry->d_parent) {
+ ret = cachefiles_bury_object(cache, dir, object->dentry);
+ } else {
+ /* it got moved, presumably by cachefilesd culling it, so it's
+ * no longer in the key path and we can ignore it */
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = 0;
+ }
dput(dir);
_leave(" = %d", ret);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7b2600b380d7..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
+Version 1.62
+------------
+Add sockopt=TCP_NODELAY mount option.
+
Version 1.61
------------
Fix append problem to Samba servers (files opened with O_APPEND could
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.61"
+#define CIFS_VERSION "1.62"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
bool svlocal:1; /* local server or remote */
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
+ bool tcp_nodelay;
atomic_t inFlight; /* number of requests on the wire to server */
#ifdef CONFIG_CIFS_STATS2
atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 3bbcaa716b3c..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
unsigned int rsize;
unsigned int wsize;
- unsigned int sockopt;
+ bool sockopt_tcp_nodelay:1;
unsigned short int port;
char *prepath;
};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
simple_strtoul(value, &value, 0);
}
} else if (strnicmp(data, "sockopt", 5) == 0) {
- if (value && *value) {
- vol->sockopt =
- simple_strtoul(value, &value, 0);
+ if (!value || !*value) {
+ cERROR(1, ("no socket option specified"));
+ continue;
+ } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
+ vol->sockopt_tcp_nodelay = 1;
}
} else if (strnicmp(data, "netbiosname", 4) == 0) {
if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
+ tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
atomic_set(&tcp_ses->inFlight, 0);
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
ipv4_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
socket->sk->sk_rcvbuf = 140 * 1024;
}
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
ipv6_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
socket->sk->sk_sndtimeo = 5 * HZ;
+
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
server->ssocket = socket;
return rc;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cf18ee765590..e3fda978f481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
CIFS_MOUNT_MAP_SPECIAL_CHR);
}
- if (!rc)
+ if (!rc) {
rc = inode_setattr(inode, attrs);
+
+ /* force revalidate when any of these times are set since some
+ of the fs types (eg ext3, fat) do not have fine enough
+		   time granularity to match protocol, and we do not have
+		   a way (yet) to query the server fs's time granularity (and
+ whether it rounds times down).
+ */
+ if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
+ cifsInode->time = 0;
+ }
out:
kfree(args);
kfree(full_path);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f84062f9a985..c343b14ba2d3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
cFYI(1, ("For %s", name->name));
+ if (parent->d_op && parent->d_op->d_hash)
+ parent->d_op->d_hash(parent, name);
+ else
+ name->hash = full_name_hash(name->name, name->len);
+
dentry = d_lookup(parent, name);
if (dentry) {
/* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
min(len, max_len), nlt,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ pqst->len -= nls_nullsize(nlt);
} else {
pqst->name = filename;
pqst->len = len;
}
- pqst->hash = full_name_hash(pqst->name, pqst->len);
-/* cFYI(1, ("filldir on %s",pqst->name)); */
return rc;
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7085a6275c4c..aaa9c1c5a5bd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
- } else { /* 300 should be long enough for any conceivable user name */
+ } else {
bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
- 300, nls_cp);
+ MAX_USERNAME_SIZE, nls_cp);
}
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* copy user */
if (ses->userName == NULL) {
/* BB what about null user mounts - check that we do this BB */
- } else { /* 300 should be long enough for any conceivable user name */
- strncpy(bcc_ptr, ses->userName, 300);
+ } else {
+ strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
}
- /* BB improve check for overflow */
- bcc_ptr += strnlen(ses->userName, 300);
+ bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c5c45de1a2ee..0ca9ec4a79c3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
u32 data;
void __user *dxferp;
int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return sys_ioctl(fd, cmd, (unsigned long)sgio32);
if (get_user(iovec_count, &sgio32->iovec_count))
return -EFAULT;
@@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF)
COMPATIBLE_IOCTL(TIOCLINUX)
COMPATIBLE_IOCTL(TIOCSBRK)
COMPATIBLE_IOCTL(TIOCCBRK)
+COMPATIBLE_IOCTL(TIOCGSID)
COMPATIBLE_IOCTL(TIOCGICOUNT)
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD)
@@ -1038,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
/* loop */
IGNORE_IOCTL(LOOP_CLR_FD)
+/* md calls this on random blockdevs */
+IGNORE_IOCTL(RAID_VERSION)
/* SG stuff */
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
diff --git a/fs/exec.c b/fs/exec.c
index 0790a107ff7e..cce6bbdbdbb1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
struct vm_area_struct *prev = NULL;
unsigned long vm_flags;
unsigned long stack_base;
+ unsigned long stack_size;
+ unsigned long stack_expand;
+ unsigned long rlim_stack;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size to 1GB */
@@ -627,10 +630,23 @@ int setup_arg_pages(struct linux_binprm *bprm,
goto out_unlock;
}
+ stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ stack_size = vma->vm_end - vma->vm_start;
+ /*
+ * Align this down to a page boundary as expand_stack
+ * will align it up.
+ */
+ rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
- stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_start + rlim_stack;
+ else
+ stack_base = vma->vm_end + stack_expand;
#else
- stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_end - rlim_stack;
+ else
+ stack_base = vma->vm_start - stack_expand;
#endif
ret = expand_stack(vma, stack_base);
if (ret)
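To see what the new stack_base computation buys, take the grows-down case and assume 4 KiB pages with EXTRA_STACK_VM_PAGES at its usual value of 20 (an assumption about this tree, not stated in the hunk), so stack_expand is 80 KiB. With a 32 KiB argument VMA and an 8 MiB RLIMIT_STACK, 32 KiB + 80 KiB is far below the limit, so stack_base is vma->vm_start - 80 KiB, the same as before. With a 64 KiB RLIMIT_STACK, however, the old code would still have asked expand_stack() for vm_start - 80 KiB and failed the rlimit check, aborting the exec; the new code picks stack_base = vma->vm_end - 64 KiB instead, expanding the stack to the largest size the rlimit allows.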
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 5ef953e6f908..97e01dc0d95f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -199,9 +199,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
{
- unsigned long flags;
-
- write_lock_irqsave(&filp->f_owner.lock, flags);
+ write_lock_irq(&filp->f_owner.lock);
if (force || !filp->f_owner.pid) {
put_pid(filp->f_owner.pid);
filp->f_owner.pid = get_pid(pid);
@@ -213,7 +211,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
filp->f_owner.euid = cred->euid;
}
}
- write_unlock_irqrestore(&filp->f_owner.lock, flags);
+ write_unlock_irq(&filp->f_owner.lock);
}
int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
diff --git a/fs/file_table.c b/fs/file_table.c
index 69652c5bd5f0..b98404b54383 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -253,6 +253,7 @@ void __fput(struct file *file)
if (file->f_op && file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
+ ima_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
cdev_put(inode->i_cdev);
fops_put(file->f_op);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6d47379e794b..583e823307ae 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
*ptr++ = cpu_to_be64(bn++);
break;
}
- } while (state != ALLOC_DATA);
+ } while ((state != ALLOC_DATA) || !dblock);
ip->i_height = height;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f455a03a09e2..f42663325931 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -769,6 +769,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!gl)
return -ENOMEM;
+ atomic_inc(&sdp->sd_glock_disposal);
gl->gl_flags = 0;
gl->gl_name = name;
atomic_set(&gl->gl_ref, 1);
@@ -1538,6 +1539,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
up_write(&gfs2_umount_flush_sem);
msleep(10);
}
+ flush_workqueue(glock_workqueue);
+ wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
+ gfs2_dump_lockstate(sdp);
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 13f0bd228132..c0262faf4725 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -123,7 +123,7 @@ struct lm_lockops {
int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
void (*lm_unmount) (struct gfs2_sbd *sdp);
void (*lm_withdraw) (struct gfs2_sbd *sdp);
- void (*lm_put_lock) (struct kmem_cache *cachep, void *gl);
+ void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
unsigned int (*lm_lock) (struct gfs2_glock *gl,
unsigned int req_state, unsigned int flags);
void (*lm_cancel) (struct gfs2_glock *gl);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4792200978c8..bc0ad158e6b4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -544,6 +544,8 @@ struct gfs2_sbd {
struct gfs2_holder sd_live_gh;
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_trans_gl;
+ wait_queue_head_t sd_glock_wait;
+ atomic_t sd_glock_disposal;
/* Inode Stuff */
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 46df988323bc..0e5e0e7022e5 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -21,6 +21,7 @@ static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -30,6 +31,8 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
kmem_cache_free(gfs2_glock_cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
ret |= LM_OUT_CANCELED;
@@ -164,14 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
return LM_OUT_ASYNC;
}
-static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
+static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
{
- struct gfs2_glock *gl = ptr;
- struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
if (gl->gl_lksb.sb_lkid == 0) {
kmem_cache_free(cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
return;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index edfee24f3636..a86ed6381566 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -82,6 +82,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
gfs2_tune_init(&sdp->sd_tune);
+ init_waitqueue_head(&sdp->sd_glock_wait);
+ atomic_set(&sdp->sd_glock_disposal, 0);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
@@ -723,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
goto fail;
}
- error = -EINVAL;
+ error = -EUSERS;
if (!gfs2_jindex_size(sdp)) {
fs_err(sdp, "no journals!\n");
goto fail_jindex;
@@ -983,9 +985,17 @@ static const match_table_t nolock_tokens = {
{ Opt_err, NULL },
};
+static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ kmem_cache_free(cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
+}
+
static const struct lm_lockops nolock_ops = {
.lm_proto_name = "lock_nolock",
- .lm_put_lock = kmem_cache_free,
+ .lm_put_lock = nolock_put_lock,
.lm_tokens = &nolock_tokens,
};
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c282ad41f3d1..b9dd3da22c0a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -21,6 +21,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
+#include <linux/wait.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/namei.c b/fs/namei.c
index 94a5e60779f9..a4855af776a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -823,6 +823,17 @@ fail:
}
/*
+ * This is a temporary kludge to deal with "automount" symlinks; proper
+ * solution is to trigger them on follow_mount(), so that do_lookup()
+ * would DTRT. To be killed before 2.6.34-final.
+ */
+static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
+{
+ return inode && unlikely(inode->i_op->follow_link) &&
+ ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
+}
+
+/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
@@ -942,8 +953,7 @@ last_component:
if (err)
break;
inode = next.dentry->d_inode;
- if ((lookup_flags & LOOKUP_FOLLOW)
- && inode && inode->i_op->follow_link) {
+ if (follow_on_final(inode, lookup_flags)) {
err = do_follow_link(&next, nd);
if (err)
goto return_err;
@@ -1736,8 +1746,7 @@ do_last:
if (nd.root.mnt)
path_put(&nd.root);
if (!IS_ERR(filp)) {
- error = ima_path_check(&filp->f_path, filp->f_mode &
- (MAY_READ | MAY_WRITE | MAY_EXEC));
+ error = ima_file_check(filp, acc_mode);
if (error) {
fput(filp);
filp = ERR_PTR(error);
@@ -1797,8 +1806,7 @@ ok:
}
filp = nameidata_to_filp(&nd);
if (!IS_ERR(filp)) {
- error = ima_path_check(&filp->f_path, filp->f_mode &
- (MAY_READ | MAY_WRITE | MAY_EXEC));
+ error = ima_file_check(filp, acc_mode);
if (error) {
fput(filp);
filp = ERR_PTR(error);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e1d415e97849..0d289823e856 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.eof = 0;
data->res.count = bytes;
+ nfs_fattr_init(&data->fattr);
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
@@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
NFS_PROTO(data->inode)->commit_setup(data, &msg);
@@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.count = bytes;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6b891328f332..63f2071d6445 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -486,6 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
{
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+ if (gfp & __GFP_WAIT)
+ nfs_wb_page(page->mapping->host, page);
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index fa588006588d..237874f1af23 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
*/
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
- struct nfs_inode *nfsi = NFS_I(page->mapping->host);
- struct fscache_cookie *cookie = nfsi->fscache;
-
- BUG_ON(!cookie);
-
if (PageFsCache(page)) {
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ struct fscache_cookie *cookie = nfsi->fscache;
+
+ BUG_ON(!cookie);
dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
cookie, page, nfsi);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index faa091865ad0..f141bde7756a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1261,8 +1261,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
+ umode_t newmode = inode->i_mode & S_IFMT;
+ newmode |= fattr->mode & S_IALLUGO;
+ inode->i_mode = newmode;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
- inode->i_mode = fattr->mode;
}
} else if (server->caps & NFS_CAP_MODE)
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 0adefc40cc89..59047f8d7d72 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -120,7 +120,7 @@ static struct {
{ .status = MNT3ERR_INVAL, .errno = -EINVAL, },
{ .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
{ .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
- { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
};
struct mountres {
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5e078b222b4e..7bc2da8efd4a 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -699,7 +699,7 @@ static struct {
{ NFSERR_BAD_COOKIE, -EBADCOOKIE },
{ NFSERR_NOTSUPP, -ENOTSUPP },
{ NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -ESERVERFAULT },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
{ -1, -EIO }
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 865265bdca03..0c6fda33d66e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -146,6 +146,7 @@ enum {
NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
+ NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
};
struct nfs4_state {
@@ -277,6 +278,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs4_schedule_state_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
+extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 198d51d17c13..375f0fae2c6a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -249,19 +249,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
- case -NFS4ERR_STALE_CLIENTID:
+ goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_EXPIRED:
- nfs4_schedule_state_recovery(clp);
- ret = nfs4_wait_clnt_recover(clp);
- if (ret == 0)
- exception->retry = 1;
-#if !defined(CONFIG_NFS_V4_1)
- break;
-#else /* !defined(CONFIG_NFS_V4_1) */
- if (!nfs4_has_session(server->nfs_client))
+ if (state == NULL)
break;
- /* FALLTHROUGH */
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_EXPIRED:
+ goto do_state_recovery;
+#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
@@ -274,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
nfs4_schedule_state_recovery(clp);
exception->retry = 1;
break;
-#endif /* !defined(CONFIG_NFS_V4_1) */
+#endif /* defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
if (exception->timeout > HZ) {
/* We have retried a decent amount, time to
@@ -293,6 +289,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
+do_state_recovery:
+ nfs4_schedule_state_recovery(clp);
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret == 0)
+ exception->retry = 1;
+ return ret;
}
@@ -1658,6 +1660,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
+ if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+ set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
@@ -3422,15 +3426,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
- case -NFS4ERR_STALE_CLIENTID:
+ goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
+ if (state == NULL)
+ break;
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
- rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
- nfs4_schedule_state_recovery(clp);
- if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
- rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
- task->tk_status = 0;
- return -EAGAIN;
+ goto do_state_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
@@ -3458,6 +3461,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
}
task->tk_status = nfs4_map_errors(task->tk_status);
return 0;
+do_state_recovery:
+ rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
+ nfs4_schedule_state_recovery(clp);
+ if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
+ rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
+ task->tk_status = 0;
+ return -EAGAIN;
}
static int
@@ -4088,6 +4098,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
.rpc_release = nfs4_lock_release,
};
+static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state *state = lsp->ls_state;
+
+ switch (error) {
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ if (new_lock_owner != 0 ||
+ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+ break;
+ case -NFS4ERR_STALE_STATEID:
+ if (new_lock_owner != 0 ||
+ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+ };
+}
+
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
struct nfs4_lockdata *data;
@@ -4126,6 +4158,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
+ if (ret)
+ nfs4_handle_setlk_error(data->server, data->lsp,
+ data->arg.new_lock_owner, ret);
} else
data->cancelled = 1;
rpc_put_task(task);
@@ -4181,8 +4216,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
- int status;
+ int status = -ENOLCK;
+ if ((fl_flags & FL_POSIX) &&
+ !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
+ goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
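
Taken together, the _nfs4_do_open and _nfs4_proc_setlk hunks above implement a capability gate: the open reply's NFS4_OPEN_RESULT_LOCKTYPE_POSIX flag is recorded as NFS_STATE_POSIX_LOCKS on the state, and later POSIX lock requests fail locally with -ENOLCK when that bit was never set. A simplified standalone model of the gate follows; the flag layout and helper names are illustrative.

#include <errno.h>
#include <stdio.h>

#define STATE_POSIX_LOCKS  (1u << 0)    /* recorded from the OPEN result */
#define FL_POSIX_REQ       (1u << 1)    /* caller asked for a POSIX lock */

struct open_state { unsigned int flags; };

static void record_open_result(struct open_state *st, int server_posix)
{
        if (server_posix)
                st->flags |= STATE_POSIX_LOCKS;
}

static int set_lock(const struct open_state *st, unsigned int fl_flags)
{
        /* reject locally instead of sending a lock the server cannot honour */
        if ((fl_flags & FL_POSIX_REQ) && !(st->flags & STATE_POSIX_LOCKS))
                return -ENOLCK;
        return 0;
}

int main(void)
{
        struct open_state st = { 0 };

        record_open_result(&st, 0);                     /* server did not advertise POSIX locking */
        printf("%d\n", set_lock(&st, FL_POSIX_REQ));    /* prints -ENOLCK's value */
        return 0;
}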
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6d263ed79e92..c1e2733f4fa4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
nfs4_schedule_state_manager(clp);
}
-static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
+int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e437fd6a819f..5cd5184b56db 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr,
* If the server returns different values for sessionID, slotID or
* sequence number, the server is looney tunes.
*/
- status = -ESERVERFAULT;
+ status = -EREMOTEIO;
if (memcmp(id.data, res->sr_session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
@@ -5774,7 +5774,7 @@ static struct {
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
{ NFS4ERR_BADTYPE, -EBADTYPE },
{ NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_SYMLINK, -ELOOP },
@@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat)
}
if (stat <= 10000 || stat > 10100) {
/* The server is looney tunes. */
- return -ESERVERFAULT;
+ return -EREMOTEIO;
}
/* If we cannot translate the error, the recovery routines should
* handle it.
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e2975939126a..a12c45b65dd4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req)
kref_put(&req->wb_kref, nfs_free_request);
}
+static int nfs_wait_bit_uninterruptible(void *word)
+{
+ io_schedule();
+ return 0;
+}
+
/**
* nfs_wait_on_request - Wait for a request to complete.
* @req: request to wait upon.
@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req)
int
nfs_wait_on_request(struct nfs_page *req)
{
- int ret = 0;
-
- if (!test_bit(PG_BUSY, &req->wb_flags))
- goto out;
- ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
- nfs_wait_bit_killable, TASK_KILLABLE);
-out:
- return ret;
+ return wait_on_bit(&req->wb_flags, PG_BUSY,
+ nfs_wait_bit_uninterruptible,
+ TASK_UNINTERRUPTIBLE);
}
/**
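
For context on the pagelist.c change: wait_on_bit()'s action callback in this kernel generation is invoked while the bit is still set, is expected to sleep, and returns 0 to keep waiting or a non-zero error to abort. The hunk's nfs_wait_bit_uninterruptible simply io_schedule()s and returns 0. Below is a sketch of what a killable counterpart (along the lines of the nfs_wait_bit_killable the old code referenced) could look like; it is illustrative, not the exact kernel implementation.

#include <linux/sched.h>
#include <linux/errno.h>

static int example_wait_bit_killable(void *word)
{
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;    /* abort the wait; caller sees the error */
        schedule();                     /* otherwise sleep until woken, then re-test the bit */
        return 0;
}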
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ce907efc5508..f1afee4eea77 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -243,6 +243,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *);
static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
static int nfs_xdev_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static void nfs_put_super(struct super_block *);
static void nfs_kill_super(struct super_block *);
static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
@@ -266,6 +267,7 @@ static const struct super_operations nfs_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs_clear_inode,
.umount_begin = nfs_umount_begin,
@@ -335,6 +337,7 @@ static const struct super_operations nfs4_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs4_clear_inode,
.umount_begin = nfs_umount_begin,
@@ -2258,6 +2261,17 @@ error_splat_super:
}
/*
+ * Ensure that we unregister the bdi before kill_anon_super
+ * releases the device name
+ */
+static void nfs_put_super(struct super_block *s)
+{
+ struct nfs_server *server = NFS_SB(s);
+
+ bdi_unregister(&server->backing_dev_info);
+}
+
+/*
* Destroy an NFS2/3 superblock
*/
static void nfs_kill_super(struct super_block *s)
@@ -2265,7 +2279,6 @@ static void nfs_kill_super(struct super_block *s)
struct nfs_server *server = NFS_SB(s);
kill_anon_super(s);
- bdi_unregister(&server->backing_dev_info);
nfs_fscache_release_super_cookie(s);
nfs_free_server(server);
}
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index 70e1fbbaaeab..ad4d2e787b20 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -15,8 +15,10 @@
#include "callback.h"
+#ifdef CONFIG_NFS_V4
static const int nfs_set_port_min = 0;
static const int nfs_set_port_max = 65535;
+#endif
static struct ctl_table_header *nfs_callback_sysctl_table;
static ctl_table nfs_cb_sysctls[] = {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d171696017f4..d63d964a0392 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1233,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-void nfs_commitdata_release(void *data)
+static void nfs_commitdata_release(void *data)
{
struct nfs_write_data *wdata = data;
@@ -1541,6 +1541,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
break;
}
ret = nfs_wait_on_request(req);
+ nfs_release_request(req);
if (ret < 0)
goto out;
}
@@ -1597,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct nfs_page *req;
int ret;
- if (PageFsCache(page))
- nfs_fscache_release_page(page, GFP_KERNEL);
+ nfs_fscache_release_page(page, GFP_KERNEL);
req = nfs_find_and_lock_request(page);
ret = PTR_ERR(req);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c487810a2366..a0c4016413f1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
{
- struct svc_export *exp;
u32 fsidv[2];
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
- exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
- /*
- * We shouldn't have accepting an nfsv4 request at all if we
- * don't have a pseudoexport!:
- */
- if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
- exp = ERR_PTR(-ESERVERFAULT);
- return exp;
+ return rqst_exp_find(rqstp, FSID_NUM, fsidv);
}
/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c194793b642b..8715d194561a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -752,6 +752,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
+ else
+ host_err = ima_file_check(*filp, access);
out_nfserr:
err = nfserrno(host_err);
out:
@@ -2127,7 +2129,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
*/
path.mnt = exp->ex_path.mnt;
path.dentry = dentry;
- err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
nfsd_out:
return err? nfserrno(err) : 0;
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 17584c524486..105b508b47a8 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2829,7 +2829,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
- if (flag || nilfs_segctor_confirm(sci))
+ if (flag || !nilfs_segctor_confirm(sci))
nilfs_segctor_write_out(sci);
WARN_ON(!list_empty(&sci->sc_copied_buffers));
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3dae4a13f6e4..7e9df11260f4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@ bail:
return ret;
}
-/*
+/*
* ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
* particularly interested in the aio/dio case. Like the core uses
* i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
inode->i_sb->s_bdev, iov, offset,
- nr_segs,
+ nr_segs,
ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io);
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d43d34a1dd31..21c808f752d8 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
ocfs2_metadata_cache_io_unlock(ci);
- mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
+ mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
(unsigned long long)block, nr,
((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
flags);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index eda5b8bcddd5..5c9890006708 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
-/* Only sets a new threshold if there are no active regions.
+/* Only sets a new threshold if there are no active regions.
*
* No locking or otherwise interesting code is required for reading
* o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
"milliseconds\n", reg->hr_dev_name,
- jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
+ jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
o2quo_disk_timeout();
}
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
"seq %llu last %llu changed %u equal %u\n",
slot->ds_node_num, (long long)slot->ds_last_generation,
le32_to_cpu(hb_block->hb_cksum),
- (unsigned long long)le64_to_cpu(hb_block->hb_seq),
+ (unsigned long long)le64_to_cpu(hb_block->hb_seq),
(unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
slot->ds_equal_samples);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 334f231a422c..d8d0c65ac03c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_INFO "o2net: no longer connected to "
+ printk(KERN_NOTICE "o2net: no longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
if (!was_valid && valid) {
o2quo_conn_up(o2net_num_from_nn(nn));
cancel_delayed_work(&nn->nn_connect_expired);
- printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+ printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
"connected to" : "accepted connection from",
SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
cond_resched();
continue;
}
- mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
+ mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
" failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
o2net_ensure_shutdown(nn, sc, 0);
break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
do_gettimeofday(&now);
- printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
+ printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
"seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
mlog(ML_NOTICE, "here are some times that might help debug the "
"situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
"%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
- sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
+ sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
now.tv_sec, (long) now.tv_usec,
sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
sc->sc_tv_advance_start.tv_sec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..96fa7ebc530c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -32,10 +32,10 @@
* on their number */
#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
-/*
+/*
* This version number represents quite a lot, unfortunately. It not
* only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol. It should
+ * locking semantics of the file system using the protocol. It should
* be somewhere else, I'm sure, but right now it isn't.
*
* With version 11, we separate out the filesystem locking portion. The
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index b5786a787fab..3cfa114aa391 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
} while (0)
-#define DLM_LKSB_UNUSED1 0x01
+#define DLM_LKSB_UNUSED1 0x01
#define DLM_LKSB_PUT_LVB 0x02
#define DLM_LKSB_GET_LVB 0x04
#define DLM_LKSB_UNUSED2 0x08
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 01cf8cc3d286..dccc439fa087 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
dlm_lock_put(lock);
/* free up the reserved bast that we are cancelling.
* guaranteed that this will not be the last reserved
- * ast because *both* an ast and a bast were reserved
+ * ast because *both* an ast and a bast were reserved
* to get to this point. the res->spinlock will not be
* taken here */
dlm_lockres_release_ast(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ca96bce50e18..f283bce776b4 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
/* instead of logging the same network error over
* and over, sleep here and wait for the heartbeat
* to notice the node is dead. times out after 5s. */
- dlm_wait_for_node_death(dlm, res->owner,
+ dlm_wait_for_node_death(dlm, res->owner,
DLM_NODE_DEATH_WAIT_MAX);
ret = DLM_RECOVERING;
mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 42b0bad7a612..0cd24cf54396 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
assert_spin_locked(&res->spinlock);
stringify_lockname(res->lockname.name, res->lockname.len,
- buf, sizeof(buf) - 1);
+ buf, sizeof(buf));
printk("lockres: %s, owner=%u, state=%u\n",
buf, res->owner, res->state);
printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0334000676d3..988c9055fd4e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
}
/* Once the dlm ctxt is marked as leaving then we don't want
- * to be put in someone's domain map.
+ * to be put in someone's domain map.
* Also, explicitly disallow joining at certain troublesome
* times (ie. during recovery). */
if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 437698e9465f..733337772671 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
}
dlm_revert_pending_lock(res, lock);
dlm_lock_put(lock);
- } else if (dlm_is_recovery_lock(res->lockname.name,
+ } else if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* special case for the $RECOVERY lock.
* there will never be an AST delivered to put
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 03ccf9a7b1f4..a659606dcb95 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
struct dlm_master_list_entry *mle;
assert_spin_locked(&dlm->spinlock);
-
+
list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
if (node_up)
dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
__dlm_insert_mle(dlm, mle);
/* still holding the dlm spinlock, check the recovery map
- * to see if there are any nodes that still need to be
+ * to see if there are any nodes that still need to be
* considered. these will not appear in the mle nodemap
* but they might own this lockres. wait on them. */
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
msleep(500);
}
continue;
- }
+ }
dlm_kick_recovery_thread(dlm);
msleep(1000);
@@ -939,8 +939,8 @@ wait:
res->lockname.name, blocked);
if (++tries > 20) {
mlog(ML_ERROR, "%s:%.*s: spinning on "
- "dlm_wait_for_lock_mastery, blocked=%d\n",
- dlm->name, res->lockname.len,
+ "dlm_wait_for_lock_mastery, blocked=%d\n",
+ dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
b = (mle->type == DLM_MLE_BLOCK);
if ((*blocked && !b) || (!*blocked && b)) {
- mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
+ mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
dlm->name, res->lockname.len, res->lockname.name,
*blocked, b);
*blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
}
mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
dlm->node_num, res->lockname.len, res->lockname.name);
- ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
+ ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
DLM_ASSERT_MASTER_MLE_CLEANUP);
if (ret < 0) {
mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
if (r & DLM_ASSERT_RESPONSE_REASSERT) {
mlog(0, "%.*s: node %u create mles on other "
- "nodes and requests a re-assert\n",
+ "nodes and requests a re-assert\n",
namelen, lockname, to);
reassert = 1;
}
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
goto done;
- }
+ }
}
}
spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
int extra_ref = 0;
int nn = -1;
int rr, err = 0;
-
+
spin_lock(&mle->spinlock);
if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
/* MASTER mle: if any bits set in the response map
* then the calling node needs to re-assert to clear
* up nodes that this node contacted */
- while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
+ while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
nn+1)) < O2NM_MAX_NODES) {
if (nn != dlm->node_num && nn != assert->node_idx)
master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
__dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
- *ret_data = (void *)res;
+ *ret_data = (void *)res;
dlm_put(dlm);
return -EINVAL;
}
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
item->u.am.request_from = request_from;
item->u.am.flags = flags;
- if (ignore_higher)
- mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
+ if (ignore_higher)
+ mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
res->lockname.name);
-
+
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
* think that $RECOVERY is currently mastered by a dead node. If so,
* we wait a short time to allow that node to get notified by its own
* heartbeat stack, then check again. All $RECOVERY lock resources
- * mastered by dead nodes are purged when the hearbeat callback is
+ * mastered by dead nodes are purged when the hearbeat callback is
* fired, so we can know for sure that it is safe to continue once
* the node returns a live node or no node. */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
ret = -EAGAIN;
}
spin_unlock(&dlm->spinlock);
- mlog(0, "%s: reco lock master is %u\n", dlm->name,
+ mlog(0, "%s: reco lock master is %u\n", dlm->name,
master);
break;
}
@@ -2602,7 +2602,7 @@ fail:
mlog(0, "%s:%.*s: timed out during migration\n",
dlm->name, res->lockname.len, res->lockname.name);
- /* avoid hang during shutdown when migrating lockres
+ /* avoid hang during shutdown when migrating lockres
* to a node which also goes down */
if (dlm_is_node_dead(dlm, target)) {
mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
spin_unlock(&res->spinlock);
- /* target has died, so make the caller break out of the
+ /* target has died, so make the caller break out of the
* wait_event, but caller must recheck the domain_map */
spin_lock(&dlm->spinlock);
if (!test_bit(mig_target, dlm->domain_map))
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 2f9e4e19a4f2..344bcf90cbf4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
if (lock->ml.node == dead_node) {
mlog(0, "AHA! there was "
"a $RECOVERY lock for dead "
- "node %u (%s)!\n",
+ "node %u (%s)!\n",
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
mres->master = master;
}
+static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
+ struct dlm_migratable_lockres *mres,
+ int queue)
+{
+ if (!lock->lksb)
+ return;
+
+ /* Ignore lvb in all locks in the blocked list */
+ if (queue == DLM_BLOCKED_LIST)
+ return;
+
+ /* Only consider lvbs in locks with granted EX or PR lock levels */
+ if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
+ return;
+
+ if (dlm_lvb_is_empty(mres->lvb)) {
+ memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+ return;
+ }
+
+ /* Ensure the lvb copied for migration matches in other valid locks */
+ if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
+ return;
+
+ mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
+ "node=%u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->lockres->lockname.len, lock->lockres->lockname.name,
+ lock->ml.node);
+ dlm_print_one_lock_resource(lock->lockres);
+ BUG();
+}
/* returns 1 if this lock fills the network structure,
* 0 otherwise */
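
The new dlm_prepare_lvb_for_migration() helper above applies a copy-or-verify rule: locks without an lksb and locks on the blocked list are ignored, only EX/PR grants contribute, the first valid LVB seeds the migration buffer, and any later contributor must match it exactly (a mismatch is a BUG). A standalone sketch of that rule, with illustrative types and a -1 return standing in for the kernel's BUG():

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define LVB_LEN 64

struct mig_buf { unsigned char lvb[LVB_LEN]; };

static int lvb_is_empty(const unsigned char *lvb)
{
        static const unsigned char zero[LVB_LEN];
        return memcmp(lvb, zero, LVB_LEN) == 0;
}

/* Returns 0 on success, -1 on a mismatch. */
static int merge_lvb(struct mig_buf *m, const unsigned char *lvb)
{
        if (lvb_is_empty(m->lvb)) {
                memcpy(m->lvb, lvb, LVB_LEN);   /* first valid value seeds the buffer */
                return 0;
        }
        return memcmp(m->lvb, lvb, LVB_LEN) ? -1 : 0;   /* later values must agree */
}

int main(void)
{
        struct mig_buf m = { { 0 } };
        unsigned char a[LVB_LEN] = { 1 }, b[LVB_LEN] = { 2 };

        assert(merge_lvb(&m, a) == 0);
        assert(merge_lvb(&m, a) == 0);
        printf("mismatch -> %d\n", merge_lvb(&m, b));   /* -1 */
        return 0;
}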
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
ml->list = queue;
if (lock->lksb) {
ml->flags = lock->lksb->flags;
- /* send our current lvb */
- if (ml->type == LKM_EXMODE ||
- ml->type == LKM_PRMODE) {
- /* if it is already set, this had better be a PR
- * and it has to match */
- if (!dlm_lvb_is_empty(mres->lvb) &&
- (ml->type == LKM_EXMODE ||
- memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
- mlog(ML_ERROR, "mismatched lvbs!\n");
- dlm_print_one_lock_resource(lock->lockres);
- BUG();
- }
- memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
- }
+ dlm_prepare_lvb_for_migration(lock, mres, queue);
}
ml->node = lock->ml.node;
mres->num_locks++;
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
struct dlm_lock *lock = NULL;
u8 from = O2NM_MAX_NODES;
unsigned int added = 0;
+ __be64 c;
mlog(0, "running %d locks for this lockres\n", mres->num_locks);
for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
/* lock is always created locally first, and
* destroyed locally last. it must be on the list */
if (!lock) {
- __be64 c = ml->cookie;
- mlog(ML_ERROR, "could not find local lock "
- "with cookie %u:%llu!\n",
+ c = ml->cookie;
+ mlog(ML_ERROR, "Could not find local lock "
+ "with cookie %u:%llu, node %u, "
+ "list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
dlm_get_lock_cookie_node(be64_to_cpu(c)),
- dlm_get_lock_cookie_seq(be64_to_cpu(c)));
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ if (lock->ml.node != ml->node) {
+ c = lock->ml.cookie;
+ mlog(ML_ERROR, "Mismatched node# in lock "
+ "cookie %u:%llu, name %.*s, node %u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ res->lockname.len, res->lockname.name,
+ lock->ml.node);
+ c = ml->cookie;
+ mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
+ "node %u, list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
__dlm_print_one_lock_resource(res);
BUG();
}
- BUG_ON(lock->ml.node != ml->node);
if (tmpq != queue) {
- mlog(0, "lock was on %u instead of %u for %.*s\n",
- j, ml->list, res->lockname.len, res->lockname.name);
+ c = ml->cookie;
+ mlog(0, "Lock cookie %u:%llu was on list %u "
+ "instead of list %u for %.*s\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ j, ml->list, res->lockname.len,
+ res->lockname.name);
+ __dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
continue;
}
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
* the lvb. */
memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
} else {
- /* otherwise, the node is sending its
+ /* otherwise, the node is sending its
* most recent valid lvb info */
BUG_ON(ml->type != LKM_EXMODE &&
ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
spin_lock(&res->spinlock);
list_for_each_entry(lock, queue, list) {
if (lock->ml.cookie == ml->cookie) {
- __be64 c = lock->ml.cookie;
+ c = lock->ml.cookie;
mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
"exists on this lockres!\n", dlm->name,
res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
assert_spin_locked(&res->spinlock);
if (res->owner == dlm->node_num)
- /* if this node owned the lockres, and if the dead node
+ /* if this node owned the lockres, and if the dead node
* had an EX when he died, blank out the lvb */
search_node = dead_node;
else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
/* this node is the lockres master:
* 1) remove any stale locks for the dead node
- * 2) if the dead node had an EX when he died, blank out the lvb
+ * 2) if the dead node had an EX when he died, blank out the lvb
*/
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
"dropping ref from lockres\n", dlm->name,
res->lockname.len, res->lockname.name, freed, dead_node);
- BUG_ON(!test_bit(dead_node, res->refmap));
+ if(!test_bit(dead_node, res->refmap)) {
+ mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
+ "but ref was not set\n", dlm->name,
+ res->lockname.len, res->lockname.name, freed, dead_node);
+ __dlm_print_one_lock_resource(res);
+ }
dlm_lockres_clear_refmap_bit(dead_node, res);
} else if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
}
spin_unlock(&res->spinlock);
continue;
- }
+ }
spin_lock(&res->spinlock);
/* zero the lvb if necessary */
dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
* this function on each node racing to become the recovery
* master will not stop attempting this until either:
* a) this node gets the EX (and becomes the recovery master),
- * or b) dlm->reco.new_master gets set to some nodenum
+ * or b) dlm->reco.new_master gets set to some nodenum
* != O2NM_INVALID_NODE_NUM (another node will do the reco).
* so each time a recovery master is needed, the entire cluster
* will sync at this point. if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
-again:
+again:
memset(&lksb, 0, sizeof(lksb));
ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
if (ret == DLM_NORMAL) {
mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
dlm->name, dlm->node_num);
-
- /* got the EX lock. check to see if another node
+
+ /* got the EX lock. check to see if another node
* just became the reco master */
if (dlm_reco_master_ready(dlm)) {
mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
/* see if recovery was already finished elsewhere */
spin_lock(&dlm->spinlock);
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
- status = -EINVAL;
+ status = -EINVAL;
mlog(0, "%s: got reco EX lock, but "
"node got recovered already\n", dlm->name);
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
mlog(ML_ERROR, "%s: new master is %u "
- "but no dead node!\n",
+ "but no dead node!\n",
dlm->name, dlm->reco.new_master);
BUG();
}
@@ -2468,7 +2523,7 @@ again:
* set the master and send the messages to begin recovery */
if (!status) {
mlog(0, "%s: dead=%u, this=%u, sending "
- "begin_reco now\n", dlm->name,
+ "begin_reco now\n", dlm->name,
dlm->reco.dead_node, dlm->node_num);
status = dlm_send_begin_reco_message(dlm,
dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
dlm->name, dlm->node_num);
/* another node is master. wait on
- * reco.new_master != O2NM_INVALID_NODE_NUM
+ * reco.new_master != O2NM_INVALID_NODE_NUM
* for at most one second */
wait_event_timeout(dlm->dlm_reco_thread_wq,
dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
"begin reco msg (%d)\n", dlm->name, nodenum, ret);
ret = 0;
}
- if (ret == -EAGAIN) {
+
+ /*
+ * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
+ * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
+ * We are handling both for compatibility reasons.
+ */
+ if (ret == -EAGAIN || ret == EAGAIN) {
mlog(0, "%s: trying to start recovery of node "
"%u, but node %u is waiting for last recovery "
"to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
}
if (ret < 0) {
struct dlm_lock_resource *res;
- /* this is now a serious problem, possibly ENOMEM
+ /* this is now a serious problem, possibly ENOMEM
* in the network stack. must retry */
mlog_errno(ret);
mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
} else {
mlog(ML_ERROR, "recovery lock not found\n");
}
- /* sleep for a bit in hopes that we can avoid
+ /* sleep for a bit in hopes that we can avoid
* another ENOMEM */
msleep(100);
goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
}
if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
- "node %u changing it to %u\n", dlm->name,
+ "node %u changing it to %u\n", dlm->name,
dlm->reco.dead_node, br->node_idx, br->dead_node);
}
dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
if (ret < 0) {
mlog_errno(ret);
if (dlm_is_host_down(ret)) {
- /* this has no effect on this recovery
- * session, so set the status to zero to
+ /* this has no effect on this recovery
+ * session, so set the status to zero to
* finish out the last recovery */
mlog(ML_ERROR, "node %u went down after this "
"node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
mlog(0, "%s: node %u finalizing recovery stage%d of "
"node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
-
+
spin_lock(&dlm->spinlock);
if (dlm->reco.new_master != fr->node_idx) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 00f53b2aea76..49e29ecd0201 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
DLM_UNLOCK_REGRANT_LOCK|
DLM_UNLOCK_CLEAR_CONVERT_TYPE);
- } else if (status == DLM_RECOVERING ||
- status == DLM_MIGRATING ||
+ } else if (status == DLM_RECOVERING ||
+ status == DLM_MIGRATING ||
status == DLM_FORWARD) {
/* must clear the actions because this unlock
* is about to be retried. cannot free or do
@@ -661,14 +661,14 @@ retry:
if (call_ast) {
mlog(0, "calling unlockast(%p, %d)\n", data, status);
if (is_master) {
- /* it is possible that there is one last bast
+ /* it is possible that there is one last bast
* pending. make sure it is flushed, then
* call the unlockast.
* not an issue if this is a mastered remotely,
* since this lock has been removed from the
* lockres queues and cannot be found. */
dlm_kick_thread(dlm, NULL);
- wait_event(dlm->ast_wq,
+ wait_event(dlm->ast_wq,
dlm_lock_basts_flushed(dlm, lock));
}
(*unlockast)(data, status);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c5e4a49e3a12..e044019cb3b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
lockres->l_level = lockres->l_requested;
+
+ /*
+ * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+ * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+ * downconverting the lock before the upconvert has fully completed.
+ */
+ lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
assert_spin_locked(&lockres->l_lock);
- lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
if (level > lockres->l_blocking) {
/* only schedule a downconvert if we haven't already scheduled
* one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
lockres->l_blocking = level;
}
+ if (needs_downconvert)
+ lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
mlog_exit(needs_downconvert);
return needs_downconvert;
}
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
if (convert)
lockres->l_action = OCFS2_AST_INVALID;
else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
again:
wait = 0;
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
if (catch_signals && signal_pending(current)) {
ret = -ERESTARTSYS;
- goto out;
+ goto unlock;
}
- spin_lock_irqsave(&lockres->l_lock, flags);
-
mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
"Cluster lock called on freeing lockres %s! flags "
"0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
goto unlock;
}
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+ /*
+ * We've upconverted. If the lock now has a level we can
+ * work with, we take it. If, however, the lock is not at the
+ * required level, we go thru the full cycle. One way this could
+ * happen is if a process requesting an upconvert to PR is
+ * closely followed by another requesting upconvert to an EX.
+ * If the process requesting EX lands here, we want it to
+ * continue attempting to upconvert and let the process
+ * requesting PR take the lock.
+ * If multiple processes request upconvert to PR, the first one
+ * here will take the lock. The others will have to go thru the
+ * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+ * downconvert request.
+ */
+ if (level <= lockres->l_level)
+ goto update_holders;
+ }
+
if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
!ocfs2_may_continue_on_blocked_lock(lockres, level)) {
/* is the lock is currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
goto again;
}
+update_holders:
/* Ok, if we get here then we're good to go. */
ocfs2_inc_holders(lockres, level);
ret = 0;
unlock:
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
/*
@@ -3155,7 +3187,7 @@ out:
/* Mark the lockres as being dropped. It will no longer be
* queued if blocking, but we still may have to wait on it
* being dequeued from the downconvert thread before we can consider
- * it safe to drop.
+ * it safe to drop.
*
* You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
unsigned long flags;
int blocking;
int new_level;
+ int level;
int ret = 0;
int set_lvb = 0;
unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
spin_lock_irqsave(&lockres->l_lock, flags);
- BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
recheck:
+ /*
+ * Is it still blocking? If not, we have no more work to do.
+ */
+ if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+ BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = 0;
+ goto leave;
+ }
+
if (lockres->l_flags & OCFS2_LOCK_BUSY) {
/* XXX
* This is a *big* race. The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
goto leave;
}
+ /*
+ * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+ * set when the ast is received for an upconvert just before the
+ * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+ * on the heels of the ast, we want to delay the downconvert just
+ * enough to allow the up requestor to do its task. Because this
+ * lock is in the blocked queue, the lock will be downconverted
+ * as soon as the requestor is done with the lock.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+ goto leave_requeue;
+
+ /*
+ * How can we block and yet be at NL? We were trying to upconvert
+ * from NL and got canceled. The code comes back here, and now
+ * we notice and clear BLOCKING.
+ */
+ if (lockres->l_level == DLM_LOCK_NL) {
+ BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+ lockres->l_blocking = DLM_LOCK_NL;
+ lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto leave;
+ }
+
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
* may sleep, so we save off a copy of what we're blocking as
* it may change while we're not holding the spin lock. */
blocking = lockres->l_blocking;
+ level = lockres->l_level;
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
goto leave;
spin_lock_irqsave(&lockres->l_lock, flags);
- if (blocking != lockres->l_blocking) {
+ if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
goto recheck;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 15713cbb865c..19ad145d2af3 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
(unsigned long long)blkno, generation);
}
-
+
*max_len = len;
bail:
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index d35a27f4523e..5328529e7fd2 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
emi->ei_clusters += ins->ei_clusters;
return 1;
} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
- (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
+ (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
ins->ei_flags == emi->ei_flags) {
emi->ei_phys = ins->ei_phys;
emi->ei_cpos = ins->ei_cpos;
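
The extent_map.c fix above restores the rule that two extents may only be merged when they are adjacent in both the logical (cpos) and the physical (phys) spaces; the old code compared the logical end against the physical start. A small standalone check of that condition, with field names following the hunk but the struct itself illustrative:

#include <stdio.h>

struct extent {
        unsigned int cpos;      /* logical start, in clusters */
        unsigned int phys;      /* physical start, in clusters */
        unsigned int clusters;  /* length */
};

static int can_prepend(const struct extent *ins, const struct extent *emi)
{
        return ins->phys + ins->clusters == emi->phys &&
               ins->cpos + ins->clusters == emi->cpos;  /* was compared to emi->phys */
}

int main(void)
{
        struct extent emi = { .cpos = 20, .phys = 120, .clusters = 10 };
        struct extent ins = { .cpos = 10, .phys = 110, .clusters = 10 };

        printf("%d\n", can_prepend(&ins, &emi));        /* 1: adjacent in both spaces */
        return 0;
}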
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 06ccf6a86d35..558ce0312421 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
int ret;
offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
- /* ugh. in prepare/commit_write, if from==to==start of block, we
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
** skip the prepare. make sure we never send an offset for the start
** of a block
*/
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
struct inode *inode = dentry->d_inode;
loff_t saved_pos, end;
- /*
+ /*
* We start with a read level meta lock and only jump to an ex
* if we need to make modifications here.
*/
@@ -2013,8 +2013,8 @@ out_dio:
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
- if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
- (file->f_flags & O_DIRECT && has_refcount)) {
+ if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ ((file->f_flags & O_DIRECT) && has_refcount)) {
ret = filemap_fdatawrite_range(file->f_mapping, pos,
pos + count - 1);
if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
pos + count - 1);
}
- /*
+ /*
* deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
* function pointer which is called when o_direct io completes so that
* it can unlock our rw lock. (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
goto bail;
}
- /*
+ /*
* buffered reads protect themselves in ->readpage(). O_DIRECT reads
* need locks to protect pending reads from racing with truncate.
*/
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
* We're fine letting folks race truncates and extending
* writes with read across the cluster, just like they can
* locally. Hence no rw_lock during read.
- *
+ *
* Take and drop the meta data lock to update inode fields
* like i_size. This allows the checks down below
- * generic_file_aio_read() a chance of actually working.
+ * generic_file_aio_read() a chance of actually working.
*/
ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
bail:
if (have_alloc_sem)
up_read(&inode->i_alloc_sem);
- if (rw_level != -1)
+ if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
mlog_exit(ret);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0297fb8982b8..88459bdd1ff3 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
status = ocfs2_try_open_lock(inode, 0);
if (status) {
- make_bad_inode(inode);
+ make_bad_inode(inode);
return status;
}
}
@@ -684,7 +684,7 @@ bail:
return status;
}
-/*
+/*
* Serialize with orphan dir recovery. If the process doing
* recovery on this orphan dir does an iget() with the dir
* i_mutex held, we'll deadlock here. Instead we detect this
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 31fbb0619510..7d9d9c132cef 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/compat.h>
#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
+ bool preserve;
+ struct reflink_arguments args;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
switch (cmd) {
case OCFS2_IOC32_GETFLAGS:
cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case OCFS2_IOC_GROUP_EXTEND:
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
- case OCFS2_IOC_REFLINK:
break;
+ case OCFS2_IOC_REFLINK:
+ if (copy_from_user(&args, (struct reflink_arguments *)arg,
+ sizeof(args)))
+ return -EFAULT;
+ preserve = (args.preserve != 0);
+
+ return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
+ compat_ptr(args.new_path), preserve);
default:
return -ENOIOCTLCMD;
}
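
The ioctl.c hunk handles OCFS2_IOC_REFLINK in the 32-bit compat path by copying the argument block in with copy_from_user() and widening the embedded user pointers with compat_ptr() before calling the native handler. The sketch below shows only that copy/convert pattern; the struct layout and do_native_op() are hypothetical stand-ins, not OCFS2's definitions.

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_args {           /* hypothetical 64-bit-clean ioctl argument */
        __u64 old_path;         /* user pointer stored as a u64 */
        __u64 new_path;
        __u64 preserve;
};

/* hypothetical native handler */
extern long do_native_op(struct inode *inode, const char __user *old_path,
                         const char __user *new_path, int preserve);

static long example_compat_ioctl(struct inode *inode, unsigned long arg)
{
        struct example_args args;

        if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
                return -EFAULT;

        /* compat_ptr() turns the 32-bit user address into a proper __user pointer */
        return do_native_op(inode, compat_ptr(args.old_path),
                            compat_ptr(args.new_path), args.preserve != 0);
}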
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bf34c491ae96..9336c60e3a36 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
status = -ENOENT;
mlog_errno(status);
return status;
- }
+ }
mutex_lock(&orphan_dir_inode->i_mutex);
status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9362eea7424b..740f448041e2 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a
call to dlm_lock. Only
exists with BUSY set. */
+#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
+ * from downconverting
+ * before the upconvert
+ * has completed */
struct ocfs2_lock_res_ops;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 1a1a679e51b5..7638a38c32bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
}
-static inline int ocfs2_max_inline_data(int blocksize)
+static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
+ struct ocfs2_dinode *di)
{
- return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+ if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
+ return blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
+ di->i_xattr_inline_size;
+ else
+ return blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data);
}
static inline int ocfs2_extent_recs_per_inode(int blocksize)
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 74db2be75dd6..8ae65c9c020c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
while (offset < end) {
page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
if (map_end > end)
map_end = end;
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
page = grab_cache_page(mapping, page_index);
- /* This page can't be dirtied before we CoW it out. */
- BUG_ON(PageDirty(page));
+ /*
+ * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+ * can't be dirtied before we CoW it out.
+ */
+ if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+ BUG_ON(PageDirty(page));
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
while (offset < end) {
page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
if (map_end > end)
map_end = end;
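
The (loff_t) casts in the two refcounttree.c hunks avoid a 32-bit shift overflow: page_index is a 32-bit quantity on 32-bit kernels, so (page_index + 1) << PAGE_CACHE_SHIFT wraps for offsets at or above 4GB before it is ever widened. A small demonstration with illustrative values (4KB pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4KB pages, as an example */

int main(void)
{
        uint32_t page_index = 0x100000;         /* the page at byte offset 4GB */
        uint64_t wrong = (uint64_t)((page_index + 1) << PAGE_SHIFT);
        uint64_t right = ((uint64_t)page_index + 1) << PAGE_SHIFT;

        printf("wrong=0x%llx right=0x%llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        /* wrong wraps to 0x1000, right is 0x100001000 */
        return 0;
}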
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index e49c41050264..3038c92af493 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
u32 dlm_key;
struct dlm_ctxt *dlm;
struct o2dlm_private *priv;
- struct dlm_protocol_version dlm_version;
+ struct dlm_protocol_version fs_version;
BUG_ON(conn == NULL);
BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
/* used by the dlm code to make message headers unique, each
* node in this domain must agree on this. */
dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
- dlm_version.pv_major = conn->cc_version.pv_major;
- dlm_version.pv_minor = conn->cc_version.pv_minor;
+ fs_version.pv_major = conn->cc_version.pv_major;
+ fs_version.pv_minor = conn->cc_version.pv_minor;
- dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+ dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
if (IS_ERR(dlm)) {
rc = PTR_ERR(dlm);
mlog_errno(rc);
goto out_free;
}
- conn->cc_version.pv_major = dlm_version.pv_major;
- conn->cc_version.pv_minor = dlm_version.pv_minor;
+ conn->cc_version.pv_major = fs_version.pv_major;
+ conn->cc_version.pv_minor = fs_version.pv_minor;
conn->cc_lockspace = dlm;
dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26069917a9f5..755cd49a5ef3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
"file system, but write access is "
"unavailable.\n");
else
- mlog_errno(status);
+ mlog_errno(status);
goto read_super_error;
}
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 49b133ccbf11..32499d213fc4 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
}
memcpy(link, target, len);
- nd_set_link(nd, link);
bail:
+ nd_set_link(nd, status ? ERR_PTR(status) : link);
brelse(bh);
mlog_exit(status);
- return status ? ERR_PTR(status) : link;
+ return NULL;
}
static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
- char *link = cookie;
-
- kfree(link);
+ char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
}
const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index c61369342a27..a0a120e82b97 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
}
/* Warning: even if it returns true, this does *not* guarantee that
- * the block is stored in our inode metadata cache.
- *
+ * the block is stored in our inode metadata cache.
+ *
* This can be called under lock_buffer()
*/
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index e42bbd843ed1..58324c299165 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2369,16 +2369,30 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct pid_namespace *ns = dentry->d_sb->s_fs_info;
pid_t tgid = task_tgid_nr_ns(current, ns);
- char tmp[PROC_NUMBUF];
- if (!tgid)
- return ERR_PTR(-ENOENT);
- sprintf(tmp, "%d", task_tgid_nr_ns(current, ns));
- return ERR_PTR(vfs_follow_link(nd,tmp));
+ char *name = ERR_PTR(-ENOENT);
+ if (tgid) {
+ name = __getname();
+ if (!name)
+ name = ERR_PTR(-ENOMEM);
+ else
+ sprintf(name, "%d", tgid);
+ }
+ nd_set_link(nd, name);
+ return NULL;
+}
+
+static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
}
static const struct inode_operations proc_self_inode_operations = {
.readlink = proc_self_readlink,
.follow_link = proc_self_follow_link,
+ .put_link = proc_self_put_link,
};
/*
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9087b10209e6..2df0f5c7c60b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1497,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
args.objectid = key->on_disk_key.k_objectid;
args.dirid = key->on_disk_key.k_dir_id;
+ reiserfs_write_unlock(s);
inode = iget5_locked(s, key->on_disk_key.k_objectid,
reiserfs_find_actor, reiserfs_init_locked_inode,
(void *)(&args));
+ reiserfs_write_lock(s);
if (!inode)
return ERR_PTR(-ENOMEM);
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 220b758523ae..6a06a1d1ea7b 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
if (!sd_attrs)
return -ENOMEM;
sd->s_iattr = sd_attrs;
- } else {
- /* attributes were changed at least once in past */
- iattrs = &sd_attrs->ia_iattr;
-
- if (ia_valid & ATTR_UID)
- iattrs->ia_uid = iattr->ia_uid;
- if (ia_valid & ATTR_GID)
- iattrs->ia_gid = iattr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- iattrs->ia_atime = iattr->ia_atime;
- if (ia_valid & ATTR_MTIME)
- iattrs->ia_mtime = iattr->ia_mtime;
- if (ia_valid & ATTR_CTIME)
- iattrs->ia_ctime = iattr->ia_ctime;
- if (ia_valid & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
- iattrs->ia_mode = sd->s_mode = mode;
- }
+ }
+ /* attributes were changed at least once in past */
+ iattrs = &sd_attrs->ia_iattr;
+
+ if (ia_valid & ATTR_UID)
+ iattrs->ia_uid = iattr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ iattrs->ia_gid = iattr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+ iattrs->ia_atime = iattr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ iattrs->ia_mtime = iattr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ iattrs->ia_ctime = iattr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+ iattrs->ia_mode = sd->s_mode = mode;
}
return 0;
}
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 1e67c441ea82..f745948b61e4 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -77,6 +77,7 @@ struct drm_nouveau_gpuobj_free {
#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 2be7e1249b6f..c7645f480d12 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -68,7 +68,8 @@
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D 2
#define DRM_VMW_PARAM_FIFO_OFFSET 3
-
+#define DRM_VMW_PARAM_HW_CAPS 4
+#define DRM_VMW_PARAM_FIFO_CAPS 5
/**
* struct drm_vmw_getparam_arg
@@ -181,6 +182,8 @@ struct drm_vmw_context_arg {
* The size of the array should equal the total number of mipmap levels.
* @shareable: Boolean whether other clients (as identified by file descriptors)
* may reference this surface.
+ * @scanout: Boolean whether the surface is intended to be used as a
+ * scanout.
*
* Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
* Output data from the DRM_VMW_REF_SURFACE Ioctl.
@@ -192,7 +195,7 @@ struct drm_vmw_surface_create_req {
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
uint64_t size_addr;
int32_t shareable;
- uint32_t pad64;
+ int32_t scanout;
};
/**
@@ -295,17 +298,28 @@ union drm_vmw_surface_reference_arg {
*
* @commands: User-space address of a command buffer cast to an uint64_t.
* @command-size: Size in bytes of the command buffer.
+ * @throttle-us: Sleep until software is less than @throttle_us
+ * microseconds ahead of hardware. The driver may round this value
+ * to the nearest kernel tick.
* @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
* uint64_t.
+ * @version: Allows expanding the execbuf ioctl parameters without breaking
+ * backwards compatibility, since user-space will always tell the kernel
+ * which version it uses.
+ * @flags: Execbuf flags. None currently.
*
* Argument to the DRM_VMW_EXECBUF Ioctl.
*/
+#define DRM_VMW_EXECBUF_VERSION 0
+
struct drm_vmw_execbuf_arg {
uint64_t commands;
uint32_t command_size;
- uint32_t pad64;
+ uint32_t throttle_us;
uint64_t fence_rep;
+ uint32_t version;
+ uint32_t flags;
};
/**
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index ab94335b4bb9..6816be6c3f77 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -1,5 +1,9 @@
/*
- * linux/include/asm-arm/hardware/amba.h
+ * linux/include/amba/bus.h
+ *
+ * This device type deals with ARM PrimeCells and anything else that
+ * presents a proper CID (0xB105F00D) at the end of the I/O register
+ * region or that is derived from a PrimeCell.
*
* Copyright (C) 2003 Deep Blue Solutions Ltd, All Rights Reserved.
*
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 38a6948ce0c2..20f31567ccee 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -647,9 +647,9 @@ static inline int ata_id_has_large_logical_sectors(const u16 *id)
return id[ATA_ID_SECTOR_SIZE] & (1 << 13);
}
-static inline u8 ata_id_logical_per_physical_sectors(const u16 *id)
+static inline u16 ata_id_logical_per_physical_sectors(const u16 *id)
{
- return id[ATA_ID_SECTOR_SIZE] & 0xf;
+ return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf);
}
static inline int ata_id_has_lba48(const u16 *id)
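The hunk above now treats the low nibble of ATA IDENTIFY word 106 (ATA_ID_SECTOR_SIZE) as a power-of-two exponent, so the helper returns 1 << N logical sectors per physical sector instead of the raw nibble, and widens the return type to u16. A minimal userspace sketch of that decoding; the helper name and the sample word value are invented for illustration, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Word 106 of ATA IDENTIFY DEVICE data: bits 3:0 hold log2 of the
 * number of logical sectors per physical sector. */
static uint16_t logical_per_physical(uint16_t word106)
{
    return (uint16_t)(1u << (word106 & 0xf));
}

int main(void)
{
    uint16_t word106 = 0x6003;  /* example value: exponent 3 -> 8 logical per physical */

    printf("logical sectors per physical sector: %u\n",
           logical_per_physical(word106));
    return 0;
}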
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5c8018977efa..1896e868854f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -461,8 +461,7 @@ struct request_queue
#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
-#define QUEUE_FLAG_CQ 16 /* hardware does queuing */
-#define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
+#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
@@ -586,7 +585,6 @@ enum {
#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_queuing(q) test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5be3dab4a695..188fcae10a99 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -15,6 +15,7 @@
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
+# define __percpu
#endif
#ifdef __KERNEL__
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b1bcb275b596..ebb1cd5bc241 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -729,6 +729,7 @@ struct inode {
uid_t i_uid;
gid_t i_gid;
dev_t i_rdev;
+ unsigned int i_blkbits;
u64 i_version;
loff_t i_size;
#ifdef __NEED_I_SIZE_ORDERED
@@ -738,7 +739,6 @@ struct inode {
struct timespec i_mtime;
struct timespec i_ctime;
blkcnt_t i_blocks;
- unsigned int i_blkbits;
unsigned short i_bytes;
umode_t i_mode;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 070ba0621738..5977b724f7c6 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -44,7 +44,7 @@ static inline int hw_breakpoint_type(struct perf_event *bp)
return bp->attr.bp_type;
}
-static inline int hw_breakpoint_len(struct perf_event *bp)
+static inline unsigned long hw_breakpoint_len(struct perf_event *bp)
{
return bp->attr.bp_len;
}
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 99dc6d5cf7e5..975837e7d6c0 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -17,7 +17,7 @@ struct linux_binprm;
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_inode_alloc(struct inode *inode);
extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask);
+extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
extern void ima_counts_get(struct file *file);
@@ -38,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
return;
}
-static inline int ima_path_check(struct path *path, int mask)
+static inline int ima_file_check(struct file *file, int mask)
{
return 0;
}
diff --git a/include/linux/input.h b/include/linux/input.h
index 6e04c9e811f3..f44ee9114401 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -376,6 +376,7 @@ struct input_absinfo {
#define KEY_DISPLAY_OFF 245 /* display device to off state */
#define KEY_WIMAX 246
+#define KEY_RFKILL 247 /* Key that controls all radios */
/* Range 248 - 255 is reserved for special needs of AT keyboard driver */
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 6f6c5f300af6..bc0fc795bd35 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -124,7 +124,7 @@ extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
*/
static inline bool kfifo_initialized(struct kfifo *fifo)
{
- return fifo->buffer != 0;
+ return fifo->buffer != NULL;
}
/**
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8fa71874113f..a177698d95e2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -211,11 +211,9 @@ struct perf_event_attr {
__u32 wakeup_watermark; /* bytes before wakeup */
};
- __u32 __reserved_2;
-
- __u64 bp_addr;
__u32 bp_type;
- __u32 bp_len;
+ __u64 bp_addr;
+ __u64 bp_len;
};
/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abdfacc58653..78efe7c485ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
static inline void touch_softlockup_watchdog(void)
{
}
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
static inline void touch_all_softlockup_watchdogs(void)
{
}
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 057a2e010758..f508c651e53d 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -51,6 +51,9 @@ struct virtqueue {
* This re-enables callbacks; it returns "false" if there are pending
* buffers in the queue, to detect a possible race between the driver
* checking for more work, and enabling callbacks.
+ * @detach_unused_buf: detach first unused buffer
+ * vq: the struct virtqueue we're talking about.
+ * Returns NULL or the "data" token handed to add_buf
*
* Locking rules are straightforward: the driver is responsible for
* locking. No two operations may be invoked simultaneously, with the exception
@@ -71,6 +74,7 @@ struct virtqueue_ops {
void (*disable_cb)(struct virtqueue *vq);
bool (*enable_cb)(struct virtqueue *vq);
+ void *(*detach_unused_buf)(struct virtqueue *vq);
};
/**
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index 1418f048cb34..a50ecd1b81a2 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -7,6 +7,7 @@
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
+#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
@@ -18,4 +19,18 @@ struct virtio_balloon_config
/* Number of pages we've actually got in balloon. */
__le32 actual;
};
+
+#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
+#define VIRTIO_BALLOON_S_SWAP_OUT 1 /* Amount of memory swapped out */
+#define VIRTIO_BALLOON_S_MAJFLT 2 /* Number of major faults */
+#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */
+#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
+#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
+#define VIRTIO_BALLOON_S_NR 6
+
+struct virtio_balloon_stat {
+ u16 tag;
+ u64 val;
+} __attribute__((packed));
+
#endif /* _LINUX_VIRTIO_BALLOON_H */
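The stats extension above defines a packed tag/value record, one per VIRTIO_BALLOON_S_* index, that a guest would queue on the new stats virtqueue. A small userspace sketch that fills such an array and shows the effect of the packed attribute; the constant spellings and sample values here are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define S_SWAP_IN  0
#define S_SWAP_OUT 1
#define S_MAJFLT   2
#define S_MINFLT   3
#define S_MEMFREE  4
#define S_MEMTOT   5
#define S_NR       6

struct balloon_stat {
    uint16_t tag;
    uint64_t val;
} __attribute__((packed));

int main(void)
{
    struct balloon_stat stats[S_NR];
    uint64_t sample[S_NR] = { 0, 0, 12, 3456, 1 << 20, 2 << 20 };  /* made-up numbers */

    for (int i = 0; i < S_NR; i++) {
        stats[i].tag = (uint16_t)i;
        stats[i].val = sample[i];
    }
    /* packed: 2 + 8 = 10 bytes per record, no padding */
    printf("each record is %zu bytes when packed\n", sizeof(stats[0]));
    return 0;
}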
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index fd294c56d571..e52029e98919 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,7 @@
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
#define VIRTIO_BLK_F_FLUSH 9 /* Cache flush command support */
+#define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */
struct virtio_blk_config {
/* The capacity (in 512-byte sectors). */
@@ -29,8 +30,20 @@ struct virtio_blk_config {
__u8 heads;
__u8 sectors;
} geometry;
+
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
__u32 blk_size;
+
+ /* the next 4 entries are guarded by VIRTIO_BLK_F_TOPOLOGY */
+ /* exponent for physical block per logical block. */
+ __u8 physical_block_exp;
+ /* alignment offset in logical blocks. */
+ __u8 alignment_offset;
+ /* minimum I/O size without performance penalty in logical blocks. */
+ __u16 min_io_size;
+ /* optimal sustained I/O size in logical blocks. */
+ __u32 opt_io_size;
+
} __attribute__((packed));
/*
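The new VIRTIO_BLK_F_TOPOLOGY fields above express alignment and I/O sizes in units of logical blocks, with physical_block_exp as a log2 factor. A hedged sketch of how a consumer might turn such a config into byte values; the struct mirrors the fields above, but the type name and sample numbers are assumptions:

#include <stdint.h>
#include <stdio.h>

struct blk_topology {
    uint32_t blk_size;            /* logical block size in bytes */
    uint8_t  physical_block_exp;  /* log2(logical blocks per physical block) */
    uint8_t  alignment_offset;    /* in logical blocks */
    uint16_t min_io_size;         /* in logical blocks */
    uint32_t opt_io_size;         /* in logical blocks */
};

int main(void)
{
    struct blk_topology t = { 512, 3, 0, 8, 128 };  /* e.g. a 4K-physical disk */

    printf("physical block size: %u bytes\n", t.blk_size << t.physical_block_exp);
    printf("alignment offset:    %u bytes\n", t.blk_size * t.alignment_offset);
    printf("minimum I/O size:    %u bytes\n", t.blk_size * t.min_io_size);
    printf("optimal I/O size:    %u bytes\n", t.blk_size * t.opt_io_size);
    return 0;
}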
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index fe885174cc1f..ae4f039515b4 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -3,19 +3,45 @@
#include <linux/types.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
-/* This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
- * anyone can use the definitions to implement compatible drivers/servers. */
+/*
+ * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
+ * anyone can use the definitions to implement compatible drivers/servers.
+ *
+ * Copyright (C) Red Hat, Inc., 2009, 2010
+ */
/* Feature bits */
#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
+#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
struct virtio_console_config {
/* columns of the screens */
__u16 cols;
/* rows of the screens */
__u16 rows;
+ /* max. number of ports this device can hold */
+ __u32 max_nr_ports;
+ /* number of ports added so far */
+ __u32 nr_ports;
} __attribute__((packed));
+/*
+ * A message that's passed between the Host and the Guest for a
+ * particular port.
+ */
+struct virtio_console_control {
+ __u32 id; /* Port number */
+ __u16 event; /* The kind of control event (see below) */
+ __u16 value; /* Extra information for the key */
+};
+
+/* Some events for control messages */
+#define VIRTIO_CONSOLE_PORT_READY 0
+#define VIRTIO_CONSOLE_CONSOLE_PORT 1
+#define VIRTIO_CONSOLE_RESIZE 2
+#define VIRTIO_CONSOLE_PORT_OPEN 3
+#define VIRTIO_CONSOLE_PORT_NAME 4
+#define VIRTIO_CONSOLE_PORT_REMOVE 5
#ifdef __KERNEL__
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index ba1ba0c5efd1..63d449807d9b 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -11,6 +11,8 @@ struct nf_conntrack_ecache;
struct netns_ct {
atomic_t count;
unsigned int expect_count;
+ unsigned int htable_size;
+ struct kmem_cache *nf_conntrack_cachep;
struct hlist_nulls_head *hash;
struct hlist_head *expect_hash;
struct hlist_nulls_head unconfirmed;
@@ -28,5 +30,6 @@ struct netns_ct {
#endif
int hash_vmalloc;
int expect_vmalloc;
+ char *slabname;
};
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2eb3814d6258..9a4b8b714079 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -40,6 +40,7 @@ struct netns_ipv4 {
struct xt_table *iptable_security;
struct xt_table *nat_table;
struct hlist_head *nat_bysource;
+ unsigned int nat_htable_size;
int nat_vmalloced;
#endif
diff --git a/init/main.c b/init/main.c
index dac44a9356a5..4cb47a159f02 100644
--- a/init/main.c
+++ b/init/main.c
@@ -657,9 +657,9 @@ asmlinkage void __init start_kernel(void)
proc_caches_init();
buffer_init();
key_init();
+ radix_tree_init();
security_init();
vfs_caches_init(totalram_pages);
- radix_tree_init();
signals_init();
/* rootfs populating might need page-writeback */
page_writeback_init();
diff --git a/kernel/futex.c b/kernel/futex.c
index d9b3a2228f9d..e7a35f1039e7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -530,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
return -EINVAL;
WARN_ON(!atomic_read(&pi_state->refcount));
- WARN_ON(pid && pi_state->owner &&
- pi_state->owner->pid != pid);
+
+ /*
+ * When pi_state->owner is NULL then the owner died
+ * and another waiter is on the fly. pi_state->owner
+ * is fixed up by the task which acquires
+ * pi_state->rt_mutex.
+ *
+ * We do not check for pid == 0 which can happen when
+ * the owner died and robust_list_exit() cleared the
+ * TID.
+ */
+ if (pid && pi_state->owner) {
+ /*
+ * Bail out if user space manipulated the
+ * futex value.
+ */
+ if (pid != task_pid_vnr(pi_state->owner))
+ return -EINVAL;
+ }
atomic_inc(&pi_state->refcount);
*ps = pi_state;
@@ -758,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (!pi_state)
return -EINVAL;
+ /*
+ * If current does not own the pi_state then the futex is
+ * inconsistent and user space fiddled with the futex value.
+ */
+ if (pi_state->owner != current)
+ return -EINVAL;
+
raw_spin_lock(&pi_state->pi_mutex.wait_lock);
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
@@ -1971,7 +1995,7 @@ retry_private:
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
- goto out;
+ goto out_put_key;
out_unlock_put_key:
queue_unlock(&q, hb);
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 8a5c7d55ac9f..967e66143e11 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -360,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
u64 old_addr = bp->attr.bp_addr;
+ u64 old_len = bp->attr.bp_len;
int old_type = bp->attr.bp_type;
- int old_len = bp->attr.bp_len;
int err = 0;
perf_event_disable(bp);
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 498cabba225e..35edbe22e9a9 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -80,7 +80,7 @@ int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
buffer = kmalloc(size, gfp_mask);
if (!buffer) {
- _kfifo_init(fifo, 0, 0);
+ _kfifo_init(fifo, NULL, 0);
return -ENOMEM;
}
@@ -97,6 +97,7 @@ EXPORT_SYMBOL(kfifo_alloc);
void kfifo_free(struct kfifo *fifo)
{
kfree(fifo->buffer);
+ _kfifo_init(fifo, NULL, 0);
}
EXPORT_SYMBOL(kfifo_free);
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index c7ade62e4ef0..761fdd2b3034 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -599,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)
/* Signal the primary CPU that we are done: */
atomic_set(&cpu_in_kgdb[cpu], 0);
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
local_irq_restore(flags);
}
@@ -1453,7 +1453,7 @@ acquirelock:
(kgdb_info[cpu].task &&
kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
local_irq_restore(flags);
@@ -1553,7 +1553,7 @@ kgdb_restore:
}
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog();
+ touch_softlockup_watchdog_sync();
clocksource_touch_watchdog();
local_irq_restore(flags);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index d27746bd3a06..2ae7409bf38f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
- task_event->event_id.time = perf_clock();
-
perf_output_put(&handle, task_event->event_id);
perf_output_end(&handle);
@@ -3268,7 +3266,7 @@ static void perf_event_task_output(struct perf_event *event,
static int perf_event_task_match(struct perf_event *event)
{
- if (event->state != PERF_EVENT_STATE_ACTIVE)
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3300,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
cpuctx = &get_cpu_var(perf_cpu_context);
perf_event_task_ctx(&cpuctx->ctx, task_event);
if (!ctx)
- ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+ ctx = rcu_dereference(current->perf_event_ctxp);
if (ctx)
perf_event_task_ctx(ctx, task_event);
put_cpu_var(perf_cpu_context);
@@ -3331,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
/* .ppid */
/* .tid */
/* .ptid */
+ .time = perf_clock(),
},
};
@@ -3380,7 +3379,7 @@ static void perf_event_comm_output(struct perf_event *event,
static int perf_event_comm_match(struct perf_event *event)
{
- if (event->state != PERF_EVENT_STATE_ACTIVE)
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3500,7 +3499,7 @@ static void perf_event_mmap_output(struct perf_event *event,
static int perf_event_mmap_match(struct perf_event *event,
struct perf_mmap_event *mmap_event)
{
- if (event->state != PERF_EVENT_STATE_ACTIVE)
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
return 0;
if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -4580,7 +4579,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (attr->type >= PERF_TYPE_MAX)
return -EINVAL;
- if (attr->__reserved_1 || attr->__reserved_2)
+ if (attr->__reserved_1)
return -EINVAL;
if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
diff --git a/kernel/softirq.c b/kernel/softirq.c
index a09502e2ef75..7c1a67ef0274 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -500,22 +500,17 @@ EXPORT_SYMBOL(tasklet_kill);
*/
/*
- * The trampoline is called when the hrtimer expires. If this is
- * called from the hrtimer interrupt then we schedule the tasklet as
- * the timer callback function expects to run in softirq context. If
- * it's called in softirq context anyway (i.e. high resolution timers
- * disabled) then the hrtimer callback is called right away.
+ * The trampoline is called when the hrtimer expires. It schedules a tasklet
+ * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
+ * hrtimer callback, but from softirq context.
*/
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
struct tasklet_hrtimer *ttimer =
container_of(timer, struct tasklet_hrtimer, timer);
- if (hrtimer_is_hres_active(timer)) {
- tasklet_hi_schedule(&ttimer->tasklet);
- return HRTIMER_NORESTART;
- }
- return ttimer->function(timer);
+ tasklet_hi_schedule(&ttimer->tasklet);
+ return HRTIMER_NORESTART;
}
/*
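The simplified trampoline above recovers its enclosing tasklet_hrtimer from the embedded hrtimer via container_of(). A standalone sketch of that idiom with invented structure names (not the kernel types):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct timer {
    int expires;
};

struct wrapper {
    int cookie;
    struct timer timer;  /* embedded member */
};

/* The callback only receives the embedded timer, yet can recover the wrapper. */
static void callback(struct timer *t)
{
    struct wrapper *w = container_of(t, struct wrapper, timer);

    printf("cookie = %d\n", w->cookie);
}

int main(void)
{
    struct wrapper w = { .cookie = 42, .timer = { .expires = 0 } };

    callback(&w.timer);
    return 0;
}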
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..0d4c7898ab80 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
static int __read_mostly did_panic;
int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
+void touch_softlockup_watchdog_sync(void)
+{
+ __raw_get_cpu_var(softlock_touch_sync) = true;
+ __raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
void touch_all_softlockup_watchdogs(void)
{
int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
}
if (touch_ts == 0) {
+ if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+ /*
+ * If the time stamp was touched atomically
+ * make sure the scheduler tick is up to date.
+ */
+ per_cpu(softlock_touch_sync, this_cpu) = false;
+ sched_clock_tick();
+ }
__touch_softlockup_watchdog();
return;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 26a6b73a6b85..18bde979f346 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
if (which > PRIO_USER || which < PRIO_PROCESS)
return -EINVAL;
+ rcu_read_lock();
read_lock(&tasklist_lock);
switch (which) {
case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
}
out_unlock:
read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return retval;
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7faaa32fbf4f..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
+EXPORT_SYMBOL_GPL(getboottime);
/**
* monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
{
*ts = timespec_add_safe(*ts, total_sleep_time);
}
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6ea90c0e2c96..50b1b8239806 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -689,7 +689,7 @@ static int create_trace_probe(int argc, char **argv)
return -EINVAL;
}
/* an address specified */
- ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr);
+ ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
if (ret) {
pr_info("Failed to parse address.\n");
return ret;
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee30..f4bc9b27de5f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
unsigned long val, flags;
char buf[64];
int ret;
+ int cpu;
if (count >= sizeof(buf))
return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
return ret;
local_irq_save(flags);
+
+ /*
+ * In case we trace inside arch_spin_lock() or after (NMI),
+ * we will cause circular lock, so we also need to increase
+ * the percpu trace_active here.
+ */
+ cpu = smp_processor_id();
+ per_cpu(trace_active, cpu)++;
+
arch_spin_lock(&max_stack_lock);
*ptr = val;
arch_spin_unlock(&max_stack_lock);
+
+ per_cpu(trace_active, cpu)--;
local_irq_restore(flags);
return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
static void *t_start(struct seq_file *m, loff_t *pos)
{
+ int cpu;
+
local_irq_disable();
+
+ cpu = smp_processor_id();
+ per_cpu(trace_active, cpu)++;
+
arch_spin_lock(&max_stack_lock);
if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
static void t_stop(struct seq_file *m, void *p)
{
+ int cpu;
+
arch_spin_unlock(&max_stack_lock);
+
+ cpu = smp_processor_id();
+ per_cpu(trace_active, cpu)--;
+
local_irq_enable();
}
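The trace_stack hunks above bump a per-cpu trace_active count before taking max_stack_lock, so that if the stack tracer fires while the lock is held it backs off instead of deadlocking on itself. A single-threaded sketch of the same re-entrancy-guard idea; the function names and the plain counter are stand-ins, not the kernel mechanism:

#include <stdio.h>

static int guard_active;  /* per-cpu in the kernel; a plain counter here */

static void traced_hook(void);

static void do_locked_work(void)
{
    guard_active++;   /* anything we call now must not re-enter the tracer */
    traced_hook();    /* imagine this fires "inside the lock" */
    guard_active--;
}

static void traced_hook(void)
{
    if (guard_active) {
        puts("recursion detected, skipping");
        return;
    }
    puts("doing the expensive traced work");
}

int main(void)
{
    traced_hook();     /* runs normally */
    do_locked_work();  /* the nested invocation is skipped */
    return 0;
}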
diff --git a/lib/idr.c b/lib/idr.c
index ba7d37cf7847..0dc782216d4b 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -140,7 +140,8 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
id = *starting_id;
restart:
p = idp->top;
- l = p->layer;
+ l = idp->layers;
+ pa[l--] = NULL;
while (1) {
/*
* We run around this while until we reach the leaf node...
@@ -154,11 +155,13 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
oid = id;
id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
- /* did id go over the limit? */
- if (id >= (1 << (idp->layers * IDR_BITS))) {
+ /* if already at the top layer, we need to grow */
+ if (id >= 1 << (idp->layers * IDR_BITS)) {
*starting_id = id;
return IDR_NEED_TO_GROW;
}
+ p = pa[l];
+ BUG_ON(!p);
/* If we need to go up one layer, continue the
* loop; otherwise, restart from the top.
diff --git a/mm/migrate.c b/mm/migrate.c
index efddbf0926b2..880bd592d38e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -912,6 +912,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
goto out_pm;
err = -ENODEV;
+ if (node < 0 || node >= MAX_NUMNODES)
+ goto out_pm;
+
if (!node_state(node, N_HIGH_MEMORY))
goto out_pm;
@@ -999,33 +1002,27 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
#define DO_PAGES_STAT_CHUNK_NR 16
const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
int chunk_status[DO_PAGES_STAT_CHUNK_NR];
- unsigned long i, chunk_nr = DO_PAGES_STAT_CHUNK_NR;
- int err;
- for (i = 0; i < nr_pages; i += chunk_nr) {
- if (chunk_nr > nr_pages - i)
- chunk_nr = nr_pages - i;
+ while (nr_pages) {
+ unsigned long chunk_nr;
- err = copy_from_user(chunk_pages, &pages[i],
- chunk_nr * sizeof(*chunk_pages));
- if (err) {
- err = -EFAULT;
- goto out;
- }
+ chunk_nr = nr_pages;
+ if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
+ chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+
+ if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
+ break;
do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
- err = copy_to_user(&status[i], chunk_status,
- chunk_nr * sizeof(*chunk_status));
- if (err) {
- err = -EFAULT;
- goto out;
- }
- }
- err = 0;
+ if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+ break;
-out:
- return err;
+ pages += chunk_nr;
+ status += chunk_nr;
+ nr_pages -= chunk_nr;
+ }
+ return nr_pages ? -EFAULT : 0;
}
/*
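The rewritten do_pages_stat() above walks the user arrays in fixed-size chunks through small on-stack buffers, advancing the user pointers and the remaining count each round and returning -EFAULT if a copy fails part-way. A userspace sketch of the same chunking pattern with memcpy standing in for the user-copy helpers; the buffer size, names, and the doubling step are illustrative assumptions:

#include <stdio.h>
#include <string.h>

#define CHUNK_NR 16

/* Process src[0..n) into dst[0..n) through a small bounded buffer. */
static int process_in_chunks(int *dst, const int *src, size_t n)
{
    int in[CHUNK_NR], out[CHUNK_NR];

    while (n) {
        size_t chunk = n > CHUNK_NR ? CHUNK_NR : n;

        memcpy(in, src, chunk * sizeof(*in));    /* copy_from_user() in the kernel */
        for (size_t i = 0; i < chunk; i++)
            out[i] = in[i] * 2;                  /* stand-in for the per-page lookup */
        memcpy(dst, out, chunk * sizeof(*out));  /* copy_to_user() in the kernel */

        src += chunk;
        dst += chunk;
        n -= chunk;
    }
    /* in the kernel, a failed copy breaks out early and leaves n non-zero */
    return n ? -1 : 0;
}

int main(void)
{
    int src[40], dst[40];

    for (int i = 0; i < 40; i++)
        src[i] = i;
    process_in_chunks(dst, src, 40);
    printf("dst[39] = %d\n", dst[39]);
    return 0;
}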
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f52481b1c1e5..237050478f28 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -459,6 +459,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
list_for_each_entry(c, &p->children, sibling) {
if (c->mm == p->mm)
continue;
+ if (mem && !task_in_mem_cgroup(c, mem))
+ continue;
if (!oom_kill_task(c))
return 0;
}
diff --git a/net/9p/client.c b/net/9p/client.c
index 8af95b2dddd6..09d4f1e2e4a8 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -69,7 +69,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
static int parse_opts(char *opts, struct p9_client *clnt)
{
- char *options;
+ char *options, *tmp_options;
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
@@ -81,12 +81,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
if (!opts)
return 0;
- options = kstrdup(opts, GFP_KERNEL);
- if (!options) {
+ tmp_options = kstrdup(opts, GFP_KERNEL);
+ if (!tmp_options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -108,6 +109,13 @@ static int parse_opts(char *opts, struct p9_client *clnt)
break;
case Opt_trans:
clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
+ if(clnt->trans_mod == NULL) {
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "Could not find request transport: %s\n",
+ (char *) &args[0]);
+ ret = -EINVAL;
+ goto free_and_return;
+ }
break;
case Opt_legacy:
clnt->dotu = 0;
@@ -117,7 +125,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
}
}
- kfree(options);
+free_and_return:
+ kfree(tmp_options);
return ret;
}
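The parse_opts() change above exists because strsep() advances the pointer it is handed, so the original kstrdup() result must be kept in a second variable or the later kfree() operates on a moved (eventually NULL) pointer. A short userspace demonstration of the idiom; the option string is invented:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const char *opts = "port=564,trans=tcp,noextend";
    char *tmp_options, *options, *p;

    tmp_options = strdup(opts);  /* keep this one for free() */
    if (!tmp_options)
        return 1;
    options = tmp_options;       /* strsep() will consume this copy */

    while ((p = strsep(&options, ",")) != NULL)
        printf("token: %s\n", p);

    /* 'options' is now NULL; freeing it instead would leak the allocation. */
    free(tmp_options);
    return 0;
}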
@@ -667,18 +676,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
clnt->trans = NULL;
spin_lock_init(&clnt->lock);
INIT_LIST_HEAD(&clnt->fidlist);
- clnt->fidpool = p9_idpool_create();
- if (IS_ERR(clnt->fidpool)) {
- err = PTR_ERR(clnt->fidpool);
- clnt->fidpool = NULL;
- goto error;
- }
p9_tag_init(clnt);
err = parse_opts(options, clnt);
if (err < 0)
- goto error;
+ goto free_client;
if (!clnt->trans_mod)
clnt->trans_mod = v9fs_get_default_trans();
@@ -687,7 +690,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
err = -EPROTONOSUPPORT;
P9_DPRINTK(P9_DEBUG_ERROR,
"No transport defined or default transport\n");
- goto error;
+ goto free_client;
+ }
+
+ clnt->fidpool = p9_idpool_create();
+ if (IS_ERR(clnt->fidpool)) {
+ err = PTR_ERR(clnt->fidpool);
+ clnt->fidpool = NULL;
+ goto put_trans;
}
P9_DPRINTK(P9_DEBUG_MUX, "clnt %p trans %p msize %d dotu %d\n",
@@ -695,19 +705,25 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
err = clnt->trans_mod->create(clnt, dev_name, options);
if (err)
- goto error;
+ goto destroy_fidpool;
if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
err = p9_client_version(clnt);
if (err)
- goto error;
+ goto close_trans;
return clnt;
-error:
- p9_client_destroy(clnt);
+close_trans:
+ clnt->trans_mod->close(clnt);
+destroy_fidpool:
+ p9_idpool_destroy(clnt->fidpool);
+put_trans:
+ v9fs_put_trans(clnt->trans_mod);
+free_client:
+ kfree(clnt);
return ERR_PTR(err);
}
EXPORT_SYMBOL(p9_client_create);
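p9_client_create() above is reworked so that each failure path jumps to a label that unwinds only what has already been set up, in reverse order of construction, rather than calling the full destructor on a half-built client. A generic sketch of that goto-based unwinding style; the resource names are placeholders, not the 9p objects:

#include <stdio.h>
#include <stdlib.h>

struct client {
    void *pool;
    void *trans;
};

static struct client *client_create(void)
{
    struct client *c;

    c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;

    c->trans = malloc(32);   /* stand-in for "acquire transport" */
    if (!c->trans)
        goto free_client;

    c->pool = malloc(64);    /* stand-in for "create fid pool" */
    if (!c->pool)
        goto put_trans;

    return c;

    /* unwind strictly in reverse order of acquisition */
put_trans:
    free(c->trans);
free_client:
    free(c);
    return NULL;
}

int main(void)
{
    struct client *c = client_create();

    if (c) {
        puts("created");
        free(c->pool);
        free(c->trans);
        free(c);
    }
    return 0;
}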
@@ -1214,10 +1230,11 @@ static int p9_client_statsize(struct p9_wstat *wst, int optional)
{
int ret;
+ /* NOTE: size shouldn't include its own length */
/* size[2] type[2] dev[4] qid[13] */
/* mode[4] atime[4] mtime[4] length[8]*/
/* name[s] uid[s] gid[s] muid[s] */
- ret = 2+2+4+13+4+4+4+8+2+2+2+2;
+ ret = 2+4+13+4+4+4+8+2+2+2+2;
if (wst->name)
ret += strlen(wst->name);
@@ -1258,7 +1275,7 @@ int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst)
wst->name, wst->uid, wst->gid, wst->muid, wst->extension,
wst->n_uid, wst->n_gid, wst->n_muid);
- req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size, wst);
+ req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto error;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index be1cb909d8c0..31d0b05582a9 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -714,7 +714,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
- char *options;
+ char *options, *tmp_options;
int ret;
opts->port = P9_PORT;
@@ -724,12 +724,13 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
if (!params)
return 0;
- options = kstrdup(params, GFP_KERNEL);
- if (!options) {
+ tmp_options = kstrdup(params, GFP_KERNEL);
+ if (!tmp_options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -760,7 +761,8 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
continue;
}
}
- kfree(options);
+
+ kfree(tmp_options);
return 0;
}
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 65cb29db03f8..2c95a89c0f46 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -166,7 +166,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
char *p;
substring_t args[MAX_OPT_ARGS];
int option;
- char *options;
+ char *options, *tmp_options;
int ret;
opts->port = P9_PORT;
@@ -177,12 +177,13 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
if (!params)
return 0;
- options = kstrdup(params, GFP_KERNEL);
- if (!options) {
+ tmp_options = kstrdup(params, GFP_KERNEL);
+ if (!tmp_options) {
P9_DPRINTK(P9_DEBUG_ERROR,
"failed to allocate copy of option string\n");
return -ENOMEM;
}
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -216,7 +217,7 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
}
/* RQ must be at least as large as the SQ */
opts->rq_depth = max(opts->rq_depth, opts->sq_depth);
- kfree(options);
+ kfree(tmp_options);
return 0;
}
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index ea1e3daabefe..cb50f4ae5eef 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -102,7 +102,8 @@ static void p9_virtio_close(struct p9_client *client)
struct virtio_chan *chan = client->trans;
mutex_lock(&virtio_9p_lock);
- chan->inuse = false;
+ if (chan)
+ chan->inuse = false;
mutex_unlock(&virtio_9p_lock);
}
@@ -311,6 +312,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
}
client->trans = (void *)chan;
+ client->status = Connected;
chan->client = client;
return 0;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b7c4224f4e7d..b10e3cdb08f8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -377,6 +377,9 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+ acl->power_save = 1;
+ hci_conn_enter_active_mode(acl);
+
if (lmp_esco_capable(hdev))
hci_setup_sync(sco, acl->handle);
else
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 28517bad796c..592da5c909c1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1699,6 +1699,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
break;
case 0x1c: /* SCO interval rejected */
+ case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
if (conn->out && conn->attempt < 2) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 37ba153c4cd4..280529ad9274 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -714,29 +714,9 @@ static void hidp_close(struct hid_device *hid)
static int hidp_parse(struct hid_device *hid)
{
struct hidp_session *session = hid->driver_data;
- struct hidp_connadd_req *req = session->req;
- unsigned char *buf;
- int ret;
-
- buf = kmalloc(req->rd_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, req->rd_data, req->rd_size)) {
- kfree(buf);
- return -EFAULT;
- }
-
- ret = hid_parse_report(session->hid, buf, req->rd_size);
-
- kfree(buf);
-
- if (ret)
- return ret;
-
- session->req = NULL;
- return 0;
+ return hid_parse_report(session->hid, session->rd_data,
+ session->rd_size);
}
static int hidp_start(struct hid_device *hid)
@@ -781,12 +761,24 @@ static int hidp_setup_hid(struct hidp_session *session,
bdaddr_t src, dst;
int err;
+ session->rd_data = kzalloc(req->rd_size, GFP_KERNEL);
+ if (!session->rd_data)
+ return -ENOMEM;
+
+ if (copy_from_user(session->rd_data, req->rd_data, req->rd_size)) {
+ err = -EFAULT;
+ goto fault;
+ }
+ session->rd_size = req->rd_size;
+
hid = hid_allocate_device();
- if (IS_ERR(hid))
- return PTR_ERR(hid);
+ if (IS_ERR(hid)) {
+ err = PTR_ERR(hid);
+ goto fault;
+ }
session->hid = hid;
- session->req = req;
+
hid->driver_data = session;
baswap(&src, &bt_sk(session->ctrl_sock->sk)->src);
@@ -817,6 +809,10 @@ failed:
hid_destroy_device(hid);
session->hid = NULL;
+fault:
+ kfree(session->rd_data);
+ session->rd_data = NULL;
+
return err;
}
@@ -911,6 +907,9 @@ unlink:
session->hid = NULL;
}
+ kfree(session->rd_data);
+ session->rd_data = NULL;
+
purge:
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index faf3d74c3586..a4e215d50c10 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -154,7 +154,9 @@ struct hidp_session {
struct sk_buff_head ctrl_transmit;
struct sk_buff_head intr_transmit;
- struct hidp_connadd_req *req;
+ /* Report descriptor */
+ __u8 *rd_data;
+ uint rd_size;
};
static inline void hidp_schedule(struct hidp_session *session)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index fc5ee3296e22..89f4a59eb82b 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -252,7 +252,6 @@ static void rfcomm_session_timeout(unsigned long arg)
BT_DBG("session %p state %ld", s, s->state);
set_bit(RFCOMM_TIMED_OUT, &s->flags);
- rfcomm_session_put(s);
rfcomm_schedule(RFCOMM_SCHED_TIMEO);
}
@@ -1151,7 +1150,11 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
break;
case BT_DISCONN:
- rfcomm_session_put(s);
+ /* When socket is closed and we are not RFCOMM
+ * initiator rfcomm_process_rx already calls
+ * rfcomm_session_put() */
+ if (s->sock->sk->sk_state != BT_CLOSED)
+ rfcomm_session_put(s);
break;
}
}
@@ -1920,6 +1923,7 @@ static inline void rfcomm_process_sessions(void)
if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
s->state = BT_DISCONN;
rfcomm_send_disc(s, 0);
+ rfcomm_session_put(s);
continue;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec3..ec874218b206 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2761,7 +2761,7 @@ gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
switch (ret) {
case GRO_NORMAL:
case GRO_HELD:
- skb->protocol = eth_type_trans(skb, napi->dev);
+ skb->protocol = eth_type_trans(skb, skb->dev);
if (ret == GRO_HELD)
skb_gro_pull(skb, -ETH_HLEN);
diff --git a/net/core/dst.c b/net/core/dst.c
index 57bc4d5b8d08..cb1b3488b739 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
+#include <linux/sched.h>
#include <net/dst.h>
@@ -79,6 +80,7 @@ loop:
while ((dst = next) != NULL) {
next = dst->next;
prefetch(&next->next);
+ cond_resched();
if (likely(atomic_read(&dst->__refcnt))) {
last->next = dst;
last = dst;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d8aee584e8d1..236a9988ea91 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -927,6 +927,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPERMADDR:
case ETHTOOL_GUFO:
case ETHTOOL_GGSO:
+ case ETHTOOL_GGRO:
case ETHTOOL_GFLAGS:
case ETHTOOL_GPFLAGS:
case ETHTOOL_GRXFH:
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fbc1c7472c5e..099c753c4213 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -410,7 +410,8 @@ static ssize_t wireless_show(struct device *d, char *buf,
const struct iw_statistics *iw;
ssize_t ret = -EINVAL;
- rtnl_lock();
+ if (!rtnl_trylock())
+ return restart_syscall();
if (dev_isalive(dev)) {
iw = get_wireless_stats(dev);
if (iw)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index de0c2c726420..2e692afdc55d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3524,6 +3524,7 @@ static int pktgen_thread_worker(void *arg)
wait_event_interruptible_timeout(t->queue,
t->control != 0,
HZ/10);
+ try_to_freeze();
continue;
}
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 57dfb9c8c4f2..ff16e9df1969 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -83,7 +83,7 @@ static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_f
va_list args;
va_start(args, fmt);
- vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
+ vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
va_end(args);
slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
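The ccid fix above replaces sizeof(slab_name_fmt) with an explicit length because an array declared as a function parameter decays to a pointer, so sizeof yields the pointer size (typically 8) and vsnprintf silently truncates the slab name. A short demonstration under invented names:

#include <stdio.h>
#include <stdarg.h>

#define NAME_LEN 32

/* 'buf' decays to 'char *': sizeof(buf) here is sizeof(char *), not 32. */
static void format_name_broken(char buf[NAME_LEN], const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    vsnprintf(buf, sizeof(buf), fmt, args);  /* truncates the output */
    va_end(args);
}

static void format_name_fixed(char buf[NAME_LEN], const char *fmt, ...)
{
    va_list args;

    va_start(args, fmt);
    vsnprintf(buf, NAME_LEN, fmt, args);     /* pass the length explicitly */
    va_end(args);
}

int main(void)
{
    char a[NAME_LEN], b[NAME_LEN];

    format_name_broken(a, "ccid%d_hc_rx_sock", 2);
    format_name_fixed(b, "ccid%d_hc_rx_sock", 2);
    printf("broken: \"%s\"\nfixed:  \"%s\"\n", a, b);
    return 0;
}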
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 269958bf7fe9..6df6f8ac9636 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -19,7 +19,9 @@
#include <linux/list.h>
#include <linux/module.h>
-#define CCID_MAX 255
+/* maximum value for a CCID (RFC 4340, 19.5) */
+#define CCID_MAX 255
+#define CCID_SLAB_NAME_LENGTH 32
struct tcp_info;
@@ -49,8 +51,8 @@ struct ccid_operations {
const char *ccid_name;
struct kmem_cache *ccid_hc_rx_slab,
*ccid_hc_tx_slab;
- char ccid_hc_rx_slab_name[32];
- char ccid_hc_tx_slab_name[32];
+ char ccid_hc_rx_slab_name[CCID_SLAB_NAME_LENGTH];
+ char ccid_hc_tx_slab_name[CCID_SLAB_NAME_LENGTH];
__u32 ccid_hc_rx_obj_size,
ccid_hc_tx_obj_size;
/* Interface Routines */
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index bace1d8cbcfd..f5b3464f1242 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -161,8 +161,8 @@ static __init int dccpprobe_init(void)
if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
goto err0;
- ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
- "dccp");
+ try_then_request_module((ret = register_jprobe(&dccp_send_probe)) == 0,
+ "dccp");
if (ret)
goto err1;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f05b653..26dec2be9615 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1317,14 +1317,19 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write,
{
int *valp = ctl->data;
int val = *valp;
+ loff_t pos = *ppos;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *valp != val) {
struct net *net = ctl->extra2;
if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
- if (!rtnl_trylock())
+ if (!rtnl_trylock()) {
+ /* Restore the original values before restarting */
+ *valp = val;
+ *ppos = pos;
return restart_syscall();
+ }
if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
inet_forward_change(net);
} else if (*valp) {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 76c08402c933..a42f658e756a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -946,7 +946,6 @@ int igmp_rcv(struct sk_buff *skb)
break;
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
- case IGMPV3_HOST_MEMBERSHIP_REPORT:
/* Is it our report looped back? */
if (skb_rtable(skb)->fl.iif == 0)
break;
@@ -960,6 +959,7 @@ int igmp_rcv(struct sk_buff *skb)
in_dev_put(in_dev);
return pim_rcv_v1(skb);
#endif
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
case IGMP_DVMRP:
case IGMP_TRACE:
case IGMP_HOST_LEAVE_MESSAGE:
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 38fbf04150ae..544ce0876f12 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -124,16 +124,12 @@ static int ipcomp4_init_state(struct xfrm_state *x)
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = ipcomp_tunnel_attach(x);
if (err)
- goto error_tunnel;
+ goto out;
}
err = 0;
out:
return err;
-
-error_tunnel:
- ipcomp_destroy(x);
- goto out;
}
static const struct xfrm_type ipcomp_type = {
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 06632762ba5f..90203e1b9187 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -925,10 +925,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
if (t && !IS_ERR(t)) {
struct arpt_getinfo info;
const struct xt_table_info *private = t->private;
-
#ifdef CONFIG_COMPAT
+ struct xt_table_info tmp;
+
if (compat) {
- struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(NFPROTO_ARP);
private = &tmp;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 572330a552ef..3ce53cf13d5a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
if (t && !IS_ERR(t)) {
struct ipt_getinfo info;
const struct xt_table_info *private = t->private;
-
#ifdef CONFIG_COMPAT
+ struct xt_table_info tmp;
+
if (compat) {
- struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET);
private = &tmp;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index d171b123a656..d1ea38a7c490 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -210,7 +210,7 @@ static ctl_table ip_ct_sysctl_table[] = {
},
{
.procname = "ip_conntrack_buckets",
- .data = &nf_conntrack_htable_size,
+ .data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 8668a3defda6..2fb7b76da94f 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
struct hlist_nulls_node *n;
for (st->bucket = 0;
- st->bucket < nf_conntrack_htable_size;
+ st->bucket < net->ct.htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
if (!is_a_nulls(n))
@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
head = rcu_dereference(head->next);
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
- if (++st->bucket >= nf_conntrack_htable_size)
+ if (++st->bucket >= net->ct.htable_size)
return NULL;
}
head = rcu_dereference(net->ct.hash[st->bucket].first);
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index fe1a64479dd0..26066a2327ad 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
static struct nf_conntrack_l3proto *l3proto __read_mostly;
-/* Calculated at init based on memory size */
-static unsigned int nf_nat_htable_size __read_mostly;
-
#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
__read_mostly;
@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
-hash_by_src(const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tuple *tuple)
hash = jhash_3words((__force u32)tuple->src.u3.ip,
(__force u32)tuple->src.u.all,
tuple->dst.protonum, 0);
- return ((u64)hash * nf_nat_htable_size) >> 32;
+ return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}
/* Is this tuple already taken? (not by us) */
@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
struct nf_conntrack_tuple *result,
const struct nf_nat_range *range)
{
- unsigned int h = hash_by_src(tuple);
+ unsigned int h = hash_by_src(net, tuple);
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
const struct hlist_node *n;
@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
if (have_to_hash) {
unsigned int srchash;
- srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_lock);
/* nf_conntrack_alter_reply might re-allocate extension area */
nat = nfct_nat(ct);
@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
static int __net_init nf_nat_net_init(struct net *net)
{
- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
- &net->ipv4.nat_vmalloced, 0);
+ /* Leave them the same for the moment. */
+ net->ipv4.nat_htable_size = net->ct.htable_size;
+ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
+ &net->ipv4.nat_vmalloced, 0);
if (!net->ipv4.nat_bysource)
return -ENOMEM;
return 0;
@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
nf_ct_iterate_cleanup(net, &clean_nat, NULL);
synchronize_rcu();
nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
- nf_nat_htable_size);
+ net->ipv4.nat_htable_size);
}
static struct pernet_operations nf_nat_net_ops = {
@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
return ret;
}
- /* Leave them the same for the moment. */
- nf_nat_htable_size = nf_conntrack_htable_size;
-
ret = register_pernet_subsys(&nf_nat_net_ops);
if (ret < 0)
goto cleanup_extend;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28e029632493..3fddc69ccccc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5783,11 +5783,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* tcp_ack considers this ACK as duplicate
* and does not calculate rtt.
- * Fix it at least with timestamps.
+ * Force it here.
*/
- if (tp->rx_opt.saw_tstamp &&
- tp->rx_opt.rcv_tsecr && !tp->srtt)
- tcp_ack_saw_tstamp(sk, 0);
+ tcp_ack_update_rtt(sk, 0, 0);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index de7a194a64ab..143791da062c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -502,8 +502,11 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
if (p == &net->ipv6.devconf_dflt->forwarding)
return 0;
- if (!rtnl_trylock())
+ if (!rtnl_trylock()) {
+ /* Restore the original values before restarting */
+ *p = old;
return restart_syscall();
+ }
if (p == &net->ipv6.devconf_all->forwarding) {
__s32 newf = net->ipv6.devconf_all->forwarding;
@@ -4028,12 +4031,15 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
{
int *valp = ctl->data;
int val = *valp;
+ loff_t pos = *ppos;
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_fixup_forwarding(ctl, valp, val);
+ if (ret)
+ *ppos = pos;
return ret;
}
@@ -4075,8 +4081,11 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
if (p == &net->ipv6.devconf_dflt->disable_ipv6)
return 0;
- if (!rtnl_trylock())
+ if (!rtnl_trylock()) {
+ /* Restore the original values before restarting */
+ *p = old;
return restart_syscall();
+ }
if (p == &net->ipv6.devconf_all->disable_ipv6) {
__s32 newf = net->ipv6.devconf_all->disable_ipv6;
@@ -4095,12 +4104,15 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
{
int *valp = ctl->data;
int val = *valp;
+ loff_t pos = *ppos;
int ret;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write)
ret = addrconf_disable_ipv6(ctl, valp, val);
+ if (ret)
+ *ppos = pos;
return ret;
}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 2f2a5ca2c878..002e6eef9120 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -154,16 +154,12 @@ static int ipcomp6_init_state(struct xfrm_state *x)
if (x->props.mode == XFRM_MODE_TUNNEL) {
err = ipcomp6_tunnel_attach(x);
if (err)
- goto error_tunnel;
+ goto out;
}
err = 0;
out:
return err;
-error_tunnel:
- ipcomp_destroy(x);
-
- goto out;
}
static const struct xfrm_type ipcomp6_type =
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 480d7f8c9802..8a7e0f52e177 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
if (t && !IS_ERR(t)) {
struct ip6t_getinfo info;
const struct xt_table_info *private = t->private;
-
#ifdef CONFIG_COMPAT
+ struct xt_table_info tmp;
+
if (compat) {
- struct xt_table_info tmp;
ret = compat_table_info(private, &tmp);
xt_compat_flush_offsets(AF_INET6);
private = &tmp;
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 156020d138b5..6b3602de359a 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -698,15 +698,18 @@ dev_irnet_ioctl(
/* Query PPP channel and unit number */
case PPPIOCGCHAN:
+ lock_kernel();
if(ap->ppp_open && !put_user(ppp_channel_index(&ap->chan),
(int __user *)argp))
err = 0;
+ unlock_kernel();
break;
case PPPIOCGUNIT:
lock_kernel();
if(ap->ppp_open && !put_user(ppp_unit_number(&ap->chan),
(int __user *)argp))
- err = 0;
+ err = 0;
+ unlock_kernel();
break;
/* All these ioctls can be passed both directly and from ppp_generic,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 76fa6fef6473..539f43bc97db 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3794,9 +3794,9 @@ static struct pernet_operations pfkey_net_ops = {
static void __exit ipsec_pfkey_exit(void)
{
- unregister_pernet_subsys(&pfkey_net_ops);
xfrm_unregister_km(&pfkeyv2_mgr);
sock_unregister(PF_KEY);
+ unregister_pernet_subsys(&pfkey_net_ops);
proto_unregister(&key_proto);
}
@@ -3807,21 +3807,22 @@ static int __init ipsec_pfkey_init(void)
if (err != 0)
goto out;
- err = sock_register(&pfkey_family_ops);
+ err = register_pernet_subsys(&pfkey_net_ops);
if (err != 0)
goto out_unregister_key_proto;
+ err = sock_register(&pfkey_family_ops);
+ if (err != 0)
+ goto out_unregister_pernet;
err = xfrm_register_km(&pfkeyv2_mgr);
if (err != 0)
goto out_sock_unregister;
- err = register_pernet_subsys(&pfkey_net_ops);
- if (err != 0)
- goto out_xfrm_unregister_km;
out:
return err;
-out_xfrm_unregister_km:
- xfrm_unregister_km(&pfkeyv2_mgr);
+
out_sock_unregister:
sock_unregister(PF_KEY);
+out_unregister_pernet:
+ unregister_pernet_subsys(&pfkey_net_ops);
out_unregister_key_proto:
proto_unregister(&key_proto);
goto out;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 1f2db647bb5c..22f0c2aa7a89 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -647,7 +647,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
}
if (pos[1] != 0 &&
(pos[1] != ifibss->ssid_len ||
- !memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
+ memcmp(pos + 2, ifibss->ssid, ifibss->ssid_len))) {
/* Ignore ProbeReq for foreign SSID */
return;
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b9007f80cb92..12a2bff7dcdb 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -245,6 +245,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
info->control.rates[i].count = 1;
}
+ if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ return;
+
if (sta && sdata->force_unicast_rateidx > -1) {
info->control.rates[0].idx = sdata->force_unicast_rateidx;
} else {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f934c9620b73..bc17cf7d68db 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -439,6 +439,16 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (local->scan_req)
return -EBUSY;
+ if (req != local->int_scan_req &&
+ sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !list_empty(&ifmgd->work_list)) {
+ /* actually wait for the work it's doing to finish/time out */
+ set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
+ local->scan_req = req;
+ local->scan_sdata = sdata;
+ return 0;
+ }
+
if (local->ops->hw_scan) {
u8 *ies;
@@ -463,14 +473,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->scan_req = req;
local->scan_sdata = sdata;
- if (req != local->int_scan_req &&
- sdata->vif.type == NL80211_IFTYPE_STATION &&
- !list_empty(&ifmgd->work_list)) {
- /* actually wait for the work it's doing to finish/time out */
- set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
- return 0;
- }
-
if (local->ops->hw_scan)
__set_bit(SCAN_HW_SCANNING, &local->scanning);
else
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0e98c3282d42..4d79e3c1616c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -30,6 +30,7 @@
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
+#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>
#include <net/netfilter/nf_conntrack.h>
@@ -63,8 +64,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
-static struct kmem_cache *nf_conntrack_cachep __read_mostly;
-
static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;
@@ -86,9 +85,10 @@ static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
return ((u64)h * size) >> 32;
}
-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+static inline u_int32_t hash_conntrack(const struct net *net,
+ const struct nf_conntrack_tuple *tuple)
{
- return __hash_conntrack(tuple, nf_conntrack_htable_size,
+ return __hash_conntrack(tuple, net->ct.htable_size,
nf_conntrack_hash_rnd);
}
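__hash_conntrack() above maps a 32-bit hash into [0, size) with ((u64)h * size) >> 32 rather than a modulo; the per-net conversion only changes which table size feeds that formula. A standalone sketch of the reduction; the sample hash inputs and bucket count are made up, and the hash function itself is omitted:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash roughly uniformly into [0, size) without a division. */
static uint32_t reduce(uint32_t h, uint32_t size)
{
    return (uint32_t)(((uint64_t)h * size) >> 32);
}

int main(void)
{
    uint32_t size = 16384;  /* e.g. a conntrack table with 16384 buckets */
    uint32_t samples[] = { 0u, 0x12345678u, 0xdeadbeefu, 0xffffffffu };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("hash %#010x -> bucket %u\n", (unsigned)samples[i],
               (unsigned)reduce(samples[i], size));
    return 0;
}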
@@ -296,7 +296,7 @@ __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- unsigned int hash = hash_conntrack(tuple);
+ unsigned int hash = hash_conntrack(net, tuple);
/* Disable BHs the entire time since we normally need to disable them
* at least once for the stats anyway.
@@ -366,10 +366,11 @@ static void __nf_conntrack_hash_insert(struct nf_conn *ct,
void nf_conntrack_hash_insert(struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
unsigned int hash, repl_hash;
- hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
@@ -397,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return NF_ACCEPT;
- hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
- repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
/* We're not in hash table, and we refuse to set up related
connections for unconfirmed conns. But packet copies and
@@ -468,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
struct net *net = nf_ct_net(ignored_conntrack);
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- unsigned int hash = hash_conntrack(tuple);
+ unsigned int hash = hash_conntrack(net, tuple);
/* Disable BHs the entire time since we need to disable them at
* least once for the stats anyway.
@@ -503,7 +504,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
int dropped = 0;
rcu_read_lock();
- for (i = 0; i < nf_conntrack_htable_size; i++) {
+ for (i = 0; i < net->ct.htable_size; i++) {
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
hnnode) {
tmp = nf_ct_tuplehash_to_ctrack(h);
@@ -523,7 +524,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
if (cnt >= NF_CT_EVICTION_RANGE)
break;
- hash = (hash + 1) % nf_conntrack_htable_size;
+ hash = (hash + 1) % net->ct.htable_size;
}
rcu_read_unlock();
@@ -557,7 +558,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
if (nf_conntrack_max &&
unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
- unsigned int hash = hash_conntrack(orig);
+ unsigned int hash = hash_conntrack(net, orig);
if (!early_drop(net, hash)) {
atomic_dec(&net->ct.count);
if (net_ratelimit())
@@ -572,7 +573,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net,
* Do not use kmem_cache_zalloc(), as this cache uses
* SLAB_DESTROY_BY_RCU.
*/
- ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
+ ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
if (ct == NULL) {
pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
atomic_dec(&net->ct.count);
@@ -611,7 +612,7 @@ void nf_conntrack_free(struct nf_conn *ct)
nf_ct_ext_destroy(ct);
atomic_dec(&net->ct.count);
nf_ct_ext_free(ct);
- kmem_cache_free(nf_conntrack_cachep, ct);
+ kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
@@ -1014,7 +1015,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
struct hlist_nulls_node *n;
spin_lock_bh(&nf_conntrack_lock);
- for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+ for (; *bucket < net->ct.htable_size; (*bucket)++) {
hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
if (iter(ct, data))
@@ -1113,9 +1114,12 @@ static void nf_ct_release_dying_list(struct net *net)
static void nf_conntrack_cleanup_init_net(void)
{
+ /* wait until all references to nf_conntrack_untracked are dropped */
+ while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+ schedule();
+
nf_conntrack_helper_fini();
nf_conntrack_proto_fini();
- kmem_cache_destroy(nf_conntrack_cachep);
}
static void nf_conntrack_cleanup_net(struct net *net)
@@ -1127,15 +1131,14 @@ static void nf_conntrack_cleanup_net(struct net *net)
schedule();
goto i_see_dead_people;
}
- /* wait until all references to nf_conntrack_untracked are dropped */
- while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
- schedule();
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- nf_conntrack_htable_size);
+ net->ct.htable_size);
nf_conntrack_ecache_fini(net);
nf_conntrack_acct_fini(net);
nf_conntrack_expect_fini(net);
+ kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+ kfree(net->ct.slabname);
free_percpu(net->ct.stat);
}
@@ -1190,10 +1193,12 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
int i, bucket, vmalloced, old_vmalloced;
unsigned int hashsize, old_size;
- int rnd;
struct hlist_nulls_head *hash, *old_hash;
struct nf_conntrack_tuple_hash *h;
+ if (current->nsproxy->net_ns != &init_net)
+ return -EOPNOTSUPP;
+
/* On boot, we can set this without any fancy locking. */
if (!nf_conntrack_htable_size)
return param_set_uint(val, kp);
@@ -1206,33 +1211,29 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
if (!hash)
return -ENOMEM;
- /* We have to rehahs for the new table anyway, so we also can
- * use a newrandom seed */
- get_random_bytes(&rnd, sizeof(rnd));
-
/* Lookups in the old hash might happen in parallel, which means we
* might get false negatives during connection lookup. New connections
* created because of a false negative won't make it into the hash
* though since that required taking the lock.
*/
spin_lock_bh(&nf_conntrack_lock);
- for (i = 0; i < nf_conntrack_htable_size; i++) {
+ for (i = 0; i < init_net.ct.htable_size; i++) {
while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
h = hlist_nulls_entry(init_net.ct.hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
hlist_nulls_del_rcu(&h->hnnode);
- bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+ bucket = __hash_conntrack(&h->tuple, hashsize,
+ nf_conntrack_hash_rnd);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
- old_size = nf_conntrack_htable_size;
+ old_size = init_net.ct.htable_size;
old_vmalloced = init_net.ct.hash_vmalloc;
old_hash = init_net.ct.hash;
- nf_conntrack_htable_size = hashsize;
+ init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
init_net.ct.hash_vmalloc = vmalloced;
init_net.ct.hash = hash;
- nf_conntrack_hash_rnd = rnd;
spin_unlock_bh(&nf_conntrack_lock);
nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
@@ -1271,15 +1272,6 @@ static int nf_conntrack_init_init_net(void)
NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
nf_conntrack_max);
- nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
- sizeof(struct nf_conn),
- 0, SLAB_DESTROY_BY_RCU, NULL);
- if (!nf_conntrack_cachep) {
- printk(KERN_ERR "Unable to create nf_conn slab cache\n");
- ret = -ENOMEM;
- goto err_cache;
- }
-
ret = nf_conntrack_proto_init();
if (ret < 0)
goto err_proto;
@@ -1288,13 +1280,19 @@ static int nf_conntrack_init_init_net(void)
if (ret < 0)
goto err_helper;
+ /* Set up fake conntrack: to never be deleted, not in any hashes */
+#ifdef CONFIG_NET_NS
+ nf_conntrack_untracked.ct_net = &init_net;
+#endif
+ atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+ /* - and look it like as a confirmed connection */
+ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+
return 0;
err_helper:
nf_conntrack_proto_fini();
err_proto:
- kmem_cache_destroy(nf_conntrack_cachep);
-err_cache:
return ret;
}
@@ -1316,7 +1314,24 @@ static int nf_conntrack_init_net(struct net *net)
ret = -ENOMEM;
goto err_stat;
}
- net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+
+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+ if (!net->ct.slabname) {
+ ret = -ENOMEM;
+ goto err_slabname;
+ }
+
+ net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
+ sizeof(struct nf_conn), 0,
+ SLAB_DESTROY_BY_RCU, NULL);
+ if (!net->ct.nf_conntrack_cachep) {
+ printk(KERN_ERR "Unable to create nf_conn slab cache\n");
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+
+ net->ct.htable_size = nf_conntrack_htable_size;
+ net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
&net->ct.hash_vmalloc, 1);
if (!net->ct.hash) {
ret = -ENOMEM;
@@ -1333,15 +1348,6 @@ static int nf_conntrack_init_net(struct net *net)
if (ret < 0)
goto err_ecache;
- /* Set up fake conntrack:
- - to never be deleted, not in any hashes */
-#ifdef CONFIG_NET_NS
- nf_conntrack_untracked.ct_net = &init_net;
-#endif
- atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
- /* - and look it like as a confirmed connection */
- set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
-
return 0;
err_ecache:
@@ -1350,8 +1356,12 @@ err_acct:
nf_conntrack_expect_fini(net);
err_expect:
nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
- nf_conntrack_htable_size);
+ net->ct.htable_size);
err_hash:
+ kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+err_cache:
+ kfree(net->ct.slabname);
+err_slabname:
free_percpu(net->ct.stat);
err_stat:
return ret;
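With the bucket count now living in net->ct.htable_size, every lookup has to pass the owning namespace, and the multiply-shift step in __hash_conntrack(), ((u64)h * size) >> 32, keeps working for any per-namespace size, power of two or not. A standalone demonstration of that reduction (sizes are illustrative):

#include <stdio.h>

/* Map a 32-bit hash value uniformly into [0, size), the way
 * __hash_conntrack() does, so the table size need not be a power of two. */
static unsigned int bucket_of(unsigned int hash, unsigned int size)
{
	return (unsigned int)(((unsigned long long)hash * size) >> 32);
}

int main(void)
{
	unsigned int sizes[] = { 16384, 65536, 99991 };  /* last one is not 2^n */

	for (unsigned int i = 0; i < 3; i++) {
		unsigned int size = sizes[i];
		printf("size=%u: bucket(0)=%u bucket(0xffffffff)=%u\n",
		       size, bucket_of(0, size), bucket_of(0xffffffffu, size));
	}
	return 0;
}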
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index fdf5d2a1d9b4..2f25ff610982 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *net)
#endif /* CONFIG_PROC_FS */
}
-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
+module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
int nf_conntrack_expect_init(struct net *net)
{
@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net *net)
if (net_eq(net, &init_net)) {
if (!nf_ct_expect_hsize) {
- nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
+ nf_ct_expect_hsize = net->ct.htable_size / 256;
if (!nf_ct_expect_hsize)
nf_ct_expect_hsize = 1;
}
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 65c2a7bc3afc..4b1a56bd074c 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
/* Get rid of expecteds, set helpers to NULL. */
hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
unhelp(h, me);
- for (i = 0; i < nf_conntrack_htable_size; i++) {
+ for (i = 0; i < net->ct.htable_size; i++) {
hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
unhelp(h, me);
}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 42f21c01a93e..0ffe689dfe97 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
last = (struct nf_conn *)cb->args[1];
- for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
+ for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
restart:
hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
hnnode) {
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 028aba667ef7..e310f1561bb2 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
struct hlist_nulls_node *n;
for (st->bucket = 0;
- st->bucket < nf_conntrack_htable_size;
+ st->bucket < net->ct.htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
if (!is_a_nulls(n))
@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
head = rcu_dereference(head->next);
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
- if (++st->bucket >= nf_conntrack_htable_size)
+ if (++st->bucket >= net->ct.htable_size)
return NULL;
}
head = rcu_dereference(net->ct.hash[st->bucket].first);
@@ -355,7 +355,7 @@ static ctl_table nf_ct_sysctl_table[] = {
},
{
.procname = "nf_conntrack_buckets",
- .data = &nf_conntrack_htable_size,
+ .data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
@@ -421,6 +421,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
goto out_kmemdup;
table[1].data = &net->ct.count;
+ table[2].data = &net->ct.htable_size;
table[3].data = &net->ct.sysctl_checksum;
table[4].data = &net->ct.sysctl_log_invalid;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a4957bf2ca60..4c5972ba8c78 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -455,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (nl_table[protocol].registered &&
try_module_get(nl_table[protocol].module))
module = nl_table[protocol].module;
+ else
+ err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
netlink_unlock_table();
+ if (err < 0)
+ goto out;
+
err = __netlink_create(net, sock, cb_mutex, protocol);
if (err < 0)
goto out_module;
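The netlink fix records -EPROTONOSUPPORT while the table lock is still held and only acts on it after netlink_unlock_table(), so an unregistered protocol no longer reaches __netlink_create(). The same "decide under the lock, bail after dropping it" shape in toy form (illustrative names, not the af_netlink code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered[32];

static int create_for_protocol(int protocol)
{
	int err = 0;

	pthread_mutex_lock(&table_lock);
	if (!registered[protocol])
		err = -EPROTONOSUPPORT;    /* decide while the table can't change */
	pthread_mutex_unlock(&table_lock);

	if (err < 0)
		return err;                /* bail only after the lock is gone */

	/* ... create the per-protocol object here ... */
	return 0;
}

int main(void)
{
	printf("unregistered -> %d\n", create_for_protocol(5));
	registered[5] = 1;
	printf("registered   -> %d\n", create_for_protocol(5));
	return 0;
}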
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 929218a47620..21f9c7678aa3 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -433,7 +433,7 @@ config NET_ACT_POLICE
module.
To compile this code as a module, choose M here: the
- module will be called police.
+ module will be called act_police.
config NET_ACT_GACT
tristate "Generic actions"
@@ -443,7 +443,7 @@ config NET_ACT_GACT
accepting packets.
To compile this code as a module, choose M here: the
- module will be called gact.
+ module will be called act_gact.
config GACT_PROB
bool "Probability support"
@@ -459,7 +459,7 @@ config NET_ACT_MIRRED
other devices.
To compile this code as a module, choose M here: the
- module will be called mirred.
+ module will be called act_mirred.
config NET_ACT_IPT
tristate "IPtables targets"
@@ -469,7 +469,7 @@ config NET_ACT_IPT
classification.
To compile this code as a module, choose M here: the
- module will be called ipt.
+ module will be called act_ipt.
config NET_ACT_NAT
tristate "Stateless NAT"
@@ -479,7 +479,7 @@ config NET_ACT_NAT
netfilter for NAT unless you know what you are doing.
To compile this code as a module, choose M here: the
- module will be called nat.
+ module will be called act_nat.
config NET_ACT_PEDIT
tristate "Packet Editing"
@@ -488,7 +488,7 @@ config NET_ACT_PEDIT
Say Y here if you want to mangle the content of packets.
To compile this code as a module, choose M here: the
- module will be called pedit.
+ module will be called act_pedit.
config NET_ACT_SIMP
tristate "Simple Example (Debug)"
@@ -502,7 +502,7 @@ config NET_ACT_SIMP
If unsure, say N.
To compile this code as a module, choose M here: the
- module will be called simple.
+ module will be called act_simple.
config NET_ACT_SKBEDIT
tristate "SKB Editing"
@@ -513,7 +513,7 @@ config NET_ACT_SKBEDIT
If unsure, say N.
To compile this code as a module, choose M here: the
- module will be called skbedit.
+ module will be called act_skbedit.
config NET_CLS_IND
bool "Incoming device classification"
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index b36cc344474b..f445ea1c5f52 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1102,7 +1102,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
int err = -ENOMEM;
struct xfrm_state *x = xfrm_state_alloc(net);
if (!x)
- goto error;
+ goto out;
memcpy(&x->id, &orig->id, sizeof(x->id));
memcpy(&x->sel, &orig->sel, sizeof(x->sel));
@@ -1160,16 +1160,10 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
return x;
error:
+ xfrm_state_put(x);
+out:
if (errp)
*errp = err;
- if (x) {
- kfree(x->aalg);
- kfree(x->ealg);
- kfree(x->calg);
- kfree(x->encap);
- kfree(x->coaddr);
- }
- kfree(x);
return NULL;
}
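The xfrm_state_clone() change replaces the hand-rolled kfree() of individual members with xfrm_state_put() and adds a separate out: label for the case where nothing was allocated. A compact sketch of that two-label error-path convention (illustrative names):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct thing { char *a; char *b; };

static void thing_put(struct thing *t)
{
	/* one teardown helper owns every member free */
	free(t->a);
	free(t->b);
	free(t);
}

static struct thing *thing_clone(int *errp)
{
	int err = -ENOMEM;
	struct thing *t = calloc(1, sizeof(*t));

	if (!t)
		goto out;      /* nothing allocated yet: skip the release */

	t->a = malloc(16);
	t->b = malloc(16);
	if (!t->a || !t->b)
		goto error;    /* partially built: release through the helper */

	return t;

error:
	thing_put(t);
out:
	if (errp)
		*errp = err;
	return NULL;
}

int main(void)
{
	int err = 0;
	struct thing *t = thing_clone(&err);

	printf("clone %s (err=%d)\n", t ? "ok" : "failed", err);
	if (t)
		thing_put(t);
	return 0;
}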
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index c41afe6639a0..47fb65d1fcbd 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -65,7 +65,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
const char *cause, int result, int info);
/* Internal IMA function definitions */
-void ima_iintcache_init(void);
int ima_init(void);
void ima_cleanup(void);
int ima_fs_init(void);
@@ -131,7 +130,7 @@ void iint_free(struct kref *kref);
void iint_rcu_free(struct rcu_head *rcu);
/* IMA policy related functions */
-enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
void ima_init_policy(void);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 3cd58b60afd2..2a5e0bcf3887 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -95,12 +95,12 @@ err_out:
* ima_must_measure - measure decision based on policy.
* @inode: pointer to inode to measure
* @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
*
* The policy is defined in terms of keypairs:
* subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
- * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP
+ * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
* mask: contains the permission mask
* fsmagic: hex value
*
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
index fa592ff1ac1c..2d4d05d92fda 100644
--- a/security/integrity/ima/ima_iint.c
+++ b/security/integrity/ima/ima_iint.c
@@ -52,9 +52,6 @@ int ima_inode_alloc(struct inode *inode)
struct ima_iint_cache *iint = NULL;
int rc = 0;
- if (!ima_initialized)
- return 0;
-
iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
if (!iint)
return -ENOMEM;
@@ -66,12 +63,11 @@ int ima_inode_alloc(struct inode *inode)
spin_lock(&ima_iint_lock);
rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
spin_unlock(&ima_iint_lock);
+ radix_tree_preload_end();
out:
if (rc < 0)
kmem_cache_free(iint_cache, iint);
- radix_tree_preload_end();
-
return rc;
}
@@ -118,8 +114,6 @@ void ima_inode_free(struct inode *inode)
{
struct ima_iint_cache *iint;
- if (!ima_initialized)
- return;
spin_lock(&ima_iint_lock);
iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
spin_unlock(&ima_iint_lock);
@@ -141,9 +135,11 @@ static void init_once(void *foo)
kref_set(&iint->refcount, 1);
}
-void __init ima_iintcache_init(void)
+static int __init ima_iintcache_init(void)
{
iint_cache =
kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
SLAB_PANIC, init_once);
+ return 0;
}
+security_initcall(ima_iintcache_init);
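Two separate points in this hunk: radix_tree_preload_end() now runs only on the path where radix_tree_preload() succeeded, and the cache constructor becomes a security_initcall() so the iint cache is ready before inodes start being allocated, which is why the ima_initialized guards above could be dropped. A hedged kernel-style sketch of the initcall registration shape (illustrative names, assumes a kernel build environment; this is not the IMA code itself):

#include <linux/init.h>
#include <linux/slab.h>

struct example_obj {
	int refcount;
};

static struct kmem_cache *example_cache;

static int __init example_cache_init(void)
{
	/* SLAB_PANIC: no point continuing if this cache cannot be created */
	example_cache = kmem_cache_create("example_cache",
					  sizeof(struct example_obj),
					  0, SLAB_PANIC, NULL);
	return 0;
}
security_initcall(example_cache_init);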
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index a89f44d5e030..294b005d6520 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -14,7 +14,7 @@
*
* File: ima_main.c
* implements the IMA hooks: ima_bprm_check, ima_file_mmap,
- * and ima_path_check.
+ * and ima_file_check.
*/
#include <linux/module.h>
#include <linux/file.h>
@@ -84,6 +84,36 @@ out:
return found;
}
+/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
+ *
+ * When opening a file for read, if the file is already open for write,
+ * the file could change, resulting in a file measurement error.
+ *
+ * Opening a file for write, if the file is already open for read, results
+ * in a time of measure, time of use (ToMToU) error.
+ *
+ * In either case invalidate the PCR.
+ */
+enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
+static void ima_read_write_check(enum iint_pcr_error error,
+ struct ima_iint_cache *iint,
+ struct inode *inode,
+ const unsigned char *filename)
+{
+ switch (error) {
+ case TOMTOU:
+ if (iint->readcount > 0)
+ ima_add_violation(inode, filename, "invalid_pcr",
+ "ToMToU");
+ break;
+ case OPEN_WRITERS:
+ if (iint->writecount > 0)
+ ima_add_violation(inode, filename, "invalid_pcr",
+ "open_writers");
+ break;
+ }
+}
+
/*
* Update the counts given an fmode_t
*/
@@ -99,6 +129,47 @@ static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
}
/*
+ * ima_counts_get - increment file counts
+ *
+ * Maintain read/write counters for all files, but only
+ * invalidate the PCR for measured files:
+ * - Opening a file for write when already open for read,
+ * results in a time of measure, time of use (ToMToU) error.
+ * - Opening a file for read when already open for write,
+ * could result in a file measurement error.
+ *
+ */
+void ima_counts_get(struct file *file)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ fmode_t mode = file->f_mode;
+ struct ima_iint_cache *iint;
+ int rc;
+
+ if (!ima_initialized || !S_ISREG(inode->i_mode))
+ return;
+ iint = ima_iint_find_get(inode);
+ if (!iint)
+ return;
+ mutex_lock(&iint->mutex);
+ rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
+ if (rc < 0)
+ goto out;
+
+ if (mode & FMODE_WRITE) {
+ ima_read_write_check(TOMTOU, iint, inode, dentry->d_name.name);
+ goto out;
+ }
+ ima_read_write_check(OPEN_WRITERS, iint, inode, dentry->d_name.name);
+out:
+ ima_inc_counts(iint, file->f_mode);
+ mutex_unlock(&iint->mutex);
+
+ kref_put(&iint->refcount, iint_free);
+}
+
+/*
* Decrement ima counts
*/
static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
@@ -153,123 +224,6 @@ void ima_file_free(struct file *file)
kref_put(&iint->refcount, iint_free);
}
-/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
- *
- * When opening a file for read, if the file is already open for write,
- * the file could change, resulting in a file measurement error.
- *
- * Opening a file for write, if the file is already open for read, results
- * in a time of measure, time of use (ToMToU) error.
- *
- * In either case invalidate the PCR.
- */
-enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
-static void ima_read_write_check(enum iint_pcr_error error,
- struct ima_iint_cache *iint,
- struct inode *inode,
- const unsigned char *filename)
-{
- switch (error) {
- case TOMTOU:
- if (iint->readcount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "ToMToU");
- break;
- case OPEN_WRITERS:
- if (iint->writecount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "open_writers");
- break;
- }
-}
-
-static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
- const unsigned char *filename)
-{
- int rc = 0;
-
- ima_inc_counts(iint, file->f_mode);
-
- rc = ima_collect_measurement(iint, file);
- if (!rc)
- ima_store_measurement(iint, file, filename);
- return rc;
-}
-
-/**
- * ima_path_check - based on policy, collect/store measurement.
- * @path: contains a pointer to the path to be measured
- * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
- *
- * Measure the file being open for readonly, based on the
- * ima_must_measure() policy decision.
- *
- * Keep read/write counters for all files, but only
- * invalidate the PCR for measured files:
- * - Opening a file for write when already open for read,
- * results in a time of measure, time of use (ToMToU) error.
- * - Opening a file for read when already open for write,
- * could result in a file measurement error.
- *
- * Always return 0 and audit dentry_open failures.
- * (Return code will be based upon measurement appraisal.)
- */
-int ima_path_check(struct path *path, int mask)
-{
- struct inode *inode = path->dentry->d_inode;
- struct ima_iint_cache *iint;
- struct file *file = NULL;
- int rc;
-
- if (!ima_initialized || !S_ISREG(inode->i_mode))
- return 0;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return 0;
-
- mutex_lock(&iint->mutex);
-
- rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
- if (rc < 0)
- goto out;
-
- if ((mask & MAY_WRITE) || (mask == 0))
- ima_read_write_check(TOMTOU, iint, inode,
- path->dentry->d_name.name);
-
- if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
- goto out;
-
- ima_read_write_check(OPEN_WRITERS, iint, inode,
- path->dentry->d_name.name);
- if (!(iint->flags & IMA_MEASURED)) {
- struct dentry *dentry = dget(path->dentry);
- struct vfsmount *mnt = mntget(path->mnt);
-
- file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
- current_cred());
- if (IS_ERR(file)) {
- int audit_info = 0;
-
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- dentry->d_name.name,
- "add_measurement",
- "dentry_open failed",
- 1, audit_info);
- file = NULL;
- goto out;
- }
- rc = get_path_measurement(iint, file, dentry->d_name.name);
- }
-out:
- mutex_unlock(&iint->mutex);
- if (file)
- fput(file);
- kref_put(&iint->refcount, iint_free);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ima_path_check);
-
static int process_measurement(struct file *file, const unsigned char *filename,
int mask, int function)
{
@@ -297,33 +251,6 @@ out:
return rc;
}
-/*
- * ima_counts_get - increment file counts
- *
- * - for IPC shm and shmat file.
- * - for nfsd exported files.
- *
- * Increment the counts for these files to prevent unnecessary
- * imbalance messages.
- */
-void ima_counts_get(struct file *file)
-{
- struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
-
- if (!ima_initialized || !S_ISREG(inode->i_mode))
- return;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return;
- mutex_lock(&iint->mutex);
- ima_inc_counts(iint, file->f_mode);
- mutex_unlock(&iint->mutex);
-
- kref_put(&iint->refcount, iint_free);
-}
-EXPORT_SYMBOL_GPL(ima_counts_get);
-
/**
* ima_file_mmap - based on policy, collect/store measurement.
* @file: pointer to the file to be measured (May be NULL)
@@ -369,11 +296,31 @@ int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
+/**
+ * ima_path_check - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured
+ * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
+ *
+ * Measure files based on the ima_must_measure() policy decision.
+ *
+ * Always return 0 and audit dentry_open failures.
+ * (Return code will be based upon measurement appraisal.)
+ */
+int ima_file_check(struct file *file, int mask)
+{
+ int rc;
+
+ rc = process_measurement(file, file->f_dentry->d_name.name,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ FILE_CHECK);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ima_file_check);
+
static int __init init_ima(void)
{
int error;
- ima_iintcache_init();
error = ima_init();
ima_initialized = 1;
return error;
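The restructured ima_counts_get() now combines the counter bump with the two violation cases spelled out in its comment: a writer opening while readers exist raises ToMToU, and a reader opening while a writer exists raises open_writers. Just that counter logic as a standalone toy (illustrative, not the kernel code):

#include <stdio.h>

struct counts { int readers; int writers; };

static void open_file(struct counts *c, int for_write)
{
	if (for_write) {
		if (c->readers > 0)
			puts("violation: ToMToU (writer joins existing readers)");
		c->writers++;
	} else {
		if (c->writers > 0)
			puts("violation: open_writers (reader joins existing writer)");
		c->readers++;
	}
}

int main(void)
{
	struct counts c = { 0, 0 };

	open_file(&c, 0);   /* first reader: fine */
	open_file(&c, 1);   /* writer while a reader exists -> ToMToU */
	open_file(&c, 0);   /* reader while a writer exists -> open_writers */
	return 0;
}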
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index e1278399b345..4759d0f99335 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -67,7 +67,7 @@ static struct ima_measure_rule_entry default_rules[] = {
.flags = IMA_FUNC | IMA_MASK},
{.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0,
+ {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
.flags = IMA_FUNC | IMA_MASK | IMA_UID},
};
@@ -282,8 +282,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
break;
case Opt_func:
audit_log_format(ab, "func=%s ", args[0].from);
- if (strcmp(args[0].from, "PATH_CHECK") == 0)
- entry->func = PATH_CHECK;
+ if (strcmp(args[0].from, "FILE_CHECK") == 0)
+ entry->func = FILE_CHECK;
+ /* PATH_CHECK is for backwards compat */
+ else if (strcmp(args[0].from, "PATH_CHECK") == 0)
+ entry->func = FILE_CHECK;
else if (strcmp(args[0].from, "FILE_MMAP") == 0)
entry->func = FILE_MMAP;
else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
diff --git a/security/security.c b/security/security.c
index 24e060be9fa5..122b748d0f4c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -666,8 +666,6 @@ int security_file_alloc(struct file *file)
void security_file_free(struct file *file)
{
security_ops->file_free_security(file);
- if (file->f_dentry)
- ima_file_free(file);
}
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 68c7348d1acc..04b6145d767f 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -128,7 +128,7 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
c_iter->bitmap[cmap_idx]
- |= e_iter->maps[cmap_idx] << cmap_sft;
+ |= e_iter->maps[i] << cmap_sft;
}
e_iter = e_iter->next;
}
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index cb65bd0dd35b..459c1f62783b 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -166,18 +166,7 @@ static void ct_unmap_audio_buffer(struct ct_atc *atc, struct ct_atc_pcm *apcm)
static unsigned long atc_get_ptp_phys(struct ct_atc *atc, int index)
{
- struct ct_vm *vm;
- void *kvirt_addr;
- unsigned long phys_addr;
-
- vm = atc->vm;
- kvirt_addr = vm->get_ptp_virt(vm, index);
- if (kvirt_addr == NULL)
- phys_addr = (~0UL);
- else
- phys_addr = virt_to_phys(kvirt_addr);
-
- return phys_addr;
+ return atc->vm->get_ptp_phys(atc->vm, index);
}
static unsigned int convert_format(snd_pcm_format_t snd_format)
@@ -1669,7 +1658,7 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci,
}
/* Set up device virtual memory management object */
- err = ct_vm_create(&atc->vm);
+ err = ct_vm_create(&atc->vm, pci);
if (err < 0)
goto error1;
diff --git a/sound/pci/ctxfi/ctvmem.c b/sound/pci/ctxfi/ctvmem.c
index 6b78752e9503..65da6e466f80 100644
--- a/sound/pci/ctxfi/ctvmem.c
+++ b/sound/pci/ctxfi/ctvmem.c
@@ -138,7 +138,7 @@ ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
return NULL;
}
- ptp = vm->ptp[0];
+ ptp = (unsigned long *)vm->ptp[0].area;
pte_start = (block->addr >> CT_PAGE_SHIFT);
pages = block->size >> CT_PAGE_SHIFT;
for (i = 0; i < pages; i++) {
@@ -158,25 +158,25 @@ static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
}
/* *
- * return the host (kmalloced) addr of the @index-th device
- * page talbe page on success, or NULL on failure.
- * The first returned NULL indicates the termination.
+ * return the host physical addr of the @index-th device
+ * page table page on success, or ~0UL on failure.
+ * The first returned ~0UL indicates the termination.
* */
-static void *
-ct_get_ptp_virt(struct ct_vm *vm, int index)
+static dma_addr_t
+ct_get_ptp_phys(struct ct_vm *vm, int index)
{
- void *addr;
+ dma_addr_t addr;
- addr = (index >= CT_PTP_NUM) ? NULL : vm->ptp[index];
+ addr = (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
return addr;
}
-int ct_vm_create(struct ct_vm **rvm)
+int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
struct ct_vm *vm;
struct ct_vm_block *block;
- int i;
+ int i, err = 0;
*rvm = NULL;
@@ -188,23 +188,21 @@ int ct_vm_create(struct ct_vm **rvm)
/* Allocate page table pages */
for (i = 0; i < CT_PTP_NUM; i++) {
- vm->ptp[i] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!vm->ptp[i])
+ err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
+ snd_dma_pci_data(pci),
+ PAGE_SIZE, &vm->ptp[i]);
+ if (err < 0)
break;
}
- if (!i) {
+ if (err < 0) {
/* no page table pages are allocated */
- kfree(vm);
+ ct_vm_destroy(vm);
return -ENOMEM;
}
vm->size = CT_ADDRS_PER_PAGE * i;
- /* Initialise remaining ptps */
- for (; i < CT_PTP_NUM; i++)
- vm->ptp[i] = NULL;
-
vm->map = ct_vm_map;
vm->unmap = ct_vm_unmap;
- vm->get_ptp_virt = ct_get_ptp_virt;
+ vm->get_ptp_phys = ct_get_ptp_phys;
INIT_LIST_HEAD(&vm->unused);
INIT_LIST_HEAD(&vm->used);
block = kzalloc(sizeof(*block), GFP_KERNEL);
@@ -242,7 +240,7 @@ void ct_vm_destroy(struct ct_vm *vm)
/* free allocated page table pages */
for (i = 0; i < CT_PTP_NUM; i++)
- kfree(vm->ptp[i]);
+ snd_dma_free_pages(&vm->ptp[i]);
vm->size = 0;
diff --git a/sound/pci/ctxfi/ctvmem.h b/sound/pci/ctxfi/ctvmem.h
index 01e4fd0386a3..b23adfca4de6 100644
--- a/sound/pci/ctxfi/ctvmem.h
+++ b/sound/pci/ctxfi/ctvmem.h
@@ -22,6 +22,8 @@
#include <linux/mutex.h>
#include <linux/list.h>
+#include <linux/pci.h>
+#include <sound/memalloc.h>
/* The chip can handle the page table of 4k pages
* (emu20k1 can handle even 8k pages, but we don't use it right now)
@@ -41,7 +43,7 @@ struct snd_pcm_substream;
/* Virtual memory management object for card device */
struct ct_vm {
- void *ptp[CT_PTP_NUM]; /* Device page table pages */
+ struct snd_dma_buffer ptp[CT_PTP_NUM]; /* Device page table pages */
unsigned int size; /* Available addr space in bytes */
struct list_head unused; /* List of unused blocks */
struct list_head used; /* List of used blocks */
@@ -52,10 +54,10 @@ struct ct_vm {
int size);
/* Unmap device logical addr area. */
void (*unmap)(struct ct_vm *, struct ct_vm_block *block);
- void *(*get_ptp_virt)(struct ct_vm *vm, int index);
+ dma_addr_t (*get_ptp_phys)(struct ct_vm *vm, int index);
};
-int ct_vm_create(struct ct_vm **rvm);
+int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci);
void ct_vm_destroy(struct ct_vm *vm);
#endif /* CTVMEM_H */
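The page-table pages switch from kmalloc() memory plus virt_to_phys() to snd_dma_buffer objects that carry both a CPU mapping (.area) and a device-visible address (.addr), which is what get_ptp_phys() now returns to the hardware side. A hedged sketch of the same CPU/device address pairing using the generic DMA API (assumes a kernel build environment; the ctxfi code itself goes through snd_dma_alloc_pages):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* One device page-table page: the CPU fills entries through 'area',
 * the hardware is pointed at 'addr'. */
struct ptp_page {
	void       *area;   /* kernel virtual address for CPU access */
	dma_addr_t  addr;   /* bus/DMA address programmed into the device */
};

static int ptp_page_alloc(struct device *dev, struct ptp_page *p)
{
	p->area = dma_alloc_coherent(dev, PAGE_SIZE, &p->addr, GFP_KERNEL);
	return p->area ? 0 : -ENOMEM;
}

static void ptp_page_free(struct device *dev, struct ptp_page *p)
{
	if (p->area)
		dma_free_coherent(dev, PAGE_SIZE, p->area, p->addr);
}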
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 565de38a3fc7..ff6da6f386d1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -426,6 +426,7 @@ struct azx {
/* flags */
int position_fix;
+ int poll_count;
unsigned int running :1;
unsigned int initialized :1;
unsigned int single_cmd :1;
@@ -506,7 +507,7 @@ static char *driver_short_names[] __devinitdata = {
#define get_azx_dev(substream) (substream->runtime->private_data)
static int azx_acquire_irq(struct azx *chip, int do_disconnect);
-
+static int azx_send_cmd(struct hda_bus *bus, unsigned int val);
/*
* Interface for HD codec
*/
@@ -664,11 +665,12 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
{
struct azx *chip = bus->private_data;
unsigned long timeout;
+ int do_poll = 0;
again:
timeout = jiffies + msecs_to_jiffies(1000);
for (;;) {
- if (chip->polling_mode) {
+ if (chip->polling_mode || do_poll) {
spin_lock_irq(&chip->reg_lock);
azx_update_rirb(chip);
spin_unlock_irq(&chip->reg_lock);
@@ -676,6 +678,9 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
if (!chip->rirb.cmds[addr]) {
smp_rmb();
bus->rirb_error = 0;
+
+ if (!do_poll)
+ chip->poll_count = 0;
return chip->rirb.res[addr]; /* the last value */
}
if (time_after(jiffies, timeout))
@@ -688,6 +693,16 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus,
}
}
+ if (!chip->polling_mode && chip->poll_count < 2) {
+ snd_printdd(SFX "azx_get_response timeout, "
+ "polling the codec once: last cmd=0x%08x\n",
+ chip->last_cmd[addr]);
+ do_poll = 1;
+ chip->poll_count++;
+ goto again;
+ }
+
+
if (!chip->polling_mode) {
snd_printk(KERN_WARNING SFX "azx_get_response timeout, "
"switching to polling mode: last cmd=0x%08x\n",
@@ -1878,6 +1893,9 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev)
if (!bdl_pos_adj[chip->dev_index])
return 1; /* no delayed ack */
+ if (WARN_ONCE(!azx_dev->period_bytes,
+ "hda-intel: zero azx_dev->period_bytes"))
+ return 0; /* this shouldn't happen! */
if (pos % azx_dev->period_bytes > azx_dev->period_bytes / 2)
return 0; /* NG - it's below the period boundary */
return 1; /* OK, it's fine */
@@ -2043,7 +2061,7 @@ static int azx_acquire_irq(struct azx *chip, int do_disconnect)
{
if (request_irq(chip->pci->irq, azx_interrupt,
chip->msi ? 0 : IRQF_SHARED,
- "HDA Intel", chip)) {
+ "hda_intel", chip)) {
printk(KERN_ERR "hda-intel: unable to grab IRQ %d, "
"disabling device\n", chip->pci->irq);
if (do_disconnect)
@@ -2332,7 +2350,7 @@ static void __devinit check_probe_mask(struct azx *chip, int dev)
*/
static struct snd_pci_quirk msi_black_list[] __devinitdata = {
SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
- SND_PCI_QUIRK(0x1043, 0x829c, "ASUS", 0), /* nvidia */
+ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
{}
};
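The RIRB change adds an intermediate step: on a timeout the driver first re-polls the codec for this one request (poll_count limits that to two attempts) and only then falls back to permanent polling mode. The escalation logic in toy form (illustrative names, not the driver code):

#include <stdio.h>

struct link {
	int polling_mode;   /* permanent fallback */
	int poll_count;     /* transient single-poll retries used */
};

/* try_once() returns 1 on success, 0 on timeout. */
static int get_response(struct link *l, int (*try_once)(int polled))
{
	int do_poll = 0;

again:
	if (try_once(l->polling_mode || do_poll)) {
		if (!do_poll)
			l->poll_count = 0;   /* IRQ delivery looks healthy again */
		return 0;
	}

	if (!l->polling_mode && l->poll_count < 2) {
		do_poll = 1;                 /* retry just this request by polling */
		l->poll_count++;
		goto again;
	}

	if (!l->polling_mode) {
		l->polling_mode = 1;         /* give up on interrupts for good */
		goto again;
	}

	return -1;                           /* even polling did not help */
}

static int flaky_try_once(int polled)
{
	return polled;   /* pretend the IRQ never arrives but polling works */
}

int main(void)
{
	struct link l = { 0, 0 };
	int ret = get_response(&l, flaky_try_once);

	printf("result=%d poll_count=%d polling_mode=%d\n",
	       ret, l.poll_count, l.polling_mode);
	return 0;
}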
diff --git a/sound/pci/ice1712/aureon.c b/sound/pci/ice1712/aureon.c
index 765d7bd4c3d4..9e66f6d306f8 100644
--- a/sound/pci/ice1712/aureon.c
+++ b/sound/pci/ice1712/aureon.c
@@ -703,11 +703,13 @@ static void wm_set_vol(struct snd_ice1712 *ice, unsigned int index, unsigned sho
{
unsigned char nvol;
- if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE))
+ if ((master & WM_VOL_MUTE) || (vol & WM_VOL_MUTE)) {
nvol = 0;
- else
+ } else {
nvol = ((vol % WM_VOL_CNT) * (master % WM_VOL_CNT)) /
WM_VOL_MAX;
+ nvol += 0x1b;
+ }
wm_put(ice, index, nvol);
wm_put_nocache(ice, index, 0x180 | nvol);
@@ -778,7 +780,7 @@ static int wm_master_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
for (ch = 0; ch < 2; ch++) {
unsigned int vol = ucontrol->value.integer.value[ch];
if (vol > WM_VOL_MAX)
- continue;
+ vol = WM_VOL_MAX;
vol |= spec->master[ch] & WM_VOL_MUTE;
if (vol != spec->master[ch]) {
int dac;
@@ -834,8 +836,8 @@ static int wm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *
for (i = 0; i < voices; i++) {
unsigned int vol = ucontrol->value.integer.value[i];
if (vol > WM_VOL_MAX)
- continue;
- vol |= spec->vol[ofs+i];
+ vol = WM_VOL_MAX;
+ vol |= spec->vol[ofs+i] & WM_VOL_MUTE;
if (vol != spec->vol[ofs+i]) {
spec->vol[ofs+i] = vol;
idx = WM_DAC_ATTEN + ofs + i;
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 71b2c161158d..68980c19a3bc 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -145,6 +145,7 @@ static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = {
};
static const struct snd_soc_dapm_route omap3pandora_out_map[] = {
+ {"PCM DAC", NULL, "APLL Enable"},
{"Headphone Amplifier", NULL, "PCM DAC"},
{"Line Out", NULL, "PCM DAC"},
{"Headphone Jack", NULL, "Headphone Amplifier"},
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ddc584b64871..4b91d8cf00ec 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -705,7 +705,7 @@ static void print_mapped_keys(void)
fprintf(stdout, "\t[w] toggle display weighted/count[E]r. \t(%d)\n", display_weighted ? 1 : 0);
fprintf(stdout,
- "\t[K] hide kernel_symbols symbols. \t(%s)\n",
+ "\t[K] hide kernel_symbols symbols. \t(%s)\n",
hide_kernel_symbols ? "yes" : "no");
fprintf(stdout,
"\t[U] hide user symbols. \t(%s)\n",
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index bb0fd6da2d56..8a9e6baa3099 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -295,10 +295,10 @@ void thread__find_addr_location(struct thread *self,
al->thread = self;
al->addr = addr;
- if (cpumode & PERF_RECORD_MISC_KERNEL) {
+ if (cpumode == PERF_RECORD_MISC_KERNEL) {
al->level = 'k';
mg = &session->kmaps;
- } else if (cpumode & PERF_RECORD_MISC_USER)
+ } else if (cpumode == PERF_RECORD_MISC_USER)
al->level = '.';
else {
al->level = 'H';
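cpumode is an enumerated field (one value selected by a cpumode mask), not a set of independent flag bits, so testing it with & matches unrelated values; the fix compares with ==. A quick illustration (constants reproduced here for the sketch; treat them as illustrative):

#include <stdio.h>

/* Values as in the perf ABI of this era (shown for illustration). */
#define MISC_KERNEL     1
#define MISC_USER       2
#define MISC_HYPERVISOR 3

int main(void)
{
	unsigned int cpumode = MISC_HYPERVISOR;

	/* Bit test wrongly matches both: 3 & 1 != 0 and 3 & 2 != 0. */
	printf("& test:  kernel=%d user=%d\n",
	       (cpumode & MISC_KERNEL) != 0, (cpumode & MISC_USER) != 0);
	/* Equality test classifies it as neither. */
	printf("== test: kernel=%d user=%d\n",
	       cpumode == MISC_KERNEL, cpumode == MISC_USER);
	return 0;
}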
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 29465d440043..fde17b090a47 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -272,6 +272,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
int ret;
pp->probes[0] = buf = zalloc(MAX_CMDLEN);
+ pp->found = 1;
if (!buf)
die("Failed to allocate memory by zalloc.");
if (pp->offset) {
@@ -294,6 +295,7 @@ int synthesize_perf_probe_point(struct probe_point *pp)
error:
free(pp->probes[0]);
pp->probes[0] = NULL;
+ pp->found = 0;
}
return ret;
}
@@ -455,6 +457,7 @@ void show_perf_probe_events(void)
struct strlist *rawlist;
struct str_node *ent;
+ memset(&pp, 0, sizeof(pp));
fd = open_kprobe_events(O_RDONLY, 0);
rawlist = get_trace_kprobe_event_rawlist(fd);
close(fd);