-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci7
-rw-r--r--Documentation/ABI/testing/sysfs-class-mtd125
-rw-r--r--Documentation/PCI/pcieaer-howto.txt25
-rw-r--r--Documentation/filesystems/00-INDEX4
-rw-r--r--Documentation/isdn/00-INDEX19
-rw-r--r--Documentation/kernel-parameters.txt43
-rw-r--r--Documentation/rfkill.txt2
-rw-r--r--MAINTAINERS25
-rw-r--r--arch/arm/Kconfig6
-rw-r--r--arch/arm/boot/compressed/head.S9
-rw-r--r--arch/arm/common/gic.c2
-rw-r--r--arch/arm/common/vic.c8
-rw-r--r--arch/arm/configs/mini2440_defconfig2097
-rw-r--r--arch/arm/include/asm/unistd.h2
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/irq.c6
-rw-r--r--arch/arm/kernel/process.c77
-rw-r--r--arch/arm/kernel/unwind.c19
-rw-r--r--arch/arm/kernel/vmlinux.lds.S8
-rw-r--r--arch/arm/mach-davinci/include/mach/nand.h8
-rw-r--r--arch/arm/mach-omap2/clock.c2
-rw-r--r--arch/arm/mach-omap2/clock34xx.c42
-rw-r--r--arch/arm/mach-omap2/io.c36
-rw-r--r--arch/arm/mach-omap2/powerdomain.c2
-rw-r--r--arch/arm/mach-omap2/sram34xx.S129
-rw-r--r--arch/arm/mach-orion5x/addr-map.c2
-rw-r--r--arch/arm/mach-orion5x/common.c10
-rw-r--r--arch/arm/mach-orion5x/common.h1
-rw-r--r--arch/arm/mach-pxa/Kconfig10
-rw-r--r--arch/arm/mach-pxa/Makefile1
-rw-r--r--arch/arm/mach-pxa/corgi.c6
-rw-r--r--arch/arm/mach-pxa/em-x270.c63
-rw-r--r--arch/arm/mach-pxa/hx4700.c41
-rw-r--r--arch/arm/mach-pxa/include/mach/palmz72.h5
-rw-r--r--arch/arm/mach-pxa/include/mach/treo680.h49
-rw-r--r--arch/arm/mach-pxa/mioa701.c42
-rw-r--r--arch/arm/mach-pxa/palmz72.c65
-rw-r--r--arch/arm/mach-pxa/poodle.c6
-rw-r--r--arch/arm/mach-pxa/treo680.c612
-rw-r--r--arch/arm/mach-realview/realview_pbx.c1
-rw-r--r--arch/arm/mach-s3c2410/usb-simtec.c1
-rw-r--r--arch/arm/mach-s3c2440/Kconfig10
-rw-r--r--arch/arm/mach-s3c2440/Makefile1
-rw-r--r--arch/arm/mach-s3c2440/mach-mini2440.c703
-rw-r--r--arch/arm/mach-s3c2442/Kconfig12
-rw-r--r--arch/arm/mach-s3c2442/Makefile2
-rw-r--r--arch/arm/mach-s3c2442/include/mach/gta02.h84
-rw-r--r--arch/arm/mach-s3c2442/mach-gta02.c646
-rw-r--r--arch/arm/mm/alignment.c139
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/plat-omap/include/mach/sram.h6
-rw-r--r--arch/arm/plat-omap/sram.c8
-rw-r--r--arch/arm/plat-s3c/Makefile1
-rw-r--r--arch/arm/plat-s3c/dev-audio.c68
-rw-r--r--arch/arm/plat-s3c/gpio-config.c2
-rw-r--r--arch/arm/plat-s3c/include/plat/devs.h5
-rw-r--r--arch/arm/plat-s3c/include/plat/nand.h31
-rw-r--r--arch/arm/plat-s3c64xx/Makefile1
-rw-r--r--arch/arm/plat-s3c64xx/clock.c2
-rw-r--r--arch/arm/plat-s3c64xx/cpufreq.c262
-rw-r--r--arch/arm/plat-s3c64xx/gpiolib.c6
-rw-r--r--arch/arm/plat-s3c64xx/include/plat/regs-clock.h10
-rw-r--r--arch/arm/tools/mach-types39
-rw-r--r--arch/blackfin/Kconfig68
-rw-r--r--arch/blackfin/boot/Makefile2
-rw-r--r--arch/blackfin/configs/BF518F-EZBRD_defconfig24
-rw-r--r--arch/blackfin/configs/BF526-EZBRD_defconfig22
-rw-r--r--arch/blackfin/configs/BF527-EZKIT_defconfig22
-rw-r--r--arch/blackfin/configs/BF533-EZKIT_defconfig25
-rw-r--r--arch/blackfin/configs/BF533-STAMP_defconfig22
-rw-r--r--arch/blackfin/configs/BF537-STAMP_defconfig22
-rw-r--r--arch/blackfin/configs/BF538-EZKIT_defconfig22
-rw-r--r--arch/blackfin/configs/BF548-EZKIT_defconfig27
-rw-r--r--arch/blackfin/configs/BF561-EZKIT_defconfig27
-rw-r--r--arch/blackfin/configs/BlackStamp_defconfig12
-rw-r--r--arch/blackfin/configs/CM-BF527_defconfig22
-rw-r--r--arch/blackfin/configs/CM-BF533_defconfig23
-rw-r--r--arch/blackfin/configs/CM-BF537E_defconfig22
-rw-r--r--arch/blackfin/configs/CM-BF537U_defconfig23
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig25
-rw-r--r--arch/blackfin/configs/CM-BF561_defconfig25
-rw-r--r--arch/blackfin/configs/H8606_defconfig14
-rw-r--r--arch/blackfin/configs/PNAV-10_defconfig20
-rw-r--r--arch/blackfin/configs/SRV1_defconfig14
-rw-r--r--arch/blackfin/configs/TCM-BF537_defconfig14
-rw-r--r--arch/blackfin/include/asm/blackfin.h1
-rw-r--r--arch/blackfin/include/asm/cache.h4
-rw-r--r--arch/blackfin/include/asm/cacheflush.h10
-rw-r--r--arch/blackfin/include/asm/cplb.h32
-rw-r--r--arch/blackfin/include/asm/dma-mapping.h13
-rw-r--r--arch/blackfin/include/asm/ipipe.h11
-rw-r--r--arch/blackfin/include/asm/ipipe_base.h30
-rw-r--r--arch/blackfin/include/asm/irq.h7
-rw-r--r--arch/blackfin/include/asm/irqflags.h164
-rw-r--r--arch/blackfin/include/asm/mem_init.h88
-rw-r--r--arch/blackfin/include/asm/mem_map.h97
-rw-r--r--arch/blackfin/include/asm/system.h4
-rw-r--r--arch/blackfin/include/asm/traps.h4
-rw-r--r--arch/blackfin/include/asm/uaccess.h22
-rw-r--r--arch/blackfin/include/asm/unistd.h3
-rw-r--r--arch/blackfin/kernel/Makefile1
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbinit.c10
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbmgr.c36
-rw-r--r--arch/blackfin/kernel/ipipe.c7
-rw-r--r--arch/blackfin/kernel/irqchip.c114
-rw-r--r--arch/blackfin/kernel/kgdb.c297
-rw-r--r--arch/blackfin/kernel/mcount.S70
-rw-r--r--arch/blackfin/kernel/process.c151
-rw-r--r--arch/blackfin/kernel/setup.c122
-rw-r--r--arch/blackfin/kernel/traps.c60
-rw-r--r--arch/blackfin/mach-bf518/boards/ezbrd.c16
-rw-r--r--arch/blackfin/mach-bf518/include/mach/anomaly.h37
-rw-r--r--arch/blackfin/mach-bf518/include/mach/blackfin.h1
-rw-r--r--arch/blackfin/mach-bf518/include/mach/mem_map.h56
-rw-r--r--arch/blackfin/mach-bf527/boards/cm_bf527.c1
-rw-r--r--arch/blackfin/mach-bf527/boards/ezbrd.c4
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c1
-rw-r--r--arch/blackfin/mach-bf527/include/mach/anomaly.h15
-rw-r--r--arch/blackfin/mach-bf527/include/mach/blackfin.h1
-rw-r--r--arch/blackfin/mach-bf527/include/mach/mem_map.h56
-rw-r--r--arch/blackfin/mach-bf533/boards/ezkit.c106
-rw-r--r--arch/blackfin/mach-bf533/include/mach/anomaly.h77
-rw-r--r--arch/blackfin/mach-bf533/include/mach/blackfin.h1
-rw-r--r--arch/blackfin/mach-bf533/include/mach/mem_map.h56
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c1
-rw-r--r--arch/blackfin/mach-bf537/include/mach/anomaly.h41
-rw-r--r--arch/blackfin/mach-bf537/include/mach/blackfin.h1
-rw-r--r--arch/blackfin/mach-bf537/include/mach/mem_map.h56
-rw-r--r--arch/blackfin/mach-bf538/include/mach/anomaly.h24
-rw-r--r--arch/blackfin/mach-bf538/include/mach/blackfin.h1
-rw-r--r--arch/blackfin/mach-bf538/include/mach/mem_map.h57
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c1
-rw-r--r--arch/blackfin/mach-bf548/include/mach/anomaly.h20
-rw-r--r--arch/blackfin/mach-bf548/include/mach/blackfin.h1
-rw-r--r--arch/blackfin/mach-bf548/include/mach/mem_map.h51
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c1
-rw-r--r--arch/blackfin/mach-bf561/include/mach/anomaly.h89
-rw-r--r--arch/blackfin/mach-bf561/include/mach/blackfin.h1
-rw-r--r--arch/blackfin/mach-bf561/include/mach/mem_map.h58
-rw-r--r--arch/blackfin/mach-common/arch_checks.c4
-rw-r--r--arch/blackfin/mach-common/cpufreq.c2
-rw-r--r--arch/blackfin/mach-common/entry.S1
-rw-r--r--arch/blackfin/mach-common/ints-priority.c47
-rw-r--r--arch/blackfin/mach-common/pm.c4
-rw-r--r--arch/blackfin/mm/init.c2
-rw-r--r--arch/ia64/include/asm/iommu.h5
-rw-r--r--arch/ia64/kernel/pci-dma.c2
-rw-r--r--arch/ia64/kernel/pci-swiotlb.c2
-rw-r--r--arch/mn10300/kernel/vmlinux.lds.S60
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c38
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/defconfig72
-rw-r--r--arch/s390/include/asm/cputime.h2
-rw-r--r--arch/s390/include/asm/debug.h9
-rw-r--r--arch/s390/include/asm/perf_counter.h8
-rw-r--r--arch/s390/include/asm/qdio.h2
-rw-r--r--arch/s390/kernel/kprobes.c18
-rw-r--r--arch/s390/kernel/smp.c28
-rw-r--r--arch/s390/kernel/time.c16
-rw-r--r--arch/s390/kernel/vtime.c27
-rw-r--r--arch/s390/power/swsusp_asm64.S6
-rw-r--r--arch/x86/include/asm/iommu.h1
-rw-r--r--arch/x86/include/asm/pci.h1
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/kernel/pci-dma.c6
-rw-r--r--arch/x86/kernel/pci-swiotlb.c3
-rw-r--r--arch/x86/pci/acpi.c35
-rw-r--r--arch/x86/pci/amd_bus.c2
-rw-r--r--arch/x86/pci/common.c4
-rw-r--r--arch/xtensa/configs/s6105_defconfig105
-rw-r--r--arch/xtensa/include/asm/cacheflush.h95
-rw-r--r--arch/xtensa/include/asm/gpio.h8
-rw-r--r--arch/xtensa/include/asm/irq.h12
-rw-r--r--arch/xtensa/kernel/irq.c2
-rw-r--r--arch/xtensa/platforms/s6105/device.c94
-rw-r--r--arch/xtensa/platforms/s6105/setup.c11
-rw-r--r--arch/xtensa/variants/s6000/Makefile2
-rw-r--r--arch/xtensa/variants/s6000/dmac.c173
-rw-r--r--arch/xtensa/variants/s6000/gpio.c163
-rw-r--r--arch/xtensa/variants/s6000/include/variant/dmac.h387
-rw-r--r--arch/xtensa/variants/s6000/include/variant/gpio.h2
-rw-r--r--arch/xtensa/variants/s6000/include/variant/irq.h6
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/char/bfin_jtag_comm.c30
-rw-r--r--drivers/char/moxa.c7
-rw-r--r--drivers/char/n_hdlc.c46
-rw-r--r--drivers/char/n_r3964.c26
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/vt_ioctl.c3
-rw-r--r--drivers/firewire/Kconfig60
-rw-r--r--drivers/firewire/Makefile4
-rw-r--r--drivers/firewire/core-card.c20
-rw-r--r--drivers/firewire/core-iso.c11
-rw-r--r--drivers/firewire/core.h87
-rw-r--r--drivers/firewire/net.c1655
-rw-r--r--drivers/i2c/busses/i2c-cpm.c6
-rw-r--r--drivers/i2c/busses/i2c-omap.c2
-rw-r--r--drivers/ieee1394/Kconfig19
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c26
-rw-r--r--drivers/mtd/chips/jedec_probe.c13
-rw-r--r--drivers/mtd/devices/m25p80.c4
-rw-r--r--drivers/mtd/maps/Kconfig13
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c5
-rw-r--r--drivers/mtd/maps/integrator-flash.c226
-rw-r--r--drivers/mtd/maps/physmap.c40
-rw-r--r--drivers/mtd/maps/physmap_of.c199
-rw-r--r--drivers/mtd/maps/pmcmsp-ramroot.c104
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c22
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c23
-rw-r--r--drivers/mtd/maps/sa1100-flash.c23
-rw-r--r--drivers/mtd/maps/uclinux.c16
-rw-r--r--drivers/mtd/mtd_blkdevs.c2
-rw-r--r--drivers/mtd/mtdchar.c303
-rw-r--r--drivers/mtd/mtdcore.c47
-rw-r--r--drivers/mtd/mtdpart.c20
-rw-r--r--drivers/mtd/nand/Kconfig24
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/atmel_nand.c11
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c17
-rw-r--r--drivers/mtd/nand/davinci_nand.c342
-rw-r--r--drivers/mtd/nand/mxc_nand.c66
-rw-r--r--drivers/mtd/nand/nand_base.c3
-rw-r--r--drivers/mtd/nand/nand_ecc.c4
-rw-r--r--drivers/mtd/nand/omap2.c776
-rw-r--r--drivers/mtd/nand/orion_nand.c23
-rw-r--r--drivers/mtd/nand/plat_nand.c19
-rw-r--r--drivers/mtd/nand/s3c2410.c268
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c16
-rw-r--r--drivers/mtd/onenand/omap2.c4
-rw-r--r--drivers/mtd/onenand/onenand_base.c862
-rw-r--r--drivers/mtd/onenand/onenand_bbt.c14
-rw-r--r--drivers/mtd/onenand/onenand_sim.c81
-rw-r--r--drivers/net/Kconfig11
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/benet/be.h102
-rw-r--r--drivers/net/benet/be_cmds.c368
-rw-r--r--drivers/net/benet/be_cmds.h80
-rw-r--r--drivers/net/benet/be_hw.h8
-rw-r--r--drivers/net/benet/be_main.c299
-rw-r--r--drivers/net/e1000e/netdev.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c8
-rw-r--r--drivers/net/mlx4/en_rx.c96
-rw-r--r--drivers/net/mlx4/en_tx.c29
-rw-r--r--drivers/net/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/mv643xx_eth.c7
-rw-r--r--drivers/net/ppp_async.c1
-rw-r--r--drivers/net/ppp_synctty.c1
-rw-r--r--drivers/net/qla3xxx.c2
-rw-r--r--drivers/net/r8169.c3
-rw-r--r--drivers/net/s6gmac.c1073
-rw-r--r--drivers/net/usb/Kconfig4
-rw-r--r--drivers/net/usb/cdc_ether.c25
-rw-r--r--drivers/net/usb/cdc_subset.c7
-rw-r--r--drivers/net/usb/pegasus.c29
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c64
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c49
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c11
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c1
-rw-r--r--drivers/parport/parport_pc.c34
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c19
-rw-r--r--drivers/pci/bus.c18
-rw-r--r--drivers/pci/dmar.c235
-rw-r--r--drivers/pci/hotplug/Kconfig4
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c1
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c1
-rw-r--r--drivers/pci/hotplug/cpqphp.h167
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c1100
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c371
-rw-r--r--drivers/pci/hotplug/cpqphp_nvram.c97
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c599
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c155
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c112
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c31
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c1
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c1
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c1
-rw-r--r--drivers/pci/hotplug/shpchp_core.c1
-rw-r--r--drivers/pci/intel-iommu.c449
-rw-r--r--drivers/pci/intr_remapping.c8
-rw-r--r--drivers/pci/iov.c161
-rw-r--r--drivers/pci/msi.c100
-rw-r--r--drivers/pci/msi.h14
-rw-r--r--drivers/pci/pci.c246
-rw-r--r--drivers/pci/pci.h39
-rw-r--r--drivers/pci/pcie/aer/Kconfig15
-rw-r--r--drivers/pci/pcie/aer/Kconfig.debug18
-rw-r--r--drivers/pci/pcie/aer/Makefile3
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c473
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c3
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h6
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c278
-rw-r--r--drivers/pci/pcie/aer/ecrc.c131
-rw-r--r--drivers/pci/pcie/aspm.c787
-rw-r--r--drivers/pci/probe.c11
-rw-r--r--drivers/pci/quirks.c24
-rw-r--r--drivers/pci/remove.c2
-rw-r--r--drivers/pci/search.c32
-rw-r--r--drivers/pci/setup-bus.c53
-rw-r--r--drivers/pci/setup-res.c49
-rw-r--r--drivers/pci/slot.c39
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/eeepc-laptop.c50
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c14
-rw-r--r--drivers/s390/block/dasd.c20
-rw-r--r--drivers/s390/block/dasd_eckd.c10
-rw-r--r--drivers/s390/char/con3215.c22
-rw-r--r--drivers/s390/char/con3270.c13
-rw-r--r--drivers/s390/char/monreader.c6
-rw-r--r--drivers/s390/char/raw3270.c36
-rw-r--r--drivers/s390/char/sclp_con.c7
-rw-r--r--drivers/s390/char/sclp_vt220.c18
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/vmlogrdr.c4
-rw-r--r--drivers/s390/char/vmur.c2
-rw-r--r--drivers/s390/cio/qdio.h11
-rw-r--r--drivers/s390/cio/qdio_debug.c3
-rw-r--r--drivers/s390/cio/qdio_main.c144
-rw-r--r--drivers/s390/cio/qdio_thinint.c114
-rw-r--r--drivers/s390/crypto/ap_bus.c85
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/serial/Kconfig10
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/bfin_5xx.c5
-rw-r--r--drivers/serial/msm_serial.c772
-rw-r--r--drivers/serial/msm_serial.h117
-rw-r--r--drivers/serial/s3c2400.c2
-rw-r--r--drivers/serial/s3c2410.c2
-rw-r--r--drivers/serial/s3c2412.c2
-rw-r--r--drivers/serial/s3c2440.c2
-rw-r--r--drivers/serial/s3c24a0.c2
-rw-r--r--drivers/serial/s3c6400.c2
-rw-r--r--drivers/serial/samsung.c2
-rw-r--r--drivers/serial/samsung.h2
-rw-r--r--drivers/serial/sb1250-duart.c6
-rw-r--r--drivers/serial/sunhv.c2
-rw-r--r--drivers/serial/timbuart.c50
-rw-r--r--drivers/serial/zs.c6
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c29
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/compat_ioctl.c52
-rw-r--r--fs/jffs2/scan.c4
-rw-r--r--fs/lockd/clntproc.c4
-rw-r--r--fs/lockd/mon.c19
-rw-r--r--fs/lockd/svclock.c2
-rw-r--r--fs/locks.c3
-rw-r--r--fs/minix/minix.h5
-rw-r--r--fs/namespace.c53
-rw-r--r--fs/nfs/Kconfig11
-rw-r--r--fs/nfs/callback.c218
-rw-r--r--fs/nfs/callback.h68
-rw-r--r--fs/nfs/callback_proc.c127
-rw-r--r--fs/nfs/callback_xdr.c280
-rw-r--r--fs/nfs/client.c191
-rw-r--r--fs/nfs/delegation.c32
-rw-r--r--fs/nfs/direct.c9
-rw-r--r--fs/nfs/file.c37
-rw-r--r--fs/nfs/internal.h70
-rw-r--r--fs/nfs/mount_clnt.c337
-rw-r--r--fs/nfs/namespace.c5
-rw-r--r--fs/nfs/nfs3acl.c2
-rw-r--r--fs/nfs/nfs4_fs.h37
-rw-r--r--fs/nfs/nfs4proc.c1348
-rw-r--r--fs/nfs/nfs4renewd.c6
-rw-r--r--fs/nfs/nfs4state.c190
-rw-r--r--fs/nfs/nfs4xdr.c1072
-rw-r--r--fs/nfs/nfsroot.c5
-rw-r--r--fs/nfs/read.c33
-rw-r--r--fs/nfs/super.c497
-rw-r--r--fs/nfs/unlink.c20
-rw-r--r--fs/nfs/write.c31
-rw-r--r--fs/nfsd/export.c13
-rw-r--r--fs/nfsd/nfs3proc.c237
-rw-r--r--fs/nfsd/nfs3xdr.c1
-rw-r--r--fs/nfsd/nfs4callback.c247
-rw-r--r--fs/nfsd/nfs4proc.c129
-rw-r--r--fs/nfsd/nfs4state.c171
-rw-r--r--fs/nfsd/nfs4xdr.c296
-rw-r--r--fs/nfsd/nfscache.c33
-rw-r--r--fs/nfsd/nfsctl.c294
-rw-r--r--fs/nfsd/nfsfh.c6
-rw-r--r--fs/nfsd/nfsproc.c198
-rw-r--r--fs/nfsd/nfssvc.c12
-rw-r--r--fs/nfsd/vfs.c93
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/linux/cpu.h5
-rw-r--r--include/linux/dma_remapping.h9
-rw-r--r--include/linux/dmar.h9
-rw-r--r--include/linux/firewire.h87
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/intel-iommu.h35
-rw-r--r--include/linux/ioport.h2
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/mnt_namespace.h10
-rw-r--r--include/linux/mtd/nand.h16
-rw-r--r--include/linux/mtd/onenand.h24
-rw-r--r--include/linux/mtd/onenand_regs.h20
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/nfs.h5
-rw-r--r--include/linux/nfs2.h7
-rw-r--r--include/linux/nfs3.h5
-rw-r--r--include/linux/nfs4.h18
-rw-r--r--include/linux/nfs_fs_sb.h67
-rw-r--r--include/linux/nfs_xdr.h171
-rw-r--r--include/linux/nfsd/cache.h3
-rw-r--r--include/linux/nfsd/nfsfh.h7
-rw-r--r--include/linux/nfsd/state.h45
-rw-r--r--include/linux/nfsd/xdr4.h28
-rw-r--r--include/linux/pci-acpi.h4
-rw-r--r--include/linux/pci.h37
-rw-r--r--include/linux/pci_hotplug.h23
-rw-r--r--include/linux/pci_regs.h14
-rw-r--r--include/linux/rfkill.h33
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/serial_reg.h1
-rw-r--r--include/linux/sunrpc/bc_xprt.h49
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/sunrpc/svc.h11
-rw-r--r--include/linux/sunrpc/svc_xprt.h7
-rw-r--r--include/linux/sunrpc/svcsock.h9
-rw-r--r--include/linux/sunrpc/xprt.h38
-rw-r--r--include/mtd/Kbuild1
-rw-r--r--include/mtd/jffs2-user.h34
-rw-r--r--include/mtd/mtd-abi.h15
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--init/main.c1
-rw-r--r--kernel/cpu.c13
-rw-r--r--mm/vmscan.c2
-rw-r--r--net/ieee802154/af_ieee802154.c12
-rw-r--r--net/ipv4/route.c14
-rw-r--r--net/iucv/af_iucv.c297
-rw-r--r--net/rfkill/core.c56
-rw-r--r--net/sunrpc/Makefile1
-rw-r--r--net/sunrpc/backchannel_rqst.c281
-rw-r--r--net/sunrpc/bc_svc.c81
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/clnt.c143
-rw-r--r--net/sunrpc/sched.c2
-rw-r--r--net/sunrpc/stats.c8
-rw-r--r--net/sunrpc/sunrpc.h37
-rw-r--r--net/sunrpc/svc.c134
-rw-r--r--net/sunrpc/svc_xprt.c57
-rw-r--r--net/sunrpc/svcsock.c161
-rw-r--r--net/sunrpc/xprt.c60
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c8
-rw-r--r--net/sunrpc/xprtsock.c217
-rw-r--r--net/wireless/nl80211.c95
-rw-r--r--sound/soc/pxa/corgi.c36
-rw-r--r--sound/soc/pxa/poodle.c36
-rw-r--r--tools/perf/perf.h6
460 files changed, 27056 insertions, 7297 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 97ad190e13af..6bf68053e4b8 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -122,3 +122,10 @@ Description:
This symbolic link appears when a device is a Virtual Function.
The symbolic link points to the PCI device sysfs entry of the
Physical Function this device associates with.
+
+What: /sys/bus/pci/slots/.../module
+Date: June 2009
+Contact: linux-pci@vger.kernel.org
+Description:
+ This symbolic link points to the PCI hotplug controller driver
+ module that manages the hotplug slot.
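
As an illustration only (not part of the patch above), a minimal userspace
sketch that consumes the new "module" symlink: it resolves the link for a
hypothetical hotplug slot named "3" and prints the name of the controlling
driver module. The slot name is an assumption; real entries appear under
/sys/bus/pci/slots/ according to the platform's physical slot numbers.

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		char target[256];
		const char *base;
		/* "3" is an assumed slot name, used here for illustration. */
		ssize_t n = readlink("/sys/bus/pci/slots/3/module",
				     target, sizeof(target) - 1);

		if (n < 0) {
			perror("readlink");
			return 1;
		}
		target[n] = '\0';
		/* The final path component is the hotplug controller driver
		 * module, e.g. "pciehp" or "shpchp". */
		base = strrchr(target, '/');
		printf("hotplug driver module: %s\n", base ? base + 1 : target);
		return 0;
	}
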
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
new file mode 100644
index 000000000000..4d55a1888981
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -0,0 +1,125 @@
+What: /sys/class/mtd/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ The mtd/ class subdirectory belongs to the MTD subsystem
+ (MTD core).
+
+What: /sys/class/mtd/mtdX/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ The /sys/class/mtd/mtd{0,1,2,3,...} directories correspond
+ to each /dev/mtdX character device. These may represent
+ physical/simulated flash devices, partitions on a flash
+ device, or concatenated flash devices. They exist regardless
+ of whether CONFIG_MTD_CHAR is actually enabled.
+
+What: /sys/class/mtd/mtdXro/
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ These directories provide the corresponding read-only device
+ nodes for /sys/class/mtd/mtdX/ . They are only created
+ (for the benefit of udev) if CONFIG_MTD_CHAR is enabled.
+
+What: /sys/class/mtd/mtdX/dev
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Major and minor numbers of the character device corresponding
+ to this MTD device (in <major>:<minor> format). This is the
+ read-write device so <minor> will be even.
+
+What: /sys/class/mtd/mtdXro/dev
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Major and minor numbers of the character device corresponding
+ to the read-only variant of this MTD device (in
+ <major>:<minor> format). In this case <minor> will be odd.
+
+What: /sys/class/mtd/mtdX/erasesize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ "Major" erase size for the device. If numeraseregions is
+ zero, this is the eraseblock size for the entire device.
+ Otherwise, the MEMGETREGIONCOUNT/MEMGETREGIONINFO ioctls
+ can be used to determine the actual eraseblock layout.
+
+What: /sys/class/mtd/mtdX/flags
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ A hexadecimal value representing the device flags, ORed
+ together:
+
+ 0x0400: MTD_WRITEABLE - device is writable
+ 0x0800: MTD_BIT_WRITEABLE - single bits can be flipped
+ 0x1000: MTD_NO_ERASE - no erase necessary
+ 0x2000: MTD_POWERUP_LOCK - always locked after reset
+
+What: /sys/class/mtd/mtdX/name
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ A human-readable ASCII name for the device or partition.
+ This will match the name in /proc/mtd .
+
+What: /sys/class/mtd/mtdX/numeraseregions
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ For devices that have variable eraseblock sizes, this
+ provides the total number of erase regions. Otherwise,
+ it will read back as zero.
+
+What: /sys/class/mtd/mtdX/oobsize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Number of OOB bytes per page.
+
+What: /sys/class/mtd/mtdX/size
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Total size of the device/partition, in bytes.
+
+What: /sys/class/mtd/mtdX/type
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ One of the following ASCII strings, representing the device
+ type:
+
+ absent, ram, rom, nor, nand, dataflash, ubi, unknown
+
+What: /sys/class/mtd/mtdX/writesize
+Date: April 2009
+KernelVersion: 2.6.29
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Minimal writable flash unit size. This will always be
+ a positive integer.
+
+ In the case of NOR flash it is 1 (even though individual
+ bits can be cleared).
+
+ In the case of NAND flash it is one NAND page (or a
+ half page, or a quarter page).
+
+ In the case of ECC NOR, it is the ECC block size.
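
For illustration (not part of the patch), a minimal sketch that reads the
flags attribute described above and decodes it with the documented bit
values; the mtd0 device number is an assumption.

	#include <stdio.h>

	int main(void)
	{
		/* mtd0 is an assumed device; pick the one you care about. */
		FILE *f = fopen("/sys/class/mtd/mtd0/flags", "r");
		unsigned long flags = 0;

		if (!f || fscanf(f, "%lx", &flags) != 1) {
			fprintf(stderr, "cannot read mtd0 flags\n");
			return 1;
		}
		fclose(f);

		/* Bit values as listed in the flags entry above. */
		printf("MTD_WRITEABLE:     %s\n", (flags & 0x0400) ? "yes" : "no");
		printf("MTD_BIT_WRITEABLE: %s\n", (flags & 0x0800) ? "yes" : "no");
		printf("MTD_NO_ERASE:      %s\n", (flags & 0x1000) ? "yes" : "no");
		printf("MTD_POWERUP_LOCK:  %s\n", (flags & 0x2000) ? "yes" : "no");
		return 0;
	}
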
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index ddeb14beacc8..be21001ab144 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
be initiated even if the firmware has no _OSC support. To enable the
workaround, please add aerdriver.forceload=y to the kernel boot parameter
line when booting the kernel. Note that forceload=n by default.
+nosourceid, another parameter of type bool, can be used when broken
+hardware (mostly chipsets) has root ports that cannot obtain the reporting
+source ID. nosourceid=n by default.
+
2.3 AER error output
When a PCI-E AER error is captured, an error message will be output to the
console. If it's a correctable error, it is output as a warning.
@@ -246,3 +250,24 @@ with the PCI Express AER Root driver?
A: It could call the helper functions to enable AER in devices and
clean up the uncorrectable status register. Please refer to section 3.3.
+
+4. Software error injection
+
+Debugging PCIe AER error recovery code is quite difficult because it
+is hard to trigger real hardware errors. Software-based error
+injection can be used to fake various kinds of PCIe errors.
+
+First you should enable PCIe AER software error injection in the kernel
+configuration; that is, the following item should be in your .config:
+
+CONFIG_PCIEAER_INJECT=y or CONFIG_PCIEAER_INJECT=m
+
+After rebooting with the new kernel or inserting the module, a device file
+named /dev/aer_inject should be created.
+
+Then, you need a user-space tool named aer-inject, which can be obtained
+from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+
+More information about aer-inject can be found in the documentation that
+comes with its source code.
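
As a hedged illustration (not part of the patch), a tiny check that the
injection device node described above is present before running the
aer-inject tool; the record format written to /dev/aer_inject is defined
by the aer-inject tool and is deliberately not reproduced here.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/aer_inject", O_WRONLY);

		if (fd < 0) {
			/* Likely CONFIG_PCIEAER_INJECT is off or the module
			 * is not loaded. */
			perror("/dev/aer_inject");
			return 1;
		}
		puts("aer_inject is available; use the aer-inject tool to inject errors");
		close(fd);
		return 0;
	}
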
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 8dd6db76171d..f15621ee5599 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -66,6 +66,10 @@ mandatory-locking.txt
- info on the Linux implementation of Sys V mandatory file locking.
ncpfs.txt
- info on Novell Netware(tm) filesystem using NCP protocol.
+nfs41-server.txt
+ - info on the Linux server implementation of NFSv4 minor version 1.
+nfs-rdma.txt
+ - how to install and setup the Linux NFS/RDMA client and server software.
nfsroot.txt
- short guide on setting up a diskless box with NFS root filesystem.
nilfs2.txt
diff --git a/Documentation/isdn/00-INDEX b/Documentation/isdn/00-INDEX
index f6010a536590..e87e336f590e 100644
--- a/Documentation/isdn/00-INDEX
+++ b/Documentation/isdn/00-INDEX
@@ -14,25 +14,14 @@ README
- general info on what you need and what to do for Linux ISDN.
README.FAQ
- general info for FAQ.
-README.audio
- - info for running audio over ISDN.
-README.fax
- - info for using Fax over ISDN.
-README.gigaset
- - info on the drivers for Siemens Gigaset ISDN adapters.
-README.icn
- - info on the ICN-ISDN-card and its driver.
->>>>>>> 93af7aca44f0e82e67bda10a0fb73d383edcc8bd:Documentation/isdn/00-INDEX
README.HiSax
- info on the HiSax driver which replaces the old teles.
+README.act2000
+ - info on driver for IBM ACT-2000 card.
README.audio
- info for running audio over ISDN.
README.avmb1
- info on driver for AVM-B1 ISDN card.
-README.act2000
- - info on driver for IBM ACT-2000 card.
-README.eicon
- - info on driver for Eicon active cards.
README.concap
- info on "CONCAP" encapsulation protocol interface used for X.25.
README.diversion
@@ -59,7 +48,3 @@ README.x25
- info for running X.25 over ISDN.
syncPPP.FAQ
- frequently asked questions about running PPP over ISDN.
-README.hysdn
- - info on driver for Hypercope active HYSDN cards
-README.mISDN
- - info on the Modular ISDN subsystem (mISDN).
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 08def8deb5f5..92e1ab8178a8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1006,6 +1006,7 @@ and is between 256 and 4096 characters. It is defined in the file
nomerge
forcesac
soft
+ pt [x86, IA64]
io7= [HW] IO7 for Marvel based alpha systems
See comment before marvel_specify_io7 in
@@ -1369,6 +1370,27 @@ and is between 256 and 4096 characters. It is defined in the file
min_addr=nn[KMG] [KNL,BOOT,ia64] All physical memory below this
physical address is ignored.
+ mini2440= [ARM,HW,KNL]
+ Format:[0..2][b][c][t]
+ Default: "0tb"
+ MINI2440 configuration specification:
+ 0 - The attached screen is the 3.5" TFT
+ 1 - The attached screen is the 7" TFT
+ 2 - The VGA Shield is attached (1024x768)
+ Leaving out the screen size parameter will not load
+ the TFT driver, and the framebuffer will be left
+ unconfigured.
+ b - Enable backlight. The TFT backlight pin will be
+ linked to the kernel VESA blanking code and a GPIO
+ LED. This parameter is not necessary when using the
+ VGA shield.
+ c - Enable the s3c camera interface.
+ t - Reserved for enabling touchscreen support. The
+ touchscreen support is not enabled in the mainstream
+ kernel as of 2.6.30, a preliminary port can be found
+ in the "bleeding edge" mini2440 support kernel at
+ http://repo.or.cz/w/linux-2.6/mini2440.git
+
mminit_loglevel=
[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
parameter allows control of the logging verbosity for
@@ -1410,6 +1432,16 @@ and is between 256 and 4096 characters. It is defined in the file
mtdparts= [MTD]
See drivers/mtd/cmdlinepart.c.
+ onenand.bdry= [HW,MTD] Flex-OneNAND Boundary Configuration
+
+ Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
+
+ boundary - index of the last SLC block on Flex-OneNAND.
+ The remaining blocks are configured as MLC blocks.
+ lock - Configure whether the Flex-OneNAND boundary should be locked.
+ Once locked, the boundary cannot be changed.
+ 1 indicates locked, 0 indicates unlocked.
+
mtdset= [ARM]
ARM/S3C2412 JIVE boot control
@@ -1776,6 +1808,9 @@ and is between 256 and 4096 characters. It is defined in the file
root domains (aka PCI segments, in ACPI-speak).
nommconf [X86] Disable use of MMCONFIG for PCI
Configuration
+ check_enable_amd_mmconf [X86] check for and enable
+ properly configured MMIO access to PCI
+ config space on AMD family 10h CPU
nomsi [MSI] If the PCI_MSI kernel config parameter is
enabled, this kernel boot option can be used to
disable the use of MSI interrupts system-wide.
@@ -1828,7 +1863,7 @@ and is between 256 and 4096 characters. It is defined in the file
IRQ routing is enabled.
noacpi [X86] Do not use ACPI for IRQ routing
or for PCI scanning.
- use_crs [X86] Use _CRS for PCI resource
+ nocrs [X86] Don't use _CRS for PCI resource
allocation.
routeirq Do IRQ routing for all PCI devices.
This is normally done in pci_enable_device(),
@@ -1865,6 +1900,12 @@ and is between 256 and 4096 characters. It is defined in the file
PAGE_SIZE is used as alignment.
PCI-PCI bridge can be specified, if resource
windows need to be expanded.
+ ecrc= Enable/disable PCIe ECRC (transaction layer
+ end-to-end CRC checking).
+ bios: Use BIOS/firmware settings. This is
+ the default.
+ off: Turn ECRC off.
+ on: Turn ECRC on.
pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power
Management.
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index c8acd8659e91..b4860509c319 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -111,6 +111,8 @@ following attributes:
name: Name assigned by driver to this key (interface or driver name).
type: Driver type string ("wlan", "bluetooth", etc).
+ persistent: Whether the soft blocked state is initialised from
+ non-volatile storage at startup.
state: Current state of the transmitter
0: RFKILL_STATE_SOFT_BLOCKED
transmitter is turned off by software
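
For illustration (not part of the patch), a minimal sketch that reports the
new persistent attribute next to state for the first rfkill device; the
rfkill0 index is an assumption.

	#include <stdio.h>

	static int read_attr(const char *path)
	{
		FILE *f = fopen(path, "r");
		int val = -1;

		if (f) {
			if (fscanf(f, "%d", &val) != 1)
				val = -1;
			fclose(f);
		}
		return val;
	}

	int main(void)
	{
		printf("state:      %d\n",
		       read_attr("/sys/class/rfkill/rfkill0/state"));
		printf("persistent: %d\n",
		       read_attr("/sys/class/rfkill/rfkill0/persistent"));
		return 0;
	}
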
diff --git a/MAINTAINERS b/MAINTAINERS
index 487aaea9d778..5ef3a16eb140 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -653,6 +653,8 @@ M: laforge@openezx.org
L: openezx-devel@lists.openezx.org (subscribers-only)
W: http://www.openezx.org/
S: Maintained
+T: topgit git://git.openezx.org/openezx.git
+F: arch/arm/mach-pxa/ezx.c
ARM/FARADAY FA526 PORT
P: Paulius Zaleckas
@@ -774,11 +776,25 @@ P: Philipp Zabel
M: philipp.zabel@gmail.com
S: Maintained
+ARM/MIOA701 MACHINE SUPPORT
+P: Robert Jarzmik
+M: robert.jarzmik@free.fr
+L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
+F: arch/arm/mach-pxa/mioa701.c
+S: Maintained
+
ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT
P: Michael Petchkovsky
M: mkpetch@internode.on.net
S: Maintained
+ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
+P: Nelson Castillo
+M: arhuaco@freaks-unidos.net
+L: openmoko-kernel@lists.openmoko.org (subscribers-only)
+W: http://wiki.openmoko.org/wiki/Neo_FreeRunner
+S: Supported
+
ARM/TOSA MACHINE SUPPORT
P: Dmitry Eremin-Solenikov
M: dbaryshkov@gmail.com
@@ -792,6 +808,12 @@ M: marek.vasut@gmail.com
W: http://hackndev.com
S: Maintained
+ARM/PALM TREO 680 SUPPORT
+P: Tomas Cech
+M: sleep_walker@suse.cz
+W: http://hackndev.com
+S: Maintained
+
ARM/PALMZ72 SUPPORT
P: Sergey Lapin
M: slapin@ossfans.org
@@ -2853,7 +2875,7 @@ P: Sergey Lapin
M: slapin@ossfans.org
L: linux-zigbee-devel@lists.sourceforge.net
W: http://apps.sourceforge.net/trac/linux-zigbee
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lumag/lowpan.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
F: net/ieee802154/
F: drivers/ieee802154/
@@ -3230,7 +3252,6 @@ W: http://www.linux-mtd.infradead.org/doc/jffs2.html
S: Maintained
F: fs/jffs2/
F: include/linux/jffs2.h
-F: include/mtd/jffs2-user.h
JOURNALLING LAYER FOR BLOCK DEVICES (JBD)
P: Stephen Tweedie
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 29475101a7b3..aef63c8e3d2d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1241,7 +1241,7 @@ endmenu
menu "CPU Power Management"
-if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_PXA)
+if (ARCH_SA1100 || ARCH_INTEGRATOR || ARCH_OMAP || ARCH_PXA || ARCH_S3C64XX)
source "drivers/cpufreq/Kconfig"
@@ -1272,6 +1272,10 @@ config CPU_FREQ_PXA
default y
select CPU_FREQ_DEFAULT_GOV_USERSPACE
+config CPU_FREQ_S3C64XX
+ bool "CPUfreq support for Samsung S3C64XX CPUs"
+ depends on CPU_FREQ && CPU_S3C6410
+
endif
source "drivers/cpuidle/Kconfig"
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 01d49be3b2ca..4515728c5345 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -674,6 +674,15 @@ proc_types:
b __armv4_mmu_cache_off
b __armv5tej_mmu_cache_flush
+#ifdef CONFIG_CPU_FEROCEON_OLD_ID
+ /* this conflicts with the standard ARMv5TE entry */
+ .long 0x41009260 @ Old Feroceon
+ .long 0xff00fff0
+ b __armv4_mmu_cache_on
+ b __armv4_mmu_cache_off
+ b __armv5tej_mmu_cache_flush
+#endif
+
.word 0x66015261 @ FA526
.word 0xff01fff1
b __fa526_cache_on
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 664c7b8b1ba8..337741f734ac 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -117,7 +117,7 @@ static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
u32 val;
spin_lock(&irq_controller_lock);
- irq_desc[irq].cpu = cpu;
+ irq_desc[irq].node = cpu;
val = readl(reg) & ~(0xff << shift);
val |= 1 << (cpu + shift);
writel(val, reg);
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 887c6eb3a18a..6ed89836e908 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -229,14 +229,18 @@ static int vic_set_wake(unsigned int irq, unsigned int on)
{
struct vic_device *v = vic_from_irq(irq);
unsigned int off = irq & 31;
+ u32 bit = 1 << off;
if (!v)
return -EINVAL;
+ if (!(bit & v->resume_sources))
+ return -EINVAL;
+
if (on)
- v->resume_irqs |= 1 << off;
+ v->resume_irqs |= bit;
else
- v->resume_irqs &= ~(1 << off);
+ v->resume_irqs &= ~bit;
return 0;
}
diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig
new file mode 100644
index 000000000000..e49ed40f3be7
--- /dev/null
+++ b/arch/arm/configs/mini2440_defconfig
@@ -0,0 +1,2097 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.30-rc6
+# Wed May 20 12:29:51 2009
+#
+CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+# CONFIG_GENERIC_TIME is not set
+# CONFIG_GENERIC_CLOCKEVENTS is not set
+CONFIG_MMU=y
+CONFIG_NO_IOPORT=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+CONFIG_RELAY=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+# CONFIG_SLOW_WORK is not set
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+CONFIG_LBD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_BLK_DEV_INTEGRITY=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_PNX4008 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+CONFIG_ARCH_S3C2410=y
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_W90X900 is not set
+CONFIG_PLAT_S3C24XX=y
+CONFIG_S3C2410_CLOCK=y
+CONFIG_CPU_S3C244X=y
+CONFIG_S3C24XX_PWM=y
+CONFIG_S3C24XX_GPIO_EXTRA=0
+CONFIG_S3C2410_DMA=y
+# CONFIG_S3C2410_DMA_DEBUG is not set
+CONFIG_S3C24XX_ADC=y
+CONFIG_PLAT_S3C=y
+CONFIG_CPU_LLSERIAL_S3C2440_ONLY=y
+CONFIG_CPU_LLSERIAL_S3C2440=y
+
+#
+# Boot options
+#
+# CONFIG_S3C_BOOT_WATCHDOG is not set
+# CONFIG_S3C_BOOT_ERROR_RESET is not set
+CONFIG_S3C_BOOT_UART_FORCE_FIFO=y
+
+#
+# Power management
+#
+# CONFIG_S3C2410_PM_DEBUG is not set
+# CONFIG_S3C2410_PM_CHECK is not set
+CONFIG_S3C_LOWLEVEL_UART_PORT=0
+CONFIG_S3C_GPIO_SPACE=0
+
+#
+# S3C2400 Machines
+#
+CONFIG_S3C2410_PM=y
+CONFIG_S3C2410_GPIO=y
+
+#
+# S3C2410 Machines
+#
+# CONFIG_ARCH_SMDK2410 is not set
+# CONFIG_ARCH_H1940 is not set
+# CONFIG_MACH_N30 is not set
+# CONFIG_ARCH_BAST is not set
+# CONFIG_MACH_OTOM is not set
+# CONFIG_MACH_AML_M5900 is not set
+# CONFIG_MACH_TCT_HAMMER is not set
+# CONFIG_MACH_VR1000 is not set
+# CONFIG_MACH_QT2410 is not set
+
+#
+# S3C2412 Machines
+#
+# CONFIG_MACH_JIVE is not set
+# CONFIG_MACH_SMDK2413 is not set
+# CONFIG_MACH_SMDK2412 is not set
+# CONFIG_MACH_VSTMS is not set
+CONFIG_CPU_S3C2440=y
+CONFIG_S3C2440_DMA=y
+
+#
+# S3C2440 Machines
+#
+# CONFIG_MACH_ANUBIS is not set
+# CONFIG_MACH_OSIRIS is not set
+# CONFIG_MACH_RX3715 is not set
+# CONFIG_ARCH_S3C2440 is not set
+# CONFIG_MACH_NEXCODER_2440 is not set
+# CONFIG_MACH_AT2440EVB is not set
+CONFIG_MACH_MINI2440=y
+
+#
+# S3C2442 Machines
+#
+
+#
+# S3C2443 Machines
+#
+# CONFIG_MACH_SMDK2443 is not set
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4T=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_PABRT_NOIFAR=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+# CONFIG_OUTER_CACHE is not set
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=200
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARCH_FLATMEM_HAS_HOLES=y
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE=""
+# CONFIG_XIP_KERNEL is not set
+CONFIG_KEXEC=y
+CONFIG_ATAGS_PROC=y
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+CONFIG_BINFMT_AOUT=m
+CONFIG_BINFMT_MISC=m
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_APM_EMULATION=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_NET_KEY=m
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NF_CONNTRACK is not set
+# CONFIG_NETFILTER_XTABLES is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+# CONFIG_NF_DEFRAG_IPV4 is not set
+# CONFIG_IP_NF_QUEUE is not set
+# CONFIG_IP_NF_IPTABLES is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_BRIDGE=m
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+# CONFIG_DECNET is not set
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIBCM203X=m
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=m
+CONFIG_LIB80211_CRYPT_WEP=m
+CONFIG_LIB80211_CRYPT_CCMP=m
+CONFIG_LIB80211_CRYPT_TKIP=m
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=m
+
+#
+# Rate control algorithm selection
+#
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+# CONFIG_MAC80211_DEBUGFS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_FTL=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+CONFIG_INFTL=y
+CONFIG_RFD_FTL=y
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=y
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_VERIFY_WRITE=y
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_S3C2410=y
+# CONFIG_MTD_NAND_S3C2410_DEBUG is not set
+# CONFIG_MTD_NAND_S3C2410_HWECC is not set
+# CONFIG_MTD_NAND_S3C2410_CLKSTOP is not set
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+CONFIG_MTD_LPDDR=y
+CONFIG_MTD_QINFO_PROBE=y
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=m
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+CONFIG_BLK_DEV_NBD=m
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=y
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=m
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+CONFIG_COMPAT_NET_DEV_OPS=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_TUN=m
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+CONFIG_DM9000=y
+CONFIG_DM9000_DEBUGLEVEL=4
+# CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=m
+# CONFIG_LIBERTAS_USB is not set
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_SPI is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_AR9170_USB is not set
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_ZD1211RW=m
+CONFIG_ZD1211RW_DEBUG=y
+# CONFIG_RT2X00 is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+CONFIG_PPP=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_MPPE=m
+# CONFIG_PPPOE is not set
+# CONFIG_PPPOL2TP is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=m
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=y
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_UARTS=3
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_S3C2440=y
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=128
+CONFIG_IPMI_HANDLER=m
+# CONFIG_IPMI_PANIC_EVENT is not set
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SIMTEC=y
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+CONFIG_SENSORS_TSL2550=m
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+# CONFIG_SPI_GPIO is not set
+CONFIG_SPI_S3C24XX=y
+# CONFIG_SPI_S3C24XX_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IBMAEM is not set
+# CONFIG_SENSORS_IBMPEX is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM75=y
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+CONFIG_THERMAL=m
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_S3C2410_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_VIDEO_V4L2_COMMON=m
+CONFIG_VIDEO_ALLOW_V4L1=y
+CONFIG_VIDEO_V4L1_COMPAT=y
+CONFIG_DVB_CORE=m
+CONFIG_VIDEO_MEDIA=m
+
+#
+# Multimedia drivers
+#
+# CONFIG_MEDIA_ATTACH is not set
+CONFIG_MEDIA_TUNER=m
+# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC5000=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_VIDEO_V4L2=m
+CONFIG_VIDEO_V4L1=m
+CONFIG_VIDEOBUF_GEN=m
+CONFIG_VIDEO_CAPTURE_DRIVERS=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
+# CONFIG_VIDEO_VIVI is not set
+# CONFIG_VIDEO_CPIA is not set
+# CONFIG_VIDEO_CPIA2 is not set
+# CONFIG_VIDEO_SAA5246A is not set
+# CONFIG_VIDEO_SAA5249 is not set
+# CONFIG_VIDEO_AU0828 is not set
+CONFIG_SOC_CAMERA=m
+# CONFIG_SOC_CAMERA_MT9M001 is not set
+# CONFIG_SOC_CAMERA_MT9M111 is not set
+# CONFIG_SOC_CAMERA_MT9T031 is not set
+# CONFIG_SOC_CAMERA_MT9V022 is not set
+# CONFIG_SOC_CAMERA_TW9910 is not set
+CONFIG_SOC_CAMERA_PLATFORM=m
+# CONFIG_SOC_CAMERA_OV772X is not set
+# CONFIG_VIDEO_SH_MOBILE_CEU is not set
+CONFIG_V4L_USB_DRIVERS=y
+# CONFIG_USB_VIDEO_CLASS is not set
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+CONFIG_USB_GSPCA=m
+# CONFIG_USB_M5602 is not set
+# CONFIG_USB_STV06XX is not set
+# CONFIG_USB_GSPCA_CONEX is not set
+# CONFIG_USB_GSPCA_ETOMS is not set
+# CONFIG_USB_GSPCA_FINEPIX is not set
+# CONFIG_USB_GSPCA_MARS is not set
+# CONFIG_USB_GSPCA_MR97310A is not set
+# CONFIG_USB_GSPCA_OV519 is not set
+# CONFIG_USB_GSPCA_OV534 is not set
+# CONFIG_USB_GSPCA_PAC207 is not set
+# CONFIG_USB_GSPCA_PAC7311 is not set
+# CONFIG_USB_GSPCA_SONIXB is not set
+# CONFIG_USB_GSPCA_SONIXJ is not set
+# CONFIG_USB_GSPCA_SPCA500 is not set
+# CONFIG_USB_GSPCA_SPCA501 is not set
+# CONFIG_USB_GSPCA_SPCA505 is not set
+# CONFIG_USB_GSPCA_SPCA506 is not set
+# CONFIG_USB_GSPCA_SPCA508 is not set
+# CONFIG_USB_GSPCA_SPCA561 is not set
+# CONFIG_USB_GSPCA_SQ905 is not set
+# CONFIG_USB_GSPCA_SQ905C is not set
+# CONFIG_USB_GSPCA_STK014 is not set
+# CONFIG_USB_GSPCA_SUNPLUS is not set
+# CONFIG_USB_GSPCA_T613 is not set
+# CONFIG_USB_GSPCA_TV8532 is not set
+# CONFIG_USB_GSPCA_VC032X is not set
+CONFIG_USB_GSPCA_ZC3XX=m
+# CONFIG_VIDEO_PVRUSB2 is not set
+# CONFIG_VIDEO_HDPVR is not set
+# CONFIG_VIDEO_EM28XX is not set
+# CONFIG_VIDEO_CX231XX is not set
+# CONFIG_VIDEO_USBVISION is not set
+# CONFIG_USB_VICAM is not set
+# CONFIG_USB_IBMCAM is not set
+# CONFIG_USB_KONICAWC is not set
+# CONFIG_USB_QUICKCAM_MESSENGER is not set
+# CONFIG_USB_ET61X251 is not set
+# CONFIG_VIDEO_OVCAMCHIP is not set
+# CONFIG_USB_OV511 is not set
+# CONFIG_USB_SE401 is not set
+# CONFIG_USB_SN9C102 is not set
+# CONFIG_USB_STV680 is not set
+# CONFIG_USB_ZC0301 is not set
+# CONFIG_USB_PWC is not set
+# CONFIG_USB_PWC_INPUT_EVDEV is not set
+# CONFIG_USB_ZR364XX is not set
+# CONFIG_USB_STKWEBCAM is not set
+# CONFIG_USB_S2255 is not set
+CONFIG_RADIO_ADAPTERS=y
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_SI470X is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+CONFIG_DVB_CAPTURE_DRIVERS=y
+# CONFIG_TTPCI_EEPROM is not set
+
+#
+# Supported USB Adapters
+#
+# CONFIG_DVB_USB is not set
+# CONFIG_DVB_SIANO_SMS1XXX is not set
+
+#
+# Supported FlexCopII (B2C2) Adapters
+#
+# CONFIG_DVB_B2C2_FLEXCOP is not set
+
+#
+# Supported DVB Frontends
+#
+# CONFIG_DVB_FE_CUSTOMISE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_S3C2410=y
+# CONFIG_FB_S3C2410_DEBUG is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+
+#
+# Display device support
+#
+CONFIG_DISPLAY_SUPPORT=y
+
+#
+# Display hardware drivers
+#
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+# CONFIG_FONT_8x16 is not set
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+CONFIG_FONT_MINI_4x6=y
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_HWDEP=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_OSSEMUL=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_PCM_OSS_PLUGINS=y
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_ARM is not set
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_SOC=y
+CONFIG_SND_S3C24XX_SOC=y
+CONFIG_SND_S3C24XX_SOC_I2S=y
+# CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650 is not set
+CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_L3=y
+CONFIG_SND_SOC_UDA134X=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_DEBUG is not set
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+# CONFIG_DRAGONRISE_FF is not set
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_TOPSEED=y
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_GADGET_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+# CONFIG_USB_PRINTER is not set
+CONFIG_USB_WDM=m
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_DATAFAB=m
+# CONFIG_USB_STORAGE_FREECOM is not set
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+CONFIG_USB_LIBUSUAL=y
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+# CONFIG_USB_EZUSB is not set
+# CONFIG_USB_SERIAL_GENERIC is not set
+# CONFIG_USB_SERIAL_AIRCABLE is not set
+# CONFIG_USB_SERIAL_ARK3116 is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_CH341 is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+CONFIG_USB_SERIAL_CP210X=m
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=m
+# CONFIG_USB_SERIAL_FUNSOFT is not set
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_IUU is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+# CONFIG_USB_SERIAL_KEYSPAN is not set
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+# CONFIG_USB_SERIAL_MCT_U232 is not set
+# CONFIG_USB_SERIAL_MOS7720 is not set
+# CONFIG_USB_SERIAL_MOS7840 is not set
+# CONFIG_USB_SERIAL_MOTOROLA is not set
+# CONFIG_USB_SERIAL_NAVMAN is not set
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_OTI6858 is not set
+# CONFIG_USB_SERIAL_QUALCOMM is not set
+CONFIG_USB_SERIAL_SPCP8X5=m
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_SIEMENS_MPI is not set
+# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
+# CONFIG_USB_SERIAL_SYMBOL is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+# CONFIG_USB_SERIAL_OPTICON is not set
+# CONFIG_USB_SERIAL_DEBUG is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_AT91 is not set
+# CONFIG_USB_GADGET_ATMEL_USBA is not set
+# CONFIG_USB_GADGET_FSL_USB2 is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+# CONFIG_USB_GADGET_PXA25X is not set
+# CONFIG_USB_GADGET_PXA27X is not set
+CONFIG_USB_GADGET_S3C2410=y
+CONFIG_USB_S3C2410=y
+# CONFIG_USB_S3C2410_DEBUG is not set
+# CONFIG_USB_GADGET_IMX is not set
+# CONFIG_USB_GADGET_M66592 is not set
+# CONFIG_USB_GADGET_AMD5536UDC is not set
+# CONFIG_USB_GADGET_FSL_QE is not set
+# CONFIG_USB_GADGET_CI13XXX is not set
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_ETH_RNDIS=y
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FILE_STORAGE=m
+# CONFIG_USB_FILE_STORAGE_TEST is not set
+CONFIG_USB_G_SERIAL=m
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_CDC_COMPOSITE=m
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+CONFIG_SDIO_UART=y
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_S3C=y
+# CONFIG_MEMSTICK is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_S3C24XX=y
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP5521 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2802 is not set
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+CONFIG_RTC_DRV_S3C=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=m
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_EXT4_FS is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_AUTOFS4_FS=y
+# CONFIG_FUSE_FS is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=m
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_ROMFS_BACKED_BY_BLOCK is not set
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_ROMFS_ON_MTD=y
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+# CONFIG_LDM_DEBUG is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="cp437"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_EVENT_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_ERRORS is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_LL is not set
+CONFIG_DEBUG_S3C_UART=0
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_FILE_CAPABILITIES=y
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_FIPS=y
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=m
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=m
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_GF128MUL=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA1=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_CRYPTO_ZLIB=m
+CONFIG_CRYPTO_LZO=m
+
+#
+# Random Number Generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_HW=y
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+CONFIG_CRC_CCITT=m
+CONFIG_CRC16=m
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+CONFIG_CRC7=y
+CONFIG_LIBCRC32C=m
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=m
+CONFIG_LZO_DECOMPRESS=m
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 94cc58ef61ae..0e97b8cb77d5 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -389,6 +389,8 @@
#define __NR_inotify_init1 (__NR_SYSCALL_BASE+360)
#define __NR_preadv (__NR_SYSCALL_BASE+361)
#define __NR_pwritev (__NR_SYSCALL_BASE+362)
+#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
+#define __NR_perf_counter_open (__NR_SYSCALL_BASE+364)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 1680e9e9c831..f776e72a4cb8 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -372,6 +372,8 @@
/* 360 */ CALL(sys_inotify_init1)
CALL(sys_preadv)
CALL(sys_pwritev)
+ CALL(sys_rt_tgsigqueueinfo)
+ CALL(sys_perf_counter_open)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7dca75a..096f600dc8d8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -167,7 +167,7 @@ void __init init_IRQ(void)
#ifdef CONFIG_SMP
cpumask_setall(bad_irq_desc.affinity);
- bad_irq_desc.cpu = smp_processor_id();
+ bad_irq_desc.node = smp_processor_id();
#endif
init_arch_irq();
}
@@ -176,7 +176,7 @@ void __init init_IRQ(void)
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
{
- pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+ pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
spin_lock_irq(&desc->lock);
desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +195,7 @@ void migrate_irqs(void)
for (i = 0; i < NR_IRQS; i++) {
struct irq_desc *desc = irq_desc + i;
- if (desc->cpu == cpu) {
+ if (desc->node == cpu) {
unsigned int newcpu = cpumask_any_and(desc->affinity,
cpu_online_mask);
if (newcpu >= nr_cpu_ids) {
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1585423699ee..39196dff478c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -114,9 +114,6 @@ void arm_machine_restart(char mode, const char *cmd)
/*
* Function pointers to optional machine specific functions
*/
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -130,20 +127,19 @@ EXPORT_SYMBOL_GPL(arm_pm_restart);
*/
static void default_idle(void)
{
- if (hlt_counter)
- cpu_relax();
- else {
- local_irq_disable();
- if (!need_resched())
- arch_idle();
- local_irq_enable();
- }
+ if (!need_resched())
+ arch_idle();
+ local_irq_enable();
}
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL(pm_idle);
+
/*
- * The idle thread. We try to conserve power, while trying to keep
- * overall latency low. The architecture specific idle is passed
- * a value to indicate the level of "idleness" of the system.
+ * The idle thread has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way. The only difference
+ * is that we always respect 'hlt_counter' to prevent low power idle.
*/
void cpu_idle(void)
{
@@ -151,21 +147,31 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
- void (*idle)(void) = pm_idle;
-
+ tick_nohz_stop_sched_tick(1);
+ leds_event(led_idle_start);
+ while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
- if (cpu_is_offline(smp_processor_id())) {
- leds_event(led_idle_start);
- cpu_die();
- }
+ if (cpu_is_offline(smp_processor_id()))
+ cpu_die();
#endif
- if (!idle)
- idle = default_idle;
- leds_event(led_idle_start);
- tick_nohz_stop_sched_tick(1);
- while (!need_resched())
- idle();
+ local_irq_disable();
+ if (hlt_counter) {
+ local_irq_enable();
+ cpu_relax();
+ } else {
+ stop_critical_timings();
+ pm_idle();
+ start_critical_timings();
+ /*
+ * This will eventually be removed - pm_idle
+ * functions should always return with IRQs
+ * enabled.
+ */
+ WARN_ON(irqs_disabled());
+ local_irq_enable();
+ }
+ }
leds_event(led_idle_end);
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
@@ -352,6 +358,23 @@ asm( ".section .text\n"
" .size kernel_thread_helper, . - kernel_thread_helper\n"
" .previous");
+#ifdef CONFIG_ARM_UNWIND
+extern void kernel_thread_exit(long code);
+asm( ".section .text\n"
+" .align\n"
+" .type kernel_thread_exit, #function\n"
+"kernel_thread_exit:\n"
+" .fnstart\n"
+" .cantunwind\n"
+" bl do_exit\n"
+" nop\n"
+" .fnend\n"
+" .size kernel_thread_exit, . - kernel_thread_exit\n"
+" .previous");
+#else
+#define kernel_thread_exit do_exit
+#endif
+
/*
* Create a kernel thread.
*/
@@ -363,7 +386,7 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.ARM_r1 = (unsigned long)arg;
regs.ARM_r2 = (unsigned long)fn;
- regs.ARM_r3 = (unsigned long)do_exit;
+ regs.ARM_r3 = (unsigned long)kernel_thread_exit;
regs.ARM_pc = (unsigned long)kernel_thread_helper;
regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE;
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 1dedc2c7ff49..dd56e11f339a 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -212,7 +212,8 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
ctrl->vrs[14] = *vsp++;
ctrl->vrs[SP] = (unsigned long)vsp;
} else if (insn == 0xb0) {
- ctrl->vrs[PC] = ctrl->vrs[LR];
+ if (ctrl->vrs[PC] == 0)
+ ctrl->vrs[PC] = ctrl->vrs[LR];
/* no further processing */
ctrl->entries = 0;
} else if (insn == 0xb1) {
@@ -309,18 +310,20 @@ int unwind_frame(struct stackframe *frame)
}
while (ctrl.entries > 0) {
- int urc;
-
- if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
- return -URC_FAILURE;
- urc = unwind_exec_insn(&ctrl);
+ int urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
+ if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high)
+ return -URC_FAILURE;
}
if (ctrl.vrs[PC] == 0)
ctrl.vrs[PC] = ctrl.vrs[LR];
+ /* check for infinite loop */
+ if (frame->pc == ctrl.vrs[PC])
+ return -URC_FAILURE;
+
frame->fp = ctrl.vrs[FP];
frame->sp = ctrl.vrs[SP];
frame->lr = ctrl.vrs[LR];
@@ -332,7 +335,6 @@ int unwind_frame(struct stackframe *frame)
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long high, low;
register unsigned long current_sp asm ("sp");
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
@@ -362,9 +364,6 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
frame.pc = thread_saved_pc(tsk);
}
- low = frame.sp & ~(THREAD_SIZE - 1);
- high = low + THREAD_SIZE;
-
while (1) {
int urc;
unsigned long where = frame.pc;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 6c0779792546..4340bf3d2c84 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -84,6 +84,14 @@ SECTIONS
*(.exitcall.exit)
*(.ARM.exidx.exit.text)
*(.ARM.extab.exit.text)
+#ifndef CONFIG_HOTPLUG_CPU
+ *(.ARM.exidx.cpuexit.text)
+ *(.ARM.extab.cpuexit.text)
+#endif
+#ifndef CONFIG_HOTPLUG
+ *(.ARM.exidx.devexit.text)
+ *(.ARM.extab.devexit.text)
+#endif
#ifndef CONFIG_MMU
*(.fixup)
*(__ex_table)
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index aa482841270b..b520c4b5678a 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -68,10 +68,14 @@ struct davinci_nand_pdata { /* platform_data */
/* none == NAND_ECC_NONE (strongly *not* advised!!)
* soft == NAND_ECC_SOFT
- * 1-bit == NAND_ECC_HW
- * 4-bit == NAND_ECC_HW_SYNDROME (not on all chips)
+ * else == NAND_ECC_HW, according to ecc_bits
+ *
+ * All DaVinci-family chips support 1-bit hardware ECC.
+ * Newer ones also support 4-bit ECC, but it is awkward to
+ * use with large page chips.
*/
nand_ecc_modes_t ecc_mode;
+ u8 ecc_bits;
/* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
unsigned options;
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index ba528f85749c..b0665f161c03 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -302,7 +302,7 @@ int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name)
udelay(1);
}
- if (i < MAX_CLOCK_ENABLE_WAIT)
+ if (i <= MAX_CLOCK_ENABLE_WAIT)
pr_debug("Clock %s stable after %d loops\n", name, i);
else
printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 9e43fe5209d3..045da923e75b 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -286,6 +286,20 @@ static struct omap_clk omap34xx_clks[] = {
#define MIN_SDRC_DLL_LOCK_FREQ 83000000
+#define CYCLES_PER_MHZ 1000000
+
+/* Scale factor for fixed-point arith in omap3_core_dpll_m2_set_rate() */
+#define SDRC_MPURATE_SCALE 8
+
+/* 2^SDRC_MPURATE_BASE_SHIFT: MPU MHz that SDRC_MPURATE_LOOPS is defined for */
+#define SDRC_MPURATE_BASE_SHIFT 9
+
+/*
+ * SDRC_MPURATE_LOOPS: Number of MPU loops to execute at
+ * 2^SDRC_MPURATE_BASE_SHIFT MHz for SDRC to stabilize
+ */
+#define SDRC_MPURATE_LOOPS 96
+
/**
* omap3_dpll_recalc - recalculate DPLL rate
* @clk: DPLL struct clk
@@ -709,7 +723,8 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
u32 new_div = 0;
u32 unlock_dll = 0;
- unsigned long validrate, sdrcrate;
+ u32 c;
+ unsigned long validrate, sdrcrate, mpurate;
struct omap_sdrc_params *sp;
if (!clk || !rate)
@@ -718,18 +733,15 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
if (clk != &dpll3_m2_ck)
return -EINVAL;
- if (rate == clk->rate)
- return 0;
-
validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
if (validrate != rate)
return -EINVAL;
sdrcrate = sdrc_ick.rate;
if (rate > clk->rate)
- sdrcrate <<= ((rate / clk->rate) - 1);
+ sdrcrate <<= ((rate / clk->rate) >> 1);
else
- sdrcrate >>= ((clk->rate / rate) - 1);
+ sdrcrate >>= ((clk->rate / rate) >> 1);
sp = omap2_sdrc_get_params(sdrcrate);
if (!sp)
@@ -740,17 +752,25 @@ static int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
unlock_dll = 1;
}
+ /*
+ * XXX This only needs to be done when the CPU frequency changes
+ */
+ mpurate = arm_fck.rate / CYCLES_PER_MHZ;
+ c = (mpurate << SDRC_MPURATE_SCALE) >> SDRC_MPURATE_BASE_SHIFT;
+ c += 1; /* for safety */
+ c *= SDRC_MPURATE_LOOPS;
+ c >>= SDRC_MPURATE_SCALE;
+ if (c == 0)
+ c = 1;
+
pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
validrate);
pr_debug("clock: SDRC timing params used: %08x %08x %08x\n",
sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb);
- /* REVISIT: SRAM code doesn't support other M2 divisors yet */
- WARN_ON(new_div != 1 && new_div != 2);
-
- /* REVISIT: Add SDRC_MR changing to this code also */
omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla,
- sp->actim_ctrlb, new_div, unlock_dll);
+ sp->actim_ctrlb, new_div, unlock_dll, c,
+ sp->mr, rate > clk->rate);
return 0;
}
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 32afd9448216..3a86b0f66031 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <asm/tlb.h>
@@ -241,6 +242,40 @@ void __init omap2_map_common_io(void)
omapfb_reserve_sdram();
}
+/*
+ * _omap2_init_reprogram_sdrc - reprogram SDRC timing parameters
+ *
+ * Sets the CORE DPLL3 M2 divider to the value it is currently at.
+ * This has the effect of setting the SDRC SDRAM AC timing registers
+ * to the values currently defined by the kernel. Only defined for
+ * OMAP3. Returns -EINVAL if the dpll3_m2_ck clock cannot be found,
+ * 0 if called on OMAP2, or passes along the return value of
+ * clk_set_rate().
+ */
+static int __init _omap2_init_reprogram_sdrc(void)
+{
+ struct clk *dpll3_m2_ck;
+ int v = -EINVAL;
+ long rate;
+
+ if (!cpu_is_omap34xx())
+ return 0;
+
+ dpll3_m2_ck = clk_get(NULL, "dpll3_m2_ck");
+ if (!dpll3_m2_ck)
+ return -EINVAL;
+
+ rate = clk_get_rate(dpll3_m2_ck);
+ pr_info("Reprogramming SDRC clock to %ld Hz\n", rate);
+ v = clk_set_rate(dpll3_m2_ck, rate);
+ if (v)
+ pr_err("dpll3_m2_clk rate change failed: %d\n", v);
+
+ clk_put(dpll3_m2_ck);
+
+ return v;
+}
+
void __init omap2_init_common_hw(struct omap_sdrc_params *sp)
{
omap2_mux_init();
@@ -249,6 +284,7 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sp)
clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
omap2_clk_init();
omap2_sdrc_init(sp);
+ _omap2_init_reprogram_sdrc();
#endif
gpmc_init();
}
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 73e2971b1757..983f1cb676be 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -1099,7 +1099,7 @@ int pwrdm_wait_transition(struct powerdomain *pwrdm)
(c++ < PWRDM_TRANSITION_BAILOUT))
udelay(1);
- if (c >= PWRDM_TRANSITION_BAILOUT) {
+ if (c > PWRDM_TRANSITION_BAILOUT) {
printk(KERN_ERR "powerdomain: waited too long for "
"powerdomain %s to complete transition\n", pwrdm->name);
return -EAGAIN;
diff --git a/arch/arm/mach-omap2/sram34xx.S b/arch/arm/mach-omap2/sram34xx.S
index c080c82521e1..f41f8d96ddba 100644
--- a/arch/arm/mach-omap2/sram34xx.S
+++ b/arch/arm/mach-omap2/sram34xx.S
@@ -3,13 +3,12 @@
*
* Omap3 specific functions that need to be run in internal SRAM
*
- * (C) Copyright 2007
- * Texas Instruments Inc.
- * Rajendra Nayak <rnayak@ti.com>
+ * Copyright (C) 2004, 2007, 2008 Texas Instruments, Inc.
+ * Copyright (C) 2008 Nokia Corporation
*
- * (C) Copyright 2004
- * Texas Instruments, <www.ti.com>
+ * Rajendra Nayak <rnayak@ti.com>
* Richard Woodruff <r-woodruff2@ti.com>
+ * Paul Walmsley
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -37,61 +36,112 @@
.text
+/* r4 parameters */
+#define SDRC_NO_UNLOCK_DLL 0x0
+#define SDRC_UNLOCK_DLL 0x1
+
+/* SDRC_DLLA_CTRL bit settings */
+#define FIXEDDELAY_SHIFT 24
+#define FIXEDDELAY_MASK (0xff << FIXEDDELAY_SHIFT)
+#define DLLIDLE_MASK 0x4
+
+/*
+ * SDRC_DLLA_CTRL default values: TI hardware team indicates that
+ * FIXEDDELAY should be initialized to 0xf. This apparently was
+ * empirically determined during process testing, so no derivation
+ * was provided.
+ */
+#define FIXEDDELAY_DEFAULT (0x0f << FIXEDDELAY_SHIFT)
+
+/* SDRC_DLLA_STATUS bit settings */
+#define LOCKSTATUS_MASK 0x4
+
+/* SDRC_POWER bit settings */
+#define SRFRONIDLEREQ_MASK 0x40
+#define PWDENA_MASK 0x4
+
+/* CM_IDLEST1_CORE bit settings */
+#define ST_SDRC_MASK 0x2
+
+/* CM_ICLKEN1_CORE bit settings */
+#define EN_SDRC_MASK 0x2
+
+/* CM_CLKSEL1_PLL bit settings */
+#define CORE_DPLL_CLKOUT_DIV_SHIFT 0x1b
+
/*
- * Change frequency of core dpll
- * r0 = sdrc_rfr_ctrl r1 = sdrc_actim_ctrla r2 = sdrc_actim_ctrlb r3 = M2
- * r4 = Unlock SDRC DLL? (1 = yes, 0 = no) -- only unlock DLL for
+ * omap3_sram_configure_core_dpll - change DPLL3 M2 divider
+ * r0 = new SDRC_RFR_CTRL register contents
+ * r1 = new SDRC_ACTIM_CTRLA register contents
+ * r2 = new SDRC_ACTIM_CTRLB register contents
+ * r3 = new M2 divider setting (only 1 and 2 supported right now)
+ * r4 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for
* SDRC rates < 83MHz
+ * r5 = number of MPU cycles to wait for SDRC to stabilize after
+ * reprogramming the SDRC when switching to a slower MPU speed
+ * r6 = new SDRC_MR_0 register value
+ * r7 = increasing SDRC rate? (1 = yes, 0 = no)
+ *
*/
ENTRY(omap3_sram_configure_core_dpll)
stmfd sp!, {r1-r12, lr} @ store regs to stack
ldr r4, [sp, #52] @ pull extra args off the stack
+ ldr r5, [sp, #56] @ load extra args from the stack
+ ldr r6, [sp, #60] @ load extra args from the stack
+ ldr r7, [sp, #64] @ load extra args from the stack
dsb @ flush buffered writes to interconnect
- cmp r3, #0x2
- blne configure_sdrc
- cmp r4, #0x1
+ cmp r7, #1 @ if increasing SDRC clk rate,
+ bleq configure_sdrc @ program the SDRC regs early (for RFR)
+ cmp r4, #SDRC_UNLOCK_DLL @ set the intended DLL state
bleq unlock_dll
blne lock_dll
- bl sdram_in_selfrefresh @ put the SDRAM in self refresh
- bl configure_core_dpll
- bl enable_sdrc
- cmp r4, #0x1
+ bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC
+ bl configure_core_dpll @ change the DPLL3 M2 divider
+ bl enable_sdrc @ take SDRC out of idle
+ cmp r4, #SDRC_UNLOCK_DLL @ wait for DLL status to change
bleq wait_dll_unlock
blne wait_dll_lock
- cmp r3, #0x1
- blne configure_sdrc
+ cmp r7, #1 @ if increasing SDRC clk rate,
+ beq return_to_sdram @ return to SDRAM code, otherwise,
+ bl configure_sdrc @ reprogram SDRC regs now
+ mov r12, r5
+ bl wait_clk_stable @ wait for SDRC to stabilize
+return_to_sdram:
isb @ prevent speculative exec past here
mov r0, #0 @ return value
ldmfd sp!, {r1-r12, pc} @ restore regs and return
unlock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
- orr r12, r12, #0x4
+ and r12, r12, #FIXEDDELAY_MASK
+ orr r12, r12, #FIXEDDELAY_DEFAULT
+ orr r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
bx lr
lock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
- bic r12, r12, #0x4
+ bic r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
bx lr
sdram_in_selfrefresh:
ldr r11, omap3_sdrc_power @ read the SDRC_POWER register
ldr r12, [r11] @ read the contents of SDRC_POWER
mov r9, r12 @ keep a copy of SDRC_POWER bits
- orr r12, r12, #0x40 @ enable self refresh on idle req
- bic r12, r12, #0x4 @ clear PWDENA
+ orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle
+ bic r12, r12, #PWDENA_MASK @ clear PWDENA
str r12, [r11] @ write back to SDRC_POWER register
ldr r12, [r11] @ posted-write barrier for SDRC
+idle_sdrc:
ldr r11, omap3_cm_iclken1_core @ read the CM_ICLKEN1_CORE reg
ldr r12, [r11]
- bic r12, r12, #0x2 @ disable iclk bit for SDRC
+ bic r12, r12, #EN_SDRC_MASK @ disable iclk bit for SDRC
str r12, [r11]
wait_sdrc_idle:
ldr r11, omap3_cm_idlest1_core
ldr r12, [r11]
- and r12, r12, #0x2 @ check for SDRC idle
- cmp r12, #2
+ and r12, r12, #ST_SDRC_MASK @ check for SDRC idle
+ cmp r12, #ST_SDRC_MASK
bne wait_sdrc_idle
bx lr
configure_core_dpll:
@@ -99,36 +149,23 @@ configure_core_dpll:
ldr r12, [r11]
ldr r10, core_m2_mask_val @ modify m2 for core dpll
and r12, r12, r10
- orr r12, r12, r3, lsl #0x1B @ r3 contains the M2 val
+ orr r12, r12, r3, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
str r12, [r11]
ldr r12, [r11] @ posted-write barrier for CM
- mov r12, #0x800 @ wait for the clock to stabilise
- cmp r3, #2
- bne wait_clk_stable
bx lr
wait_clk_stable:
subs r12, r12, #1
bne wait_clk_stable
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
bx lr
enable_sdrc:
ldr r11, omap3_cm_iclken1_core
ldr r12, [r11]
- orr r12, r12, #0x2 @ enable iclk bit for SDRC
+ orr r12, r12, #EN_SDRC_MASK @ enable iclk bit for SDRC
str r12, [r11]
wait_sdrc_idle1:
ldr r11, omap3_cm_idlest1_core
ldr r12, [r11]
- and r12, r12, #0x2
+ and r12, r12, #ST_SDRC_MASK
cmp r12, #0
bne wait_sdrc_idle1
restore_sdrc_power_val:
@@ -138,14 +175,14 @@ restore_sdrc_power_val:
wait_dll_lock:
ldr r11, omap3_sdrc_dlla_status
ldr r12, [r11]
- and r12, r12, #0x4
- cmp r12, #0x4
+ and r12, r12, #LOCKSTATUS_MASK
+ cmp r12, #LOCKSTATUS_MASK
bne wait_dll_lock
bx lr
wait_dll_unlock:
ldr r11, omap3_sdrc_dlla_status
ldr r12, [r11]
- and r12, r12, #0x4
+ and r12, r12, #LOCKSTATUS_MASK
cmp r12, #0x0
bne wait_dll_unlock
bx lr
@@ -156,7 +193,9 @@ configure_sdrc:
str r1, [r11]
ldr r11, omap3_sdrc_actim_ctrlb
str r2, [r11]
- ldr r2, [r11] @ posted-write barrier for SDRC
+ ldr r11, omap3_sdrc_mr_0
+ str r6, [r11]
+ ldr r6, [r11] @ posted-write barrier for SDRC
bx lr
omap3_sdrc_power:
@@ -173,6 +212,8 @@ omap3_sdrc_actim_ctrla:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0)
omap3_sdrc_actim_ctrlb:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0)
+omap3_sdrc_mr_0:
+ .word OMAP34XX_SDRC_REGADDR(SDRC_MR_0)
omap3_sdrc_dlla_status:
.word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
omap3_sdrc_dlla_ctrl:
diff --git a/arch/arm/mach-orion5x/addr-map.c b/arch/arm/mach-orion5x/addr-map.c
index 6f3f77d031d0..d78731edebb6 100644
--- a/arch/arm/mach-orion5x/addr-map.c
+++ b/arch/arm/mach-orion5x/addr-map.c
@@ -200,6 +200,6 @@ void __init orion5x_setup_pcie_wa_win(u32 base, u32 size)
int __init orion5x_setup_sram_win(void)
{
- return setup_cpu_win(win_alloc_count, ORION5X_SRAM_PHYS_BASE,
+ return setup_cpu_win(win_alloc_count++, ORION5X_SRAM_PHYS_BASE,
ORION5X_SRAM_SIZE, TARGET_SRAM, ATTR_SRAM, -1);
}
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index eafcc49009ea..f87fa1253803 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -562,7 +562,7 @@ static struct platform_device orion5x_crypto_device = {
.resource = orion5x_crypto_res,
};
-int __init orion5x_crypto_init(void)
+static int __init orion5x_crypto_init(void)
{
int ret;
@@ -697,6 +697,14 @@ void __init orion5x_init(void)
}
/*
+ * The 5082/5181l/5182/6082/6082l/6183 have crypto
+ * while 5180n/5181/5281 don't have crypto.
+ */
+ if ((dev == MV88F5181_DEV_ID && rev >= MV88F5181L_REV_A0) ||
+ dev == MV88F5182_DEV_ID || dev == MV88F6183_DEV_ID)
+ orion5x_crypto_init();
+
+ /*
* Register watchdog driver
*/
orion5x_wdt_init();
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index de483e83edd7..8f004503c96d 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -38,7 +38,6 @@ void orion5x_spi_init(void);
void orion5x_uart0_init(void);
void orion5x_uart1_init(void);
void orion5x_xor_init(void);
-int orion5x_crypto_init(void);
/*
* PCIe/PCI functions.
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index f4533f8ff4e8..89c992b8f75b 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -401,6 +401,16 @@ config MACH_PALMZ72
Say Y here if you intend to run this kernel on Palm Zire 72
handheld computer.
+config MACH_TREO680
+ bool "Palm Treo 680"
+ default y
+ depends on ARCH_PXA_PALM
+ select PXA27x
+ select IWMMXT
+ help
+ Say Y here if you intend to run this kernel on Palm Treo 680
+ smartphone.
+
config MACH_PALMLD
bool "Palm LifeDrive"
default y
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index d18ffef44b8c..d4c6122a342f 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_MACH_PALMT5) += palmt5.o
obj-$(CONFIG_MACH_PALMTX) += palmtx.o
obj-$(CONFIG_MACH_PALMLD) += palmld.o
obj-$(CONFIG_MACH_PALMZ72) += palmz72.o
+obj-$(CONFIG_MACH_TREO680) += treo680.o
obj-$(CONFIG_ARCH_VIPER) += viper.o
ifeq ($(CONFIG_MACH_ZYLONITE),y)
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index 962dda2e154a..5363e1aea3fb 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -23,6 +23,7 @@
#include <linux/pm.h>
#include <linux/gpio.h>
#include <linux/backlight.h>
+#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
@@ -600,6 +601,10 @@ static struct platform_device *devices[] __initdata = {
&sharpsl_rom_device,
};
+static struct i2c_board_info __initdata corgi_i2c_devices[] = {
+ { I2C_BOARD_INFO("wm8731", 0x1b) },
+};
+
static void corgi_poweroff(void)
{
if (!machine_is_corgi())
@@ -634,6 +639,7 @@ static void __init corgi_init(void)
pxa_set_mci_info(&corgi_mci_platform_data);
pxa_set_ficp_info(&corgi_ficp_platform_data);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(corgi_i2c_devices));
platform_scoop_config = &corgi_pcmcia_config;
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 243e0802b5f4..63b10d9bb1d3 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -30,6 +30,7 @@
#include <linux/apm-emulation.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
+#include <linux/regulator/userspace-consumer.h>
#include <media/soc_camera.h>
@@ -735,6 +736,7 @@ static struct pxa2xx_spi_chip em_x270_libertas_chip = {
.rx_threshold = 1,
.tx_threshold = 1,
.timeout = 1000,
+ .gpio_cs = 14,
};
static unsigned long em_x270_libertas_pin_config[] = {
@@ -803,7 +805,6 @@ static int em_x270_libertas_teardown(struct spi_device *spi)
struct libertas_spi_platform_data em_x270_libertas_pdata = {
.use_dummy_writes = 1,
- .gpio_cs = 14,
.setup = em_x270_libertas_setup,
.teardown = em_x270_libertas_teardown,
};
@@ -838,10 +839,14 @@ static void __init em_x270_init_spi(void)
static inline void em_x270_init_spi(void) {}
#endif
-#if defined(CONFIG_SND_PXA2XX_AC97) || defined(CONFIG_SND_PXA2XX_AC97_MODULE)
+#if defined(CONFIG_SND_PXA2XX_LIB_AC97)
+static pxa2xx_audio_ops_t em_x270_ac97_info = {
+ .reset_gpio = 113,
+};
+
static void __init em_x270_init_ac97(void)
{
- pxa_set_ac97_info(NULL);
+ pxa_set_ac97_info(&em_x270_ac97_info);
}
#else
static inline void em_x270_init_ac97(void) {}
@@ -1038,6 +1043,52 @@ static void __init em_x270_init_camera(void)
static inline void em_x270_init_camera(void) {}
#endif
+static struct regulator_bulk_data em_x270_gps_consumer_supply = {
+ .supply = "vcc gps",
+};
+
+static struct regulator_userspace_consumer_data em_x270_gps_consumer_data = {
+ .name = "vcc gps",
+ .num_supplies = 1,
+ .supplies = &em_x270_gps_consumer_supply,
+};
+
+static struct platform_device em_x270_gps_userspace_consumer = {
+ .name = "reg-userspace-consumer",
+ .id = 0,
+ .dev = {
+ .platform_data = &em_x270_gps_consumer_data,
+ },
+};
+
+static struct regulator_bulk_data em_x270_gprs_consumer_supply = {
+ .supply = "vcc gprs",
+};
+
+static struct regulator_userspace_consumer_data em_x270_gprs_consumer_data = {
+ .name = "vcc gprs",
+ .num_supplies = 1,
+ .supplies = &em_x270_gprs_consumer_supply
+};
+
+static struct platform_device em_x270_gprs_userspace_consumer = {
+ .name = "reg-userspace-consumer",
+ .id = 1,
+ .dev = {
+ .platform_data = &em_x270_gprs_consumer_data,
+ }
+};
+
+static struct platform_device *em_x270_userspace_consumers[] = {
+ &em_x270_gps_userspace_consumer,
+ &em_x270_gprs_userspace_consumer,
+};
+
+static void __init em_x270_userspace_consumers_init(void)
+{
+ platform_add_devices(ARRAY_AND_SIZE(em_x270_userspace_consumers));
+}
+
/* DA9030 related initializations */
#define REGULATOR_CONSUMER(_name, _dev, _supply) \
static struct regulator_consumer_supply _name##_consumers[] = { \
@@ -1047,11 +1098,11 @@ static inline void em_x270_init_camera(void) {}
}, \
}
-REGULATOR_CONSUMER(ldo3, NULL, "vcc gps");
+REGULATOR_CONSUMER(ldo3, &em_x270_gps_userspace_consumer.dev, "vcc gps");
REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
REGULATOR_CONSUMER(ldo10, &pxa_device_mci.dev, "vcc sdio");
REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
-REGULATOR_CONSUMER(ldo19, NULL, "vcc gprs");
+REGULATOR_CONSUMER(ldo19, &em_x270_gprs_userspace_consumer.dev, "vcc gprs");
#define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask) \
static struct regulator_init_data _ldo##_data = { \
@@ -1062,6 +1113,7 @@ REGULATOR_CONSUMER(ldo19, NULL, "vcc gprs");
.enabled = 0, \
}, \
.valid_ops_mask = _ops_mask, \
+ .apply_uV = 1, \
}, \
.num_consumer_supplies = ARRAY_SIZE(_ldo##_consumers), \
.consumer_supplies = _ldo##_consumers, \
@@ -1240,6 +1292,7 @@ static void __init em_x270_init(void)
em_x270_init_spi();
em_x270_init_i2c();
em_x270_init_camera();
+ em_x270_userspace_consumers_init();
}
MACHINE_START(EM_X270, "Compulab EM-X270")
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index 7fff467e84fc..81359d574f88 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -30,6 +30,7 @@
#include <linux/pwm_backlight.h>
#include <linux/regulator/bq24022.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/max1586.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/spi.h>
#include <linux/usb/gpio_vbus.h>
@@ -775,6 +776,45 @@ static struct platform_device strataflash = {
};
/*
+ * Maxim MAX1587A on PI2C
+ */
+
+static struct regulator_consumer_supply max1587a_consumer = {
+ .supply = "vcc_core",
+};
+
+static struct regulator_init_data max1587a_v3_info = {
+ .constraints = {
+ .name = "vcc_core range",
+ .min_uV = 900000,
+ .max_uV = 1705000,
+ .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = 1,
+ .consumer_supplies = &max1587a_consumer,
+};
+
+static struct max1586_subdev_data max1587a_subdev = {
+ .name = "vcc_core",
+ .id = MAX1586_V3,
+ .platform_data = &max1587a_v3_info,
+};
+
+static struct max1586_platform_data max1587a_info = {
+ .num_subdevs = 1,
+ .subdevs = &max1587a_subdev,
+ .v3_gain = MAX1586_GAIN_R24_3k32, /* 730..1550 mV */
+};
+
+static struct i2c_board_info __initdata pi2c_board_info[] = {
+ {
+ I2C_BOARD_INFO("max1586", 0x14),
+ .platform_data = &max1587a_info,
+ },
+};
+
+/*
* PCMCIA
*/
@@ -828,6 +868,7 @@ static void __init hx4700_init(void)
pxa_set_ficp_info(&ficp_info);
pxa27x_set_i2c_power_info(NULL);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(1, ARRAY_AND_SIZE(pi2c_board_info));
pxa2xx_set_spi_info(2, &pxa_ssp2_master_info);
spi_register_board_info(ARRAY_AND_SIZE(tsc2046_board_info));
diff --git a/arch/arm/mach-pxa/include/mach/palmz72.h b/arch/arm/mach-pxa/include/mach/palmz72.h
index 5032307ebf7d..2806ef69ba5a 100644
--- a/arch/arm/mach-pxa/include/mach/palmz72.h
+++ b/arch/arm/mach-pxa/include/mach/palmz72.h
@@ -21,7 +21,7 @@
/* SD/MMC */
#define GPIO_NR_PALMZ72_SD_DETECT_N 14
#define GPIO_NR_PALMZ72_SD_POWER_N 98
-#define GPIO_NR_PALMZ72_SD_RO 115
+#define GPIO_NR_PALMZ72_SD_RO 115
/* Touchscreen */
#define GPIO_NR_PALMZ72_WM9712_IRQ 27
@@ -31,8 +31,7 @@
/* USB */
#define GPIO_NR_PALMZ72_USB_DETECT_N 15
-#define GPIO_NR_PALMZ72_USB_POWER 95
-#define GPIO_NR_PALMZ72_USB_PULLUP 12
+#define GPIO_NR_PALMZ72_USB_PULLUP 95
/* LCD/Backlight */
#define GPIO_NR_PALMZ72_BL_POWER 20
diff --git a/arch/arm/mach-pxa/include/mach/treo680.h b/arch/arm/mach-pxa/include/mach/treo680.h
new file mode 100644
index 000000000000..af443b24d99a
--- /dev/null
+++ b/arch/arm/mach-pxa/include/mach/treo680.h
@@ -0,0 +1,49 @@
+/*
+ * GPIOs and interrupts for Palm Treo 680 smartphone
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _INCLUDE_TREO680_H_
+#define _INCLUDE_TREO680_H_
+
+/* GPIOs */
+#define GPIO_NR_TREO680_POWER_DETECT 0
+#define GPIO_NR_TREO680_AMP_EN 27
+#define GPIO_NR_TREO680_KEYB_BL 24
+#define GPIO_NR_TREO680_VIBRATE_EN 44
+#define GPIO_NR_TREO680_GREEN_LED 20
+#define GPIO_NR_TREO680_RED_LED 79
+#define GPIO_NR_TREO680_SD_DETECT_N 113
+#define GPIO_NR_TREO680_SD_READONLY 33
+#define GPIO_NR_TREO680_EP_DETECT_N 116
+#define GPIO_NR_TREO680_SD_POWER 42
+#define GPIO_NR_TREO680_USB_DETECT 1
+#define GPIO_NR_TREO680_USB_PULLUP 114
+#define GPIO_NR_TREO680_GSM_POWER 40
+#define GPIO_NR_TREO680_GSM_RESET 87
+#define GPIO_NR_TREO680_GSM_WAKE 57
+#define GPIO_NR_TREO680_GSM_HOST_WAKE 14
+#define GPIO_NR_TREO680_GSM_TRIGGER 10
+#define GPIO_NR_TREO680_BT_EN 43
+#define GPIO_NR_TREO680_IR_EN 115
+#define GPIO_NR_TREO680_IR_TXD 47
+#define GPIO_NR_TREO680_BL_POWER 38
+#define GPIO_NR_TREO680_LCD_POWER 25
+
+/* Various addresses */
+#define TREO680_PHYS_RAM_START 0xa0000000
+#define TREO680_PHYS_IO_START 0x40000000
+#define TREO680_STR_BASE 0xa2000000
+
+/* BACKLIGHT */
+#define TREO680_MAX_INTENSITY 254
+#define TREO680_DEFAULT_INTENSITY 160
+#define TREO680_LIMIT_MASK 0x7F
+#define TREO680_PRESCALER 63
+#define TREO680_PERIOD_NS 3500
+
+#endif
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index 4dc8c2ec40a9..2d28132c725b 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -37,6 +37,7 @@
#include <linux/wm97xx_batt.h>
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>
+#include <linux/regulator/max1586.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -717,6 +718,38 @@ static struct wm97xx_batt_info mioa701_battery_data = {
};
/*
+ * Voltage regulation
+ */
+static struct regulator_consumer_supply max1586_consumers[] = {
+ {
+ .supply = "vcc_core",
+ }
+};
+
+static struct regulator_init_data max1586_v3_info = {
+ .constraints = {
+ .name = "vcc_core range",
+ .min_uV = 1000000,
+ .max_uV = 1705000,
+ .always_on = 1,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(max1586_consumers),
+ .consumer_supplies = max1586_consumers,
+};
+
+static struct max1586_subdev_data max1586_subdevs[] = {
+ { .name = "vcc_core", .id = MAX1586_V3,
+ .platform_data = &max1586_v3_info },
+};
+
+static struct max1586_platform_data max1586_info = {
+ .subdevs = max1586_subdevs,
+ .num_subdevs = ARRAY_SIZE(max1586_subdevs),
+ .v3_gain = MAX1586_GAIN_NO_R24, /* 700..1475 mV */
+};
+
+/*
* Camera interface
*/
struct pxacamera_platform_data mioa701_pxacamera_platform_data = {
@@ -725,6 +758,13 @@ struct pxacamera_platform_data mioa701_pxacamera_platform_data = {
.mclk_10khz = 5000,
};
+static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
+ {
+ I2C_BOARD_INFO("max1586", 0x14),
+ .platform_data = &max1586_info,
+ },
+};
+
static struct soc_camera_link iclink = {
.bus_id = 0, /* Must match id in pxa27x_device_camera in device.c */
};
@@ -825,7 +865,9 @@ static void __init mioa701_machine_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
gsm_init();
+ i2c_register_board_info(1, ARRAY_AND_SIZE(mioa701_pi2c_devices));
pxa_set_i2c_info(&i2c_pdata);
+ pxa27x_set_i2c_power_info(NULL);
pxa_set_camera_info(&mioa701_pxacamera_platform_data);
i2c_register_board_info(0, ARRAY_AND_SIZE(mioa701_i2c_devices));
}
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index b88eb4dd2c84..c3645aa3fa3d 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -27,7 +27,9 @@
#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/gpio.h>
+#include <linux/wm97xx_batt.h>
#include <linux/power_supply.h>
+#include <linux/usb/gpio_vbus.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -41,6 +43,8 @@
#include <mach/irda.h>
#include <mach/pxa27x_keypad.h>
#include <mach/udc.h>
+#include <mach/palmasoc.h>
+
#include <mach/pm.h>
#include "generic.h"
@@ -66,6 +70,8 @@ static unsigned long palmz72_pin_config[] __initdata = {
GPIO29_AC97_SDATA_IN_0,
GPIO30_AC97_SDATA_OUT,
GPIO31_AC97_SYNC,
+ GPIO89_AC97_SYSCLK,
+ GPIO113_AC97_nRESET,
/* IrDA */
GPIO49_GPIO, /* ir disable */
@@ -77,8 +83,7 @@ static unsigned long palmz72_pin_config[] __initdata = {
/* USB */
GPIO15_GPIO, /* usb detect */
- GPIO12_GPIO, /* usb pullup */
- GPIO95_GPIO, /* usb power */
+ GPIO95_GPIO, /* usb pullup */
/* Matrix keypad */
GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
@@ -355,6 +360,22 @@ static struct platform_device palmz72_leds = {
};
/******************************************************************************
+ * UDC
+ ******************************************************************************/
+static struct gpio_vbus_mach_info palmz72_udc_info = {
+ .gpio_vbus = GPIO_NR_PALMZ72_USB_DETECT_N,
+ .gpio_pullup = GPIO_NR_PALMZ72_USB_PULLUP,
+};
+
+static struct platform_device palmz72_gpio_vbus = {
+ .name = "gpio-vbus",
+ .id = -1,
+ .dev = {
+ .platform_data = &palmz72_udc_info,
+ },
+};
+
+/******************************************************************************
* Power supply
******************************************************************************/
static int power_supply_init(struct device *dev)
@@ -422,6 +443,31 @@ static struct platform_device power_supply = {
};
/******************************************************************************
+ * WM97xx battery
+ ******************************************************************************/
+static struct wm97xx_batt_info wm97xx_batt_pdata = {
+ .batt_aux = WM97XX_AUX_ID3,
+ .temp_aux = WM97XX_AUX_ID2,
+ .charge_gpio = -1,
+ .max_voltage = PALMZ72_BAT_MAX_VOLTAGE,
+ .min_voltage = PALMZ72_BAT_MIN_VOLTAGE,
+ .batt_mult = 1000,
+ .batt_div = 414,
+ .temp_mult = 1,
+ .temp_div = 1,
+ .batt_tech = POWER_SUPPLY_TECHNOLOGY_LIPO,
+ .batt_name = "main-batt",
+};
+
+/******************************************************************************
+ * aSoC audio
+ ******************************************************************************/
+static struct platform_device palmz72_asoc = {
+ .name = "palm27x-asoc",
+ .id = -1,
+};
+
+/******************************************************************************
* Framebuffer
******************************************************************************/
static struct pxafb_mode_info palmz72_lcd_modes[] = {
@@ -527,17 +573,32 @@ device_initcall(palmz72_pm_init);
static struct platform_device *devices[] __initdata = {
&palmz72_backlight,
&palmz72_leds,
+ &palmz72_asoc,
&power_supply,
+ &palmz72_gpio_vbus,
};
+/* setup udc GPIOs initial state */
+static void __init palmz72_udc_init(void)
+{
+ if (!gpio_request(GPIO_NR_PALMZ72_USB_PULLUP, "USB Pullup")) {
+ gpio_direction_output(GPIO_NR_PALMZ72_USB_PULLUP, 0);
+ gpio_free(GPIO_NR_PALMZ72_USB_PULLUP);
+ }
+}
+
static void __init palmz72_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(palmz72_pin_config));
+
set_pxa_fb_info(&palmz72_lcd_screen);
pxa_set_mci_info(&palmz72_mci_platform_data);
+ palmz72_udc_init();
pxa_set_ac97_info(NULL);
pxa_set_ficp_info(&palmz72_ficp_platform_data);
pxa_set_keypad_info(&palmz72_keypad_platform_data);
+ wm97xx_bat_set_pdata(&wm97xx_batt_pdata);
+
platform_add_devices(devices, ARRAY_SIZE(devices));
}
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index ac431ed10399..9352d4a34837 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/mtd/sharpsl.h>
@@ -486,6 +487,10 @@ static struct platform_device *devices[] __initdata = {
&sharpsl_rom_device,
};
+static struct i2c_board_info __initdata poodle_i2c_devices[] = {
+ { I2C_BOARD_INFO("wm8731", 0x1b) },
+};
+
static void poodle_poweroff(void)
{
arm_machine_restart('h', NULL);
@@ -519,6 +524,7 @@ static void __init poodle_init(void)
pxa_set_mci_info(&poodle_mci_platform_data);
pxa_set_ficp_info(&poodle_ficp_platform_data);
pxa_set_i2c_info(NULL);
+ i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
poodle_init_spi();
}
diff --git a/arch/arm/mach-pxa/treo680.c b/arch/arm/mach-pxa/treo680.c
new file mode 100644
index 000000000000..a06f19edebb3
--- /dev/null
+++ b/arch/arm/mach-pxa/treo680.c
@@ -0,0 +1,612 @@
+/*
+ * Hardware definitions for Palm Treo 680
+ *
+ * Author: Tomas Cech <sleep_walker@suse.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (find more info at www.hackndev.com)
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+#include <linux/pda_power.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio.h>
+#include <linux/wm97xx_batt.h>
+#include <linux/power_supply.h>
+#include <linux/sysdev.h>
+#include <linux/w1-gpio.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/pxa27x.h>
+#include <mach/pxa27x-udc.h>
+#include <mach/audio.h>
+#include <mach/treo680.h>
+#include <mach/mmc.h>
+#include <mach/pxafb.h>
+#include <mach/irda.h>
+#include <mach/pxa27x_keypad.h>
+#include <mach/udc.h>
+#include <mach/ohci.h>
+#include <mach/pxa2xx-regs.h>
+#include <mach/palmasoc.h>
+#include <mach/camera.h>
+
+#include <sound/pxa2xx-lib.h>
+
+#include "generic.h"
+#include "devices.h"
+
+/******************************************************************************
+ * Pin configuration
+ ******************************************************************************/
+static unsigned long treo680_pin_config[] __initdata = {
+ /* MMC */
+ GPIO32_MMC_CLK,
+ GPIO92_MMC_DAT_0,
+ GPIO109_MMC_DAT_1,
+ GPIO110_MMC_DAT_2,
+ GPIO111_MMC_DAT_3,
+ GPIO112_MMC_CMD,
+ GPIO33_GPIO, /* SD read only */
+ GPIO113_GPIO, /* SD detect */
+
+ /* AC97 */
+ GPIO28_AC97_BITCLK,
+ GPIO29_AC97_SDATA_IN_0,
+ GPIO30_AC97_SDATA_OUT,
+ GPIO31_AC97_SYNC,
+ GPIO89_AC97_SYSCLK,
+ GPIO95_AC97_nRESET,
+
+ /* IrDA */
+ GPIO46_FICP_RXD,
+ GPIO47_FICP_TXD,
+
+ /* PWM */
+ GPIO16_PWM0_OUT,
+
+ /* USB */
+ GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, /* usb detect */
+
+ /* MATRIX KEYPAD */
+ GPIO100_KP_MKIN_0 | WAKEUP_ON_LEVEL_HIGH,
+ GPIO101_KP_MKIN_1,
+ GPIO102_KP_MKIN_2,
+ GPIO97_KP_MKIN_3,
+ GPIO98_KP_MKIN_4,
+ GPIO99_KP_MKIN_5,
+ GPIO91_KP_MKIN_6,
+ GPIO13_KP_MKIN_7,
+ GPIO103_KP_MKOUT_0 | MFP_LPM_DRIVE_HIGH,
+ GPIO104_KP_MKOUT_1,
+ GPIO105_KP_MKOUT_2,
+ GPIO106_KP_MKOUT_3,
+ GPIO107_KP_MKOUT_4,
+ GPIO108_KP_MKOUT_5,
+ GPIO96_KP_MKOUT_6,
+ GPIO93_KP_DKIN_0 | WAKEUP_ON_LEVEL_HIGH, /* Hotsync button */
+
+ /* LCD */
+ GPIO58_LCD_LDD_0,
+ GPIO59_LCD_LDD_1,
+ GPIO60_LCD_LDD_2,
+ GPIO61_LCD_LDD_3,
+ GPIO62_LCD_LDD_4,
+ GPIO63_LCD_LDD_5,
+ GPIO64_LCD_LDD_6,
+ GPIO65_LCD_LDD_7,
+ GPIO66_LCD_LDD_8,
+ GPIO67_LCD_LDD_9,
+ GPIO68_LCD_LDD_10,
+ GPIO69_LCD_LDD_11,
+ GPIO70_LCD_LDD_12,
+ GPIO71_LCD_LDD_13,
+ GPIO72_LCD_LDD_14,
+ GPIO73_LCD_LDD_15,
+ GPIO74_LCD_FCLK,
+ GPIO75_LCD_LCLK,
+ GPIO76_LCD_PCLK,
+
+ /* Quick Capture Interface */
+ GPIO84_CIF_FV,
+ GPIO85_CIF_LV,
+ GPIO53_CIF_MCLK,
+ GPIO54_CIF_PCLK,
+ GPIO81_CIF_DD_0,
+ GPIO55_CIF_DD_1,
+ GPIO51_CIF_DD_2,
+ GPIO50_CIF_DD_3,
+ GPIO52_CIF_DD_4,
+ GPIO48_CIF_DD_5,
+ GPIO17_CIF_DD_6,
+ GPIO12_CIF_DD_7,
+
+ /* I2C */
+ GPIO117_I2C_SCL,
+ GPIO118_I2C_SDA,
+
+ /* GSM */
+ GPIO14_GPIO | WAKEUP_ON_EDGE_BOTH, /* GSM host wake up */
+ GPIO34_FFUART_RXD,
+ GPIO35_FFUART_CTS,
+ GPIO39_FFUART_TXD,
+ GPIO41_FFUART_RTS,
+
+ /* MISC. */
+ GPIO0_GPIO | WAKEUP_ON_EDGE_BOTH, /* external power detect */
+ GPIO15_GPIO | WAKEUP_ON_EDGE_BOTH, /* silent switch */
+ GPIO116_GPIO, /* headphone detect */
+ GPIO11_GPIO | WAKEUP_ON_EDGE_BOTH, /* bluetooth host wake up */
+};
+
+/******************************************************************************
+ * SD/MMC card controller
+ ******************************************************************************/
+static int treo680_mci_init(struct device *dev,
+ irq_handler_t treo680_detect_int, void *data)
+{
+ int err = 0;
+
+ /* Setup an interrupt for detecting card insert/remove events */
+ err = gpio_request(GPIO_NR_TREO680_SD_DETECT_N, "SD IRQ");
+
+ if (err)
+ goto err;
+
+ err = gpio_direction_input(GPIO_NR_TREO680_SD_DETECT_N);
+ if (err)
+ goto err2;
+
+ err = request_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N),
+ treo680_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "SD/MMC card detect", data);
+
+ if (err) {
+ dev_err(dev, "%s: cannot request SD/MMC card detect IRQ\n",
+ __func__);
+ goto err2;
+ }
+
+ err = gpio_request(GPIO_NR_TREO680_SD_POWER, "SD_POWER");
+ if (err)
+ goto err3;
+
+ err = gpio_direction_output(GPIO_NR_TREO680_SD_POWER, 1);
+ if (err)
+ goto err4;
+
+ err = gpio_request(GPIO_NR_TREO680_SD_READONLY, "SD_READONLY");
+ if (err)
+ goto err4;
+
+ err = gpio_direction_input(GPIO_NR_TREO680_SD_READONLY);
+ if (err)
+ goto err5;
+
+ return 0;
+
+err5:
+ gpio_free(GPIO_NR_TREO680_SD_READONLY);
+err4:
+ gpio_free(GPIO_NR_TREO680_SD_POWER);
+err3:
+ free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
+err2:
+ gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
+err:
+ return err;
+}
+
+static void treo680_mci_exit(struct device *dev, void *data)
+{
+ gpio_free(GPIO_NR_TREO680_SD_READONLY);
+ gpio_free(GPIO_NR_TREO680_SD_POWER);
+ free_irq(gpio_to_irq(GPIO_NR_TREO680_SD_DETECT_N), data);
+ gpio_free(GPIO_NR_TREO680_SD_DETECT_N);
+}
+
+static void treo680_mci_power(struct device *dev, unsigned int vdd)
+{
+ struct pxamci_platform_data *p_d = dev->platform_data;
+ gpio_set_value(GPIO_NR_TREO680_SD_POWER, p_d->ocr_mask & (1 << vdd));
+}
+
+static int treo680_mci_get_ro(struct device *dev)
+{
+ return gpio_get_value(GPIO_NR_TREO680_SD_READONLY);
+}
+
+static struct pxamci_platform_data treo680_mci_platform_data = {
+ .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
+ .setpower = treo680_mci_power,
+ .get_ro = treo680_mci_get_ro,
+ .init = treo680_mci_init,
+ .exit = treo680_mci_exit,
+};
+
+/******************************************************************************
+ * GPIO keyboard
+ ******************************************************************************/
+static unsigned int treo680_matrix_keys[] = {
+ KEY(0, 0, KEY_F8), /* Red/Off/Power */
+ KEY(0, 1, KEY_LEFT),
+ KEY(0, 2, KEY_LEFTCTRL), /* Alternate */
+ KEY(0, 3, KEY_L),
+ KEY(0, 4, KEY_A),
+ KEY(0, 5, KEY_Q),
+ KEY(0, 6, KEY_P),
+
+ KEY(1, 0, KEY_RIGHTCTRL), /* Menu */
+ KEY(1, 1, KEY_RIGHT),
+ KEY(1, 2, KEY_LEFTSHIFT), /* Left shift */
+ KEY(1, 3, KEY_Z),
+ KEY(1, 4, KEY_S),
+ KEY(1, 5, KEY_W),
+
+ KEY(2, 0, KEY_F1), /* Phone */
+ KEY(2, 1, KEY_UP),
+ KEY(2, 2, KEY_0),
+ KEY(2, 3, KEY_X),
+ KEY(2, 4, KEY_D),
+ KEY(2, 5, KEY_E),
+
+ KEY(3, 0, KEY_F10), /* Calendar */
+ KEY(3, 1, KEY_DOWN),
+ KEY(3, 2, KEY_SPACE),
+ KEY(3, 3, KEY_C),
+ KEY(3, 4, KEY_F),
+ KEY(3, 5, KEY_R),
+
+ KEY(4, 0, KEY_F12), /* Mail */
+ KEY(4, 1, KEY_KPENTER),
+ KEY(4, 2, KEY_RIGHTALT), /* Alt */
+ KEY(4, 3, KEY_V),
+ KEY(4, 4, KEY_G),
+ KEY(4, 5, KEY_T),
+
+ KEY(5, 0, KEY_F9), /* Home */
+ KEY(5, 1, KEY_PAGEUP), /* Side up */
+ KEY(5, 2, KEY_DOT),
+ KEY(5, 3, KEY_B),
+ KEY(5, 4, KEY_H),
+ KEY(5, 5, KEY_Y),
+
+ KEY(6, 0, KEY_TAB), /* Side Activate */
+ KEY(6, 1, KEY_PAGEDOWN), /* Side down */
+ KEY(6, 2, KEY_ENTER),
+ KEY(6, 3, KEY_N),
+ KEY(6, 4, KEY_J),
+ KEY(6, 5, KEY_U),
+
+ KEY(7, 0, KEY_F6), /* Green/Call */
+ KEY(7, 1, KEY_O),
+ KEY(7, 2, KEY_BACKSPACE),
+ KEY(7, 3, KEY_M),
+ KEY(7, 4, KEY_K),
+ KEY(7, 5, KEY_I),
+};
+
+static struct pxa27x_keypad_platform_data treo680_keypad_platform_data = {
+ .matrix_key_rows = 8,
+ .matrix_key_cols = 7,
+ .matrix_key_map = treo680_matrix_keys,
+ .matrix_key_map_size = ARRAY_SIZE(treo680_matrix_keys),
+ .direct_key_map = { KEY_CONNECT },
+ .direct_key_num = 1,
+
+ .debounce_interval = 30,
+};
+
+/******************************************************************************
+ * aSoC audio
+ ******************************************************************************/
+
+static pxa2xx_audio_ops_t treo680_ac97_pdata = {
+ .reset_gpio = 95,
+};
+
+/******************************************************************************
+ * Backlight
+ ******************************************************************************/
+static int treo680_backlight_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_TREO680_BL_POWER, "BL POWER");
+ if (ret)
+ goto err;
+ ret = gpio_direction_output(GPIO_NR_TREO680_BL_POWER, 0);
+ if (ret)
+ goto err2;
+ ret = gpio_request(GPIO_NR_TREO680_LCD_POWER, "LCD POWER");
+ if (ret)
+ goto err2;
+ ret = gpio_direction_output(GPIO_NR_TREO680_LCD_POWER, 0);
+ if (ret)
+ goto err3;
+
+ return 0;
+err3:
+ gpio_free(GPIO_NR_TREO680_LCD_POWER);
+err2:
+ gpio_free(GPIO_NR_TREO680_BL_POWER);
+err:
+ return ret;
+}
+
+static int treo680_backlight_notify(int brightness)
+{
+ gpio_set_value(GPIO_NR_TREO680_BL_POWER, brightness);
+ return TREO680_MAX_INTENSITY - brightness;
+}
+
+static void treo680_backlight_exit(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_BL_POWER);
+ gpio_free(GPIO_NR_TREO680_LCD_POWER);
+}
+
+static struct platform_pwm_backlight_data treo680_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = TREO680_MAX_INTENSITY,
+ .dft_brightness = TREO680_DEFAULT_INTENSITY,
+ .pwm_period_ns = TREO680_PERIOD_NS,
+ .init = treo680_backlight_init,
+ .notify = treo680_backlight_notify,
+ .exit = treo680_backlight_exit,
+};
+
+static struct platform_device treo680_backlight = {
+ .name = "pwm-backlight",
+ .dev = {
+ .parent = &pxa27x_device_pwm0.dev,
+ .platform_data = &treo680_backlight_data,
+ },
+};
+
+/******************************************************************************
+ * IrDA
+ ******************************************************************************/
+static void treo680_transceiver_mode(struct device *dev, int mode)
+{
+ gpio_set_value(GPIO_NR_TREO680_IR_EN, mode & IR_OFF);
+ pxa2xx_transceiver_mode(dev, mode);
+}
+
+static int treo680_irda_startup(struct device *dev)
+{
+ int err;
+
+ err = gpio_request(GPIO_NR_TREO680_IR_EN, "Ir port disable");
+ if (err)
+ goto err1;
+
+ err = gpio_direction_output(GPIO_NR_TREO680_IR_EN, 1);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ dev_err(dev, "treo680_irda: cannot change IR gpio direction\n");
+ gpio_free(GPIO_NR_TREO680_IR_EN);
+err1:
+ dev_err(dev, "treo680_irda: cannot allocate IR gpio\n");
+ return err;
+}
+
+static void treo680_irda_shutdown(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_IR_EN);
+}
+
+static struct pxaficp_platform_data treo680_ficp_info = {
+ .transceiver_cap = IR_FIRMODE | IR_SIRMODE | IR_OFF,
+ .startup = treo680_irda_startup,
+ .shutdown = treo680_irda_shutdown,
+ .transceiver_mode = treo680_transceiver_mode,
+};
+
+/******************************************************************************
+ * UDC
+ ******************************************************************************/
+static struct pxa2xx_udc_mach_info treo680_udc_info __initdata = {
+ .gpio_vbus = GPIO_NR_TREO680_USB_DETECT,
+ .gpio_vbus_inverted = 1,
+ .gpio_pullup = GPIO_NR_TREO680_USB_PULLUP,
+};
+
+
+/******************************************************************************
+ * USB host
+ ******************************************************************************/
+static struct pxaohci_platform_data treo680_ohci_info = {
+ .port_mode = PMM_PERPORT_MODE,
+ .flags = ENABLE_PORT1 | ENABLE_PORT3,
+ .power_budget = 0,
+};
+
+/******************************************************************************
+ * Power supply
+ ******************************************************************************/
+static int power_supply_init(struct device *dev)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_NR_TREO680_POWER_DETECT, "CABLE_STATE_AC");
+ if (ret)
+ goto err1;
+ ret = gpio_direction_input(GPIO_NR_TREO680_POWER_DETECT);
+ if (ret)
+ goto err2;
+
+ return 0;
+
+err2:
+ gpio_free(GPIO_NR_TREO680_POWER_DETECT);
+err1:
+ return ret;
+}
+
+static int treo680_is_ac_online(void)
+{
+ return gpio_get_value(GPIO_NR_TREO680_POWER_DETECT);
+}
+
+static void power_supply_exit(struct device *dev)
+{
+ gpio_free(GPIO_NR_TREO680_POWER_DETECT);
+}
+
+static char *treo680_supplicants[] = {
+ "main-battery",
+};
+
+static struct pda_power_pdata power_supply_info = {
+ .init = power_supply_init,
+ .is_ac_online = treo680_is_ac_online,
+ .exit = power_supply_exit,
+ .supplied_to = treo680_supplicants,
+ .num_supplicants = ARRAY_SIZE(treo680_supplicants),
+};
+
+static struct platform_device power_supply = {
+ .name = "pda-power",
+ .id = -1,
+ .dev = {
+ .platform_data = &power_supply_info,
+ },
+};
+
+/******************************************************************************
+ * Vibra and LEDs
+ ******************************************************************************/
+static struct gpio_led gpio_leds[] = {
+ {
+ .name = "treo680:vibra:vibra",
+ .default_trigger = "none",
+ .gpio = GPIO_NR_TREO680_VIBRATE_EN,
+ },
+ {
+ .name = "treo680:green:led",
+ .default_trigger = "mmc0",
+ .gpio = GPIO_NR_TREO680_GREEN_LED,
+ },
+ {
+ .name = "treo680:keybbl:keybbl",
+ .default_trigger = "none",
+ .gpio = GPIO_NR_TREO680_KEYB_BL,
+ },
+};
+
+static struct gpio_led_platform_data gpio_led_info = {
+ .leds = gpio_leds,
+ .num_leds = ARRAY_SIZE(gpio_leds),
+};
+
+static struct platform_device treo680_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_led_info,
+ }
+};
+
+
+/******************************************************************************
+ * Framebuffer
+ ******************************************************************************/
+/* TODO: add support for 324x324 */
+static struct pxafb_mode_info treo680_lcd_modes[] = {
+{
+ .pixclock = 86538,
+ .xres = 320,
+ .yres = 320,
+ .bpp = 16,
+
+ .left_margin = 20,
+ .right_margin = 8,
+ .upper_margin = 8,
+ .lower_margin = 5,
+
+ .hsync_len = 4,
+ .vsync_len = 1,
+},
+};
+
+static struct pxafb_mach_info treo680_lcd_screen = {
+ .modes = treo680_lcd_modes,
+ .num_modes = ARRAY_SIZE(treo680_lcd_modes),
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+};
+
+/******************************************************************************
+ * Power management - standby
+ ******************************************************************************/
+static void __init treo680_pm_init(void)
+{
+ static u32 resume[] = {
+ 0xe3a00101, /* mov r0, #0x40000000 */
+ 0xe380060f, /* orr r0, r0, #0x00f00000 */
+ 0xe590f008, /* ldr pc, [r0, #0x08] */
+ };
+
+ /* this is where the bootloader jumps */
+ memcpy(phys_to_virt(TREO680_STR_BASE), resume, sizeof(resume));
+}
+
+/******************************************************************************
+ * Machine init
+ ******************************************************************************/
+static struct platform_device *devices[] __initdata = {
+ &treo680_backlight,
+ &treo680_leds,
+ &power_supply,
+};
+
+/* setup udc GPIOs initial state */
+static void __init treo680_udc_init(void)
+{
+ if (!gpio_request(GPIO_NR_TREO680_USB_PULLUP, "UDC Vbus")) {
+ gpio_direction_output(GPIO_NR_TREO680_USB_PULLUP, 1);
+ gpio_free(GPIO_NR_TREO680_USB_PULLUP);
+ }
+}
+
+static void __init treo680_init(void)
+{
+ treo680_pm_init();
+ pxa2xx_mfp_config(ARRAY_AND_SIZE(treo680_pin_config));
+ pxa_set_keypad_info(&treo680_keypad_platform_data);
+ set_pxa_fb_info(&treo680_lcd_screen);
+ pxa_set_mci_info(&treo680_mci_platform_data);
+ treo680_udc_init();
+ pxa_set_udc_info(&treo680_udc_info);
+ pxa_set_ac97_info(&treo680_ac97_pdata);
+ pxa_set_ficp_info(&treo680_ficp_info);
+ pxa_set_ohci_info(&treo680_ohci_info);
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+
+MACHINE_START(TREO680, "Palm Treo 680")
+ .phys_io = TREO680_PHYS_IO_START,
+ .io_pg_offst = io_p2v(0x40000000),
+ .boot_params = 0xa0000100,
+ .map_io = pxa_map_io,
+ .init_irq = pxa27x_init_irq,
+ .timer = &pxa_timer,
+ .init_machine = treo680_init,
+MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 1fe294d0bf9d..ede2a57240a3 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -27,6 +27,7 @@
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
+#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
diff --git a/arch/arm/mach-s3c2410/usb-simtec.c b/arch/arm/mach-s3c2410/usb-simtec.c
index 6cd9377ddb82..50e25fc5f8ab 100644
--- a/arch/arm/mach-s3c2410/usb-simtec.c
+++ b/arch/arm/mach-s3c2410/usb-simtec.c
@@ -22,7 +22,6 @@
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index 5df73cbf2b40..8cfeaec37306 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -84,5 +84,15 @@ config MACH_AT2440EVB
help
Say Y here if you are using the AT2440EVB development board
+config MACH_MINI2440
+ bool "MINI2440 development board"
+ select CPU_S3C2440
+ select EEPROM_AT24
+ select LEDS_TRIGGER_BACKLIGHT
+ select SND_S3C24XX_SOC_S3C24XX_UDA134X
+ help
+ Say Y here to select support for the MINI2440. It is a 10cm x 10cm board
+ available from various sources and can come with a 3.5" or 7" touch LCD.
+
endmenu
diff --git a/arch/arm/mach-s3c2440/Makefile b/arch/arm/mach-s3c2440/Makefile
index 0b4440e79b90..bfadcf684a2a 100644
--- a/arch/arm/mach-s3c2440/Makefile
+++ b/arch/arm/mach-s3c2440/Makefile
@@ -22,3 +22,4 @@ obj-$(CONFIG_MACH_RX3715) += mach-rx3715.o
obj-$(CONFIG_ARCH_S3C2440) += mach-smdk2440.o
obj-$(CONFIG_MACH_NEXCODER_2440) += mach-nexcoder.o
obj-$(CONFIG_MACH_AT2440EVB) += mach-at2440evb.o
+obj-$(CONFIG_MACH_MINI2440) += mach-mini2440.o
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
new file mode 100644
index 000000000000..6a5bc3021bdb
--- /dev/null
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -0,0 +1,703 @@
+/* linux/arch/arm/mach-s3c2440/mach-mini2440.c
+ *
+ * Copyright (c) 2008 Ramax Lo <ramaxlo@gmail.com>
+ * Based on mach-anubis.c by Ben Dooks <ben@simtec.co.uk>
+ * and modifications by SBZ <sbz@spgui.org> and
+ * Weibing <http://weibing.blogbus.com> and
+ * Michel Pollet <buserror@gmail.com>
+ *
+ * For product information, visit http://code.google.com/p/mini2440/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/dm9000.h>
+#include <linux/i2c/at24.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_keys.h>
+#include <linux/i2c.h>
+#include <linux/mmc/host.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <mach/hardware.h>
+#include <mach/fb.h>
+#include <asm/mach-types.h>
+
+#include <plat/regs-serial.h>
+#include <mach/regs-gpio.h>
+#include <mach/leds-gpio.h>
+#include <mach/regs-mem.h>
+#include <mach/regs-lcd.h>
+#include <mach/irqs.h>
+#include <plat/nand.h>
+#include <plat/iic.h>
+#include <plat/mci.h>
+#include <plat/udc.h>
+
+#include <plat/regs-serial.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <plat/clock.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+
+#include <sound/s3c24xx_uda134x.h>
+
+#define MACH_MINI2440_DM9K_BASE (S3C2410_CS4 + 0x300)
+
+static struct map_desc mini2440_iodesc[] __initdata = {
+ /* nothing to declare, move along */
+};
+
+#define UCON S3C2410_UCON_DEFAULT
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+
+
+static struct s3c2410_uartcfg mini2440_uartcfgs[] __initdata = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+/* USB device UDC support */
+
+static void mini2440_udc_pullup(enum s3c2410_udc_cmd_e cmd)
+{
+ pr_debug("udc: pullup(%d)\n", cmd);
+
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE :
+ s3c2410_gpio_setpin(S3C2410_GPC(5), 1);
+ break;
+ case S3C2410_UDC_P_DISABLE :
+ s3c2410_gpio_setpin(S3C2410_GPC(5), 0);
+ break;
+ case S3C2410_UDC_P_RESET :
+ break;
+ default:
+ break;
+ }
+}
+
+static struct s3c2410_udc_mach_info mini2440_udc_cfg __initdata = {
+ .udc_command = mini2440_udc_pullup,
+};
+
+
+/* LCD timing and setup */
+
+/*
+ * This macro simplifies the table below
+ */
+#define _LCD_DECLARE(_clock,_xres,margin_left,margin_right,hsync, \
+ _yres,margin_top,margin_bottom,vsync, refresh) \
+ .width = _xres, \
+ .xres = _xres, \
+ .height = _yres, \
+ .yres = _yres, \
+ .left_margin = margin_left, \
+ .right_margin = margin_right, \
+ .upper_margin = margin_top, \
+ .lower_margin = margin_bottom, \
+ .hsync_len = hsync, \
+ .vsync_len = vsync, \
+ .pixclock = ((_clock*100000000000LL) / \
+ ((refresh) * \
+ (hsync + margin_left + _xres + margin_right) * \
+ (vsync + margin_top + _yres + margin_bottom))), \
+ .bpp = 16,\
+ .type = (S3C2410_LCDCON1_TFT16BPP |\
+ S3C2410_LCDCON1_TFT)
+
+struct s3c2410fb_display mini2440_lcd_cfg[] __initdata = {
+ [0] = { /* mini2440 + 3.5" TFT + touchscreen */
+ _LCD_DECLARE(
+ 7, /* The 3.5 is quite fast */
+ 240, 21, 38, 6, /* x timing */
+ 320, 4, 4, 2, /* y timing */
+ 60), /* refresh rate */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_INVVDEN |
+ S3C2410_LCDCON5_PWREN),
+ },
+ [1] = { /* mini2440 + 7" TFT + touchscreen */
+ _LCD_DECLARE(
+ 10, /* the 7" runs slower */
+ 800, 40, 40, 48, /* x timing */
+ 480, 29, 3, 3, /* y timing */
+ 50), /* refresh rate */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_INVVLINE |
+ S3C2410_LCDCON5_INVVFRAME |
+ S3C2410_LCDCON5_PWREN),
+ },
+ /* The VGA shield can output at several resolutions. All share
+ * the same timings; however, anything smaller than 1024x768
+ * will only be displayed in the top left corner of a 1024x768
+ * XGA output unless you add optional DIP switches to the shield.
+ * Therefore timings for other resolutions have been omitted here.
+ */
+ [2] = {
+ _LCD_DECLARE(
+ 10,
+ 1024, 1, 2, 2, /* x timing */
+ 768, 200, 16, 16, /* y timing */
+ 24), /* refresh rate, maximum stable,
+ tested with the FPGA shield */
+ .lcdcon5 = (S3C2410_LCDCON5_FRM565 |
+ S3C2410_LCDCON5_HWSWP),
+ },
+};
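An illustrative worked example of the pixclock expression in _LCD_DECLARE, using the values of the 3.5" entry above (editorial arithmetic sketch, not a patch line):

/*
 * mini2440_lcd_cfg[0] = _LCD_DECLARE(7, 240, 21, 38, 6, 320, 4, 4, 2, 60)
 *
 *   horizontal total = hsync + left + xres + right  = 6 + 21 + 240 + 38 = 305
 *   vertical total   = vsync + top  + yres + bottom = 2 + 4  + 320 + 4  = 330
 *
 *   .pixclock = 7 * 100000000000LL / (60 * 305 * 330)
 *             = 700000000000 / 6039000
 *             ~= 115913   (picoseconds, i.e. roughly an 8.6 MHz pixel clock)
 */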
+
+/* todo - put into gpio header */
+
+#define S3C2410_GPCCON_MASK(x) (3 << ((x) * 2))
+#define S3C2410_GPDCON_MASK(x) (3 << ((x) * 2))
+
+struct s3c2410fb_mach_info mini2440_fb_info __initdata = {
+ .displays = &mini2440_lcd_cfg[0], /* not constant! see init */
+ .num_displays = 1,
+ .default_display = 0,
+
+ /* Enable VD[2..7], VD[10..15], VD[18..23] and VCLK, syncs, VDEN
+ * and disable the pull down resistors on pins we are using for LCD
+ * data. */
+
+ .gpcup = (0xf << 1) | (0x3f << 10),
+
+ .gpccon = (S3C2410_GPC1_VCLK | S3C2410_GPC2_VLINE |
+ S3C2410_GPC3_VFRAME | S3C2410_GPC4_VM |
+ S3C2410_GPC10_VD2 | S3C2410_GPC11_VD3 |
+ S3C2410_GPC12_VD4 | S3C2410_GPC13_VD5 |
+ S3C2410_GPC14_VD6 | S3C2410_GPC15_VD7),
+
+ .gpccon_mask = (S3C2410_GPCCON_MASK(1) | S3C2410_GPCCON_MASK(2) |
+ S3C2410_GPCCON_MASK(3) | S3C2410_GPCCON_MASK(4) |
+ S3C2410_GPCCON_MASK(10) | S3C2410_GPCCON_MASK(11) |
+ S3C2410_GPCCON_MASK(12) | S3C2410_GPCCON_MASK(13) |
+ S3C2410_GPCCON_MASK(14) | S3C2410_GPCCON_MASK(15)),
+
+ .gpdup = (0x3f << 2) | (0x3f << 10),
+
+ .gpdcon = (S3C2410_GPD2_VD10 | S3C2410_GPD3_VD11 |
+ S3C2410_GPD4_VD12 | S3C2410_GPD5_VD13 |
+ S3C2410_GPD6_VD14 | S3C2410_GPD7_VD15 |
+ S3C2410_GPD10_VD18 | S3C2410_GPD11_VD19 |
+ S3C2410_GPD12_VD20 | S3C2410_GPD13_VD21 |
+ S3C2410_GPD14_VD22 | S3C2410_GPD15_VD23),
+
+ .gpdcon_mask = (S3C2410_GPDCON_MASK(2) | S3C2410_GPDCON_MASK(3) |
+ S3C2410_GPDCON_MASK(4) | S3C2410_GPDCON_MASK(5) |
+ S3C2410_GPDCON_MASK(6) | S3C2410_GPDCON_MASK(7) |
+ S3C2410_GPDCON_MASK(10) | S3C2410_GPDCON_MASK(11)|
+ S3C2410_GPDCON_MASK(12) | S3C2410_GPDCON_MASK(13)|
+ S3C2410_GPDCON_MASK(14) | S3C2410_GPDCON_MASK(15)),
+};
+
+/* MMC/SD */
+
+static struct s3c24xx_mci_pdata mini2440_mmc_cfg __initdata = {
+ .gpio_detect = S3C2410_GPG(8),
+ .gpio_wprotect = S3C2410_GPH(8),
+ .set_power = NULL,
+ .ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34,
+};
+
+/* NAND Flash on MINI2440 board */
+
+static struct mtd_partition mini2440_default_nand_part[] __initdata = {
+ [0] = {
+ .name = "u-boot",
+ .size = SZ_256K,
+ .offset = 0,
+ },
+ [1] = {
+ .name = "u-boot-env",
+ .size = SZ_128K,
+ .offset = SZ_256K,
+ },
+ [2] = {
+ .name = "kernel",
+ /* 5 megabytes, for a kernel with no modules
+ * or a uImage with a ramdisk attached */
+ .size = 0x00500000,
+ .offset = SZ_256K + SZ_128K,
+ },
+ [3] = {
+ .name = "root",
+ .offset = SZ_256K + SZ_128K + 0x00500000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct s3c2410_nand_set mini2440_nand_sets[] __initdata = {
+ [0] = {
+ .name = "nand",
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(mini2440_default_nand_part),
+ .partitions = mini2440_default_nand_part,
+ },
+};
+
+static struct s3c2410_platform_nand mini2440_nand_info __initdata = {
+ .tacls = 0,
+ .twrph0 = 25,
+ .twrph1 = 15,
+ .nr_sets = ARRAY_SIZE(mini2440_nand_sets),
+ .sets = mini2440_nand_sets,
+ .ignore_unset_ecc = 1,
+};
+
+/* DM9000AEP 10/100 ethernet controller */
+
+static struct resource mini2440_dm9k_resource[] __initdata = {
+ [0] = {
+ .start = MACH_MINI2440_DM9K_BASE,
+ .end = MACH_MINI2440_DM9K_BASE + 3,
+ .flags = IORESOURCE_MEM
+ },
+ [1] = {
+ .start = MACH_MINI2440_DM9K_BASE + 4,
+ .end = MACH_MINI2440_DM9K_BASE + 7,
+ .flags = IORESOURCE_MEM
+ },
+ [2] = {
+ .start = IRQ_EINT7,
+ .end = IRQ_EINT7,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
+ }
+};
+
+/*
+ * The DM9000 has no EEPROM, and its MAC address is set by
+ * the bootloader before starting the kernel.
+ */
+static struct dm9000_plat_data mini2440_dm9k_pdata __initdata = {
+ .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM),
+};
+
+static struct platform_device mini2440_device_eth __initdata = {
+ .name = "dm9000",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(mini2440_dm9k_resource),
+ .resource = mini2440_dm9k_resource,
+ .dev = {
+ .platform_data = &mini2440_dm9k_pdata,
+ },
+};
+
+/* CON5
+ * +--+ /-----\
+ * | | | |
+ * | | | BAT |
+ * | | \_____/
+ * | |
+ * | | +----+ +----+
+ * | | | K5 | | K1 |
+ * | | +----+ +----+
+ * | | +----+ +----+
+ * | | | K4 | | K2 |
+ * | | +----+ +----+
+ * | | +----+ +----+
+ * | | | K6 | | K3 |
+ * | | +----+ +----+
+ * .....
+ */
+static struct gpio_keys_button mini2440_buttons[] __initdata = {
+ {
+ .gpio = S3C2410_GPG(0), /* K1 */
+ .code = KEY_F1,
+ .desc = "Button 1",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(3), /* K2 */
+ .code = KEY_F2,
+ .desc = "Button 2",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(5), /* K3 */
+ .code = KEY_F3,
+ .desc = "Button 3",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(6), /* K4 */
+ .code = KEY_POWER,
+ .desc = "Power",
+ .active_low = 1,
+ },
+ {
+ .gpio = S3C2410_GPG(7), /* K5 */
+ .code = KEY_F5,
+ .desc = "Button 5",
+ .active_low = 1,
+ },
+#if 0
+ /* this pin is also known as TCLK1 and seems to be already
+ * marked as "in use" somehow in the kernel -- possibly wrongly */
+ {
+ .gpio = S3C2410_GPG(11), /* K6 */
+ .code = KEY_F6,
+ .desc = "Button 6",
+ .active_low = 1,
+ },
+#endif
+};
+
+static struct gpio_keys_platform_data mini2440_button_data __initdata = {
+ .buttons = mini2440_buttons,
+ .nbuttons = ARRAY_SIZE(mini2440_buttons),
+};
+
+static struct platform_device mini2440_button_device __initdata = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &mini2440_button_data,
+ }
+};
+
+/* LEDS */
+
+static struct s3c24xx_led_platdata mini2440_led1_pdata __initdata = {
+ .name = "led1",
+ .gpio = S3C2410_GPB(5),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "heartbeat",
+};
+
+static struct s3c24xx_led_platdata mini2440_led2_pdata __initdata = {
+ .name = "led2",
+ .gpio = S3C2410_GPB(6),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "nand-disk",
+};
+
+static struct s3c24xx_led_platdata mini2440_led3_pdata __initdata = {
+ .name = "led3",
+ .gpio = S3C2410_GPB(7),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "mmc0",
+};
+
+static struct s3c24xx_led_platdata mini2440_led4_pdata __initdata = {
+ .name = "led4",
+ .gpio = S3C2410_GPB(8),
+ .flags = S3C24XX_LEDF_ACTLOW | S3C24XX_LEDF_TRISTATE,
+ .def_trigger = "",
+};
+
+static struct s3c24xx_led_platdata mini2440_led_backlight_pdata __initdata = {
+ .name = "backlight",
+ .gpio = S3C2410_GPG(4),
+ .def_trigger = "backlight",
+};
+
+static struct platform_device mini2440_led1 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 1,
+ .dev = {
+ .platform_data = &mini2440_led1_pdata,
+ },
+};
+
+static struct platform_device mini2440_led2 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 2,
+ .dev = {
+ .platform_data = &mini2440_led2_pdata,
+ },
+};
+
+static struct platform_device mini2440_led3 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 3,
+ .dev = {
+ .platform_data = &mini2440_led3_pdata,
+ },
+};
+
+static struct platform_device mini2440_led4 __initdata = {
+ .name = "s3c24xx_led",
+ .id = 4,
+ .dev = {
+ .platform_data = &mini2440_led4_pdata,
+ },
+};
+
+static struct platform_device mini2440_led_backlight __initdata = {
+ .name = "s3c24xx_led",
+ .id = 5,
+ .dev = {
+ .platform_data = &mini2440_led_backlight_pdata,
+ },
+};
+
+/* AUDIO */
+
+static struct s3c24xx_uda134x_platform_data mini2440_audio_pins __initdata = {
+ .l3_clk = S3C2410_GPB(4),
+ .l3_mode = S3C2410_GPB(2),
+ .l3_data = S3C2410_GPB(3),
+ .model = UDA134X_UDA1341
+};
+
+static struct platform_device mini2440_audio __initdata = {
+ .name = "s3c24xx_uda134x",
+ .id = 0,
+ .dev = {
+ .platform_data = &mini2440_audio_pins,
+ },
+};
+
+/*
+ * I2C devices
+ */
+static struct at24_platform_data at24c08 = {
+ .byte_len = SZ_8K / 8,
+ .page_size = 16,
+};
+
+static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("24c08", 0x50),
+ .platform_data = &at24c08,
+ },
+};
+
+static struct platform_device *mini2440_devices[] __initdata = {
+ &s3c_device_usb,
+ &s3c_device_wdt,
+/* &s3c_device_adc,*/ /* ADC doesn't like living with touchscreen ! */
+ &s3c_device_i2c0,
+ &s3c_device_rtc,
+ &s3c_device_usbgadget,
+ &mini2440_device_eth,
+ &mini2440_led1,
+ &mini2440_led2,
+ &mini2440_led3,
+ &mini2440_led4,
+ &mini2440_button_device,
+ &s3c_device_nand,
+ &s3c_device_sdi,
+ &s3c_device_iis,
+ &mini2440_audio,
+/* &s3c_device_timer[0],*/ /* buzzer pwm, no API for it */
+ /* remaining devices are optional */
+};
+
+static void __init mini2440_map_io(void)
+{
+ s3c24xx_init_io(mini2440_iodesc, ARRAY_SIZE(mini2440_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(mini2440_uartcfgs, ARRAY_SIZE(mini2440_uartcfgs));
+
+ s3c_device_nand.dev.platform_data = &mini2440_nand_info;
+ s3c_device_sdi.dev.platform_data = &mini2440_mmc_cfg;
+}
+
+/*
+ * mini2440_features string
+ *
+ * t = Touchscreen present
+ * b = backlight control
+ * c = camera [TODO]
+ * 0-9 LCD configuration
+ *
+ */
+static char mini2440_features_str[12] __initdata = "0tb";
+
+static int __init mini2440_features_setup(char *str)
+{
+ if (str)
+ strlcpy(mini2440_features_str, str, sizeof(mini2440_features_str));
+ return 1;
+}
+
+__setup("mini2440=", mini2440_features_setup);
+
+#define FEATURE_SCREEN (1 << 0)
+#define FEATURE_BACKLIGHT (1 << 1)
+#define FEATURE_TOUCH (1 << 2)
+#define FEATURE_CAMERA (1 << 3)
+
+struct mini2440_features_t {
+ int count;
+ int done;
+ int lcd_index;
+ struct platform_device *optional[8];
+};
+
+static void mini2440_parse_features(
+ struct mini2440_features_t *features,
+ const char *features_str)
+{
+ const char *fp = features_str;
+
+ features->count = 0;
+ features->done = 0;
+ features->lcd_index = -1;
+
+ while (*fp) {
+ char f = *fp++;
+
+ switch (f) {
+ case '0'...'9': /* tft screen */
+ if (features->done & FEATURE_SCREEN) {
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "screen type already set\n", f);
+ } else {
+ int li = f - '0';
+ if (li >= ARRAY_SIZE(mini2440_lcd_cfg))
+ printk(KERN_INFO "MINI2440: "
+ "'%c' out of range LCD mode\n", f);
+ else {
+ features->optional[features->count++] =
+ &s3c_device_lcd;
+ features->lcd_index = li;
+ }
+ }
+ features->done |= FEATURE_SCREEN;
+ break;
+ case 'b':
+ if (features->done & FEATURE_BACKLIGHT)
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "backlight already set\n", f);
+ else {
+ features->optional[features->count++] =
+ &mini2440_led_backlight;
+ }
+ features->done |= FEATURE_BACKLIGHT;
+ break;
+ case 't':
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "touchscreen not compiled in\n", f);
+ break;
+ case 'c':
+ if (features->done & FEATURE_CAMERA)
+ printk(KERN_INFO "MINI2440: '%c' ignored, "
+ "camera already registered\n", f);
+ else
+ features->optional[features->count++] =
+ &s3c_device_camif;
+ features->done |= FEATURE_CAMERA;
+ break;
+ }
+ }
+}
+
+static void __init mini2440_init(void)
+{
+ struct mini2440_features_t features = { 0 };
+ int i;
+
+ printk(KERN_INFO "MINI2440: Option string mini2440=%s\n",
+ mini2440_features_str);
+
+ /* Parse the feature string */
+ mini2440_parse_features(&features, mini2440_features_str);
+
+ /* turn LCD on */
+ s3c2410_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
+
+ /* Turn the backlight early on */
+ s3c2410_gpio_setpin(S3C2410_GPG(4), 1);
+ s3c2410_gpio_cfgpin(S3C2410_GPG(4), S3C2410_GPIO_OUTPUT);
+
+ /* remove pullup on optional PWM backlight -- unused on 3.5 and 7"s */
+ s3c2410_gpio_pullup(S3C2410_GPB(1), 0);
+ s3c2410_gpio_setpin(S3C2410_GPB(1), 0);
+ s3c2410_gpio_cfgpin(S3C2410_GPB(1), S3C2410_GPIO_INPUT);
+
+ /* Make sure the D+ pullup pin is output */
+ s3c2410_gpio_cfgpin(S3C2410_GPC(5), S3C2410_GPIO_OUTPUT);
+
+ /* mark the keys as inputs, without pullups (there is one on the board) */
+ for (i = 0; i < ARRAY_SIZE(mini2440_buttons); i++) {
+ s3c2410_gpio_pullup(mini2440_buttons[i].gpio, 0);
+ s3c2410_gpio_cfgpin(mini2440_buttons[i].gpio,
+ S3C2410_GPIO_INPUT);
+ }
+ if (features.lcd_index != -1) {
+ int li;
+
+ mini2440_fb_info.displays =
+ &mini2440_lcd_cfg[features.lcd_index];
+
+ printk(KERN_INFO "MINI2440: LCD");
+ for (li = 0; li < ARRAY_SIZE(mini2440_lcd_cfg); li++)
+ if (li == features.lcd_index)
+ printk(" [%d:%dx%d]", li,
+ mini2440_lcd_cfg[li].width,
+ mini2440_lcd_cfg[li].height);
+ else
+ printk(" %d:%dx%d", li,
+ mini2440_lcd_cfg[li].width,
+ mini2440_lcd_cfg[li].height);
+ printk("\n");
+ s3c24xx_fb_set_platdata(&mini2440_fb_info);
+ }
+ s3c24xx_udc_set_platdata(&mini2440_udc_cfg);
+ s3c_i2c0_set_platdata(NULL);
+ i2c_register_board_info(0, mini2440_i2c_devs,
+ ARRAY_SIZE(mini2440_i2c_devs));
+
+ platform_add_devices(mini2440_devices, ARRAY_SIZE(mini2440_devices));
+
+ if (features.count) /* the optional features */
+ platform_add_devices(features.optional, features.count);
+
+}
+
+
+MACHINE_START(MINI2440, "MINI2440")
+ /* Maintainer: Michel Pollet <buserror@gmail.com> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+ .map_io = mini2440_map_io,
+ .init_machine = mini2440_init,
+ .init_irq = s3c24xx_init_irq,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mach-s3c2442/Kconfig b/arch/arm/mach-s3c2442/Kconfig
index b289d198020e..103e913f2258 100644
--- a/arch/arm/mach-s3c2442/Kconfig
+++ b/arch/arm/mach-s3c2442/Kconfig
@@ -24,6 +24,18 @@ config SMDK2440_CPU2442
depends on ARCH_S3C2440
select CPU_S3C2442
+config MACH_NEO1973_GTA02
+ bool "Openmoko GTA02 / Freerunner phone"
+ select CPU_S3C2442
+ select MFD_PCF50633
+ select PCF50633_GPIO
+ select I2C
+ select POWER_SUPPLY
+ select MACH_NEO1973
+ select S3C2410_PWM
+ help
+ Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
+
endmenu
diff --git a/arch/arm/mach-s3c2442/Makefile b/arch/arm/mach-s3c2442/Makefile
index 2a909c6c5798..2a19113a5769 100644
--- a/arch/arm/mach-s3c2442/Makefile
+++ b/arch/arm/mach-s3c2442/Makefile
@@ -12,5 +12,7 @@ obj- :=
obj-$(CONFIG_CPU_S3C2442) += s3c2442.o
obj-$(CONFIG_CPU_S3C2442) += clock.o
+obj-$(CONFIG_MACH_NEO1973_GTA02) += mach-gta02.o
+
# Machine support
diff --git a/arch/arm/mach-s3c2442/include/mach/gta02.h b/arch/arm/mach-s3c2442/include/mach/gta02.h
new file mode 100644
index 000000000000..953331d8d56a
--- /dev/null
+++ b/arch/arm/mach-s3c2442/include/mach/gta02.h
@@ -0,0 +1,84 @@
+#ifndef _GTA02_H
+#define _GTA02_H
+
+#include <mach/regs-gpio.h>
+
+/* Different hardware revisions, passed in ATAG_REVISION by u-boot */
+#define GTA02v1_SYSTEM_REV 0x00000310
+#define GTA02v2_SYSTEM_REV 0x00000320
+#define GTA02v3_SYSTEM_REV 0x00000330
+#define GTA02v4_SYSTEM_REV 0x00000340
+#define GTA02v5_SYSTEM_REV 0x00000350
+/* since A7 is basically the same as A6, we use the A6 PCB ID */
+#define GTA02v6_SYSTEM_REV 0x00000360
+
+#define GTA02_GPIO_n3DL_GSM S3C2410_GPA(13) /* v1 + v2 + v3 only */
+
+#define GTA02_GPIO_PWR_LED1 S3C2410_GPB(0)
+#define GTA02_GPIO_PWR_LED2 S3C2410_GPB(1)
+#define GTA02_GPIO_AUX_LED S3C2410_GPB(2)
+#define GTA02_GPIO_VIBRATOR_ON S3C2410_GPB(3)
+#define GTA02_GPIO_MODEM_RST S3C2410_GPB(5)
+#define GTA02_GPIO_BT_EN S3C2410_GPB(6)
+#define GTA02_GPIO_MODEM_ON S3C2410_GPB(7)
+#define GTA02_GPIO_EXTINT8 S3C2410_GPB(8)
+#define GTA02_GPIO_USB_PULLUP S3C2410_GPB(9)
+
+#define GTA02_GPIO_PIO5 S3C2410_GPC(5) /* v3 + v4 only */
+
+#define GTA02v3_GPIO_nG1_CS S3C2410_GPD(12) /* v3 + v4 only */
+#define GTA02v3_GPIO_nG2_CS S3C2410_GPD(13) /* v3 + v4 only */
+#define GTA02v5_GPIO_HDQ S3C2410_GPD(14) /* v5 + */
+
+#define GTA02_GPIO_nG1_INT S3C2410_GPF(0)
+#define GTA02_GPIO_IO1 S3C2410_GPF(1)
+#define GTA02_GPIO_PIO_2 S3C2410_GPF(2) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_JACK_INSERT S3C2410_GPF(4)
+#define GTA02_GPIO_WLAN_GPIO1 S3C2410_GPF(5) /* v2 + v3 + v4 only */
+#define GTA02_GPIO_AUX_KEY S3C2410_GPF(6)
+#define GTA02_GPIO_HOLD_KEY S3C2410_GPF(7)
+
+#define GTA02_GPIO_3D_IRQ S3C2410_GPG(4)
+#define GTA02v2_GPIO_nG2_INT S3C2410_GPG(8) /* v2 + v3 + v4 only */
+#define GTA02v3_GPIO_nUSB_OC S3C2410_GPG(9) /* v3 + v4 only */
+#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
+#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
+
+#define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2
+#define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN S3C2440_GPJ4
+#define GTA02_GPIO_3D_RESET S3C2440_GPJ5
+#define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7
+#define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8
+#define GTA02_GPIO_KEEPACT S3C2440_GPJ8
+#define GTA02v1_GPIO_HP_IN S3C2440_GPJ10
+#define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */
+
+#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
+#define GTA02_IRQ_MODEM IRQ_EINT1
+#define GTA02_IRQ_PIO_2 IRQ_EINT2 /* v2 + v3 + v4 only */
+#define GTA02_IRQ_nJACK_INSERT IRQ_EINT4
+#define GTA02_IRQ_WLAN_GPIO1 IRQ_EINT5
+#define GTA02_IRQ_AUX IRQ_EINT6
+#define GTA02_IRQ_nHOLD IRQ_EINT7
+#define GTA02_IRQ_PCF50633 IRQ_EINT9
+#define GTA02_IRQ_3D IRQ_EINT12
+#define GTA02_IRQ_GSENSOR_2 IRQ_EINT16 /* v2 + v3 + v4 only */
+#define GTA02v3_IRQ_nUSB_OC IRQ_EINT17 /* v3 + v4 only */
+#define GTA02v3_IRQ_nUSB_FLT IRQ_EINT18 /* v3 + v4 only */
+#define GTA02v3_IRQ_nGSM_OC IRQ_EINT19 /* v3 + v4 only */
+
+/* returns 00 000 on GTA02 A5 and earlier, A6 returns 01 001 */
+#define GTA02_PCB_ID1_0 S3C2410_GPC(13)
+#define GTA02_PCB_ID1_1 S3C2410_GPC(15)
+#define GTA02_PCB_ID1_2 S3C2410_GPD(0)
+#define GTA02_PCB_ID2_0 S3C2410_GPD(3)
+#define GTA02_PCB_ID2_1 S3C2410_GPD(4)
+
+int gta02_get_pcb_revision(void);
+
+#endif /* _GTA02_H */
diff --git a/arch/arm/mach-s3c2442/mach-gta02.c b/arch/arm/mach-s3c2442/mach-gta02.c
new file mode 100644
index 000000000000..e23b581aa0e1
--- /dev/null
+++ b/arch/arm/mach-s3c2442/mach-gta02.c
@@ -0,0 +1,646 @@
+/*
+ * linux/arch/arm/mach-s3c2442/mach-gta02.c
+ *
+ * S3C2442 Machine Support for Openmoko GTA02 / FreeRunner.
+ *
+ * Copyright (C) 2006-2009 by Openmoko, Inc.
+ * Authors: Harald Welte <laforge@openmoko.org>
+ * Andy Green <andy@openmoko.org>
+ * Werner Almesberger <werner@openmoko.org>
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/spi/spi.h>
+
+#include <linux/mmc/host.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/io.h>
+
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/pcf50633/core.h>
+#include <linux/mfd/pcf50633/mbc.h>
+#include <linux/mfd/pcf50633/adc.h>
+#include <linux/mfd/pcf50633/gpio.h>
+#include <linux/mfd/pcf50633/pmic.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <mach/regs-irq.h>
+#include <mach/regs-gpio.h>
+#include <mach/regs-gpioj.h>
+#include <mach/fb.h>
+
+#include <mach/spi.h>
+#include <mach/spi-gpio.h>
+#include <plat/usb-control.h>
+#include <mach/regs-mem.h>
+#include <mach/hardware.h>
+
+#include <mach/gta02.h>
+
+#include <plat/regs-serial.h>
+#include <plat/nand.h>
+#include <plat/devs.h>
+#include <plat/cpu.h>
+#include <plat/pm.h>
+#include <plat/udc.h>
+#include <plat/gpio-cfg.h>
+#include <plat/iic.h>
+
+static struct pcf50633 *gta02_pcf;
+
+/*
+ * This gets called every 1ms when we paniced.
+ */
+
+static long gta02_panic_blink(long count)
+{
+ long delay = 0;
+ static long last_blink;
+ static char led;
+
+ /* Fast blink: 200ms period. */
+ if (count - last_blink < 100)
+ return 0;
+
+ led ^= 1;
+ gpio_direction_output(GTA02_GPIO_AUX_LED, led);
+
+ last_blink = count;
+
+ return delay;
+}
+
+
+static struct map_desc gta02_iodesc[] __initdata = {
+ {
+ .virtual = 0xe0000000,
+ .pfn = __phys_to_pfn(S3C2410_CS3 + 0x01000000),
+ .length = SZ_1M,
+ .type = MT_DEVICE
+ },
+};
+
+#define UCON (S3C2410_UCON_DEFAULT | S3C2443_UCON_RXERR_IRQEN)
+#define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
+#define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
+
+static struct s3c2410_uartcfg gta02_uartcfgs[] = {
+ [0] = {
+ .hwport = 0,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [1] = {
+ .hwport = 1,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+ [2] = {
+ .hwport = 2,
+ .flags = 0,
+ .ucon = UCON,
+ .ulcon = ULCON,
+ .ufcon = UFCON,
+ },
+};
+
+#ifdef CONFIG_CHARGER_PCF50633
+/*
+ * On GTA02 the 1A charger features a 48K resistor to 0V on the ID pin.
+ * We use this to recognize that we can pull 1A from the USB socket.
+ *
+ * These constants are the measured pcf50633 ADC levels with the 1A
+ * charger / 48K resistor, and with no pulldown resistor.
+ */
+
+#define ADC_NOM_CHG_DETECT_1A 6
+#define ADC_NOM_CHG_DETECT_USB 43
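+/* The charger type is decided by comparing a reading against the midpoint of these two values. */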
+
+static void
+gta02_configure_pmu_for_charger(struct pcf50633 *pcf, void *unused, int res)
+{
+ int ma;
+
+ /* Interpret charger type */
+ if (res < ((ADC_NOM_CHG_DETECT_USB + ADC_NOM_CHG_DETECT_1A) / 2)) {
+
+ /*
+ * Sanity - stop GPO driving out now that we have a 1A charger
+ * GPO controls USB Host power generation on GTA02
+ */
+ pcf50633_gpio_set(pcf, PCF50633_GPO, 0);
+
+ ma = 1000;
+ } else
+ ma = 100;
+
+ pcf50633_mbc_usb_curlim_set(pcf, ma);
+}
+
+static struct delayed_work gta02_charger_work;
+static int gta02_usb_vbus_draw;
+
+static void gta02_charger_worker(struct work_struct *work)
+{
+ if (gta02_usb_vbus_draw) {
+ pcf50633_mbc_usb_curlim_set(gta02_pcf, gta02_usb_vbus_draw);
+ return;
+ }
+
+#ifdef CONFIG_PCF50633_ADC
+ pcf50633_adc_async_read(gta02_pcf,
+ PCF50633_ADCC1_MUX_ADCIN1,
+ PCF50633_ADCC1_AVERAGE_16,
+ gta02_configure_pmu_for_charger,
+ NULL);
+#else
+ /*
+ * If the PCF50633 ADC is disabled we fall back to a
+ * 100mA limit for safety.
+ */
+ pcf50633_mbc_usb_curlim_set(gta02_pcf, 100);
+#endif
+}
+
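+/* Delay before (re)configuring the charger: 3 seconds, expressed in jiffies. */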
+#define GTA02_CHARGER_CONFIGURE_TIMEOUT ((3000 * HZ) / 1000)
+
+static void gta02_pmu_event_callback(struct pcf50633 *pcf, int irq)
+{
+ if (irq == PCF50633_IRQ_USBINS) {
+ schedule_delayed_work(&gta02_charger_work,
+ GTA02_CHARGER_CONFIGURE_TIMEOUT);
+
+ return;
+ }
+
+ if (irq == PCF50633_IRQ_USBREM) {
+ cancel_delayed_work_sync(&gta02_charger_work);
+ gta02_usb_vbus_draw = 0;
+ }
+}
+
+static void gta02_udc_vbus_draw(unsigned int ma)
+{
+ if (!gta02_pcf)
+ return;
+
+ gta02_usb_vbus_draw = ma;
+
+ schedule_delayed_work(&gta02_charger_work,
+ GTA02_CHARGER_CONFIGURE_TIMEOUT);
+}
+#else /* !CONFIG_CHARGER_PCF50633 */
+#define gta02_pmu_event_callback NULL
+#define gta02_udc_vbus_draw NULL
+#endif
+
+/*
+ * This is called when the pcf50633 is probed, unfortunately quite late in
+ * the day since it is an I2C bus device. Here we can belatedly define some
+ * platform devices with the advantage that we can mark the pcf50633 as their
+ * parent, so they get suspended and resumed while the pcf50633 is still
+ * around.
+ */
+
+static void gta02_pmu_attach_child_devices(struct pcf50633 *pcf);
+
+
+static char *gta02_batteries[] = {
+ "battery",
+};
+
+struct pcf50633_platform_data gta02_pcf_pdata = {
+ .resumers = {
+ [0] = PCF50633_INT1_USBINS |
+ PCF50633_INT1_USBREM |
+ PCF50633_INT1_ALARM,
+ [1] = PCF50633_INT2_ONKEYF,
+ [2] = PCF50633_INT3_ONKEY1S,
+ [3] = PCF50633_INT4_LOWSYS |
+ PCF50633_INT4_LOWBAT |
+ PCF50633_INT4_HIGHTMP,
+ },
+
+ .batteries = gta02_batteries,
+ .num_batteries = ARRAY_SIZE(gta02_batteries),
+ .reg_init_data = {
+ [PCF50633_REGULATOR_AUTO] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .always_on = 1,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_DOWN1] = {
+ .constraints = {
+ .min_uV = 1300000,
+ .max_uV = 1600000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .always_on = 1,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_DOWN2] = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .always_on = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_HCLDO] = {
+ .constraints = {
+ .min_uV = 2000000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
+ .always_on = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO1] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 0,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_LDO2] = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO3] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO4] = {
+ .constraints = {
+ .min_uV = 3200000,
+ .max_uV = 3200000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ },
+ },
+ [PCF50633_REGULATOR_LDO5] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .apply_uV = 1,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+ [PCF50633_REGULATOR_LDO6] = {
+ .constraints = {
+ .min_uV = 3000000,
+ .max_uV = 3000000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ },
+ },
+ [PCF50633_REGULATOR_MEMLDO] = {
+ .constraints = {
+ .min_uV = 1800000,
+ .max_uV = 1800000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .state_mem = {
+ .enabled = 1,
+ },
+ },
+ },
+
+ },
+ .probe_done = gta02_pmu_attach_child_devices,
+ .mbc_event_callback = gta02_pmu_event_callback,
+};
+
+
+/* NOR Flash. */
+
+#define GTA02_FLASH_BASE 0x18000000 /* GCS3 */
+#define GTA02_FLASH_SIZE 0x200000 /* 2MBytes */
+
+static struct physmap_flash_data gta02_nor_flash_data = {
+ .width = 2,
+};
+
+static struct resource gta02_nor_flash_resource = {
+ .start = GTA02_FLASH_BASE,
+ .end = GTA02_FLASH_BASE + GTA02_FLASH_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device gta02_nor_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &gta02_nor_flash_data,
+ },
+ .resource = &gta02_nor_flash_resource,
+ .num_resources = 1,
+};
+
+
+struct platform_device s3c24xx_pwm_device = {
+ .name = "s3c24xx_pwm",
+ .num_resources = 0,
+};
+
+static struct i2c_board_info gta02_i2c_devs[] __initdata = {
+ {
+ I2C_BOARD_INFO("pcf50633", 0x73),
+ .irq = GTA02_IRQ_PCF50633,
+ .platform_data = &gta02_pcf_pdata,
+ },
+ {
+ I2C_BOARD_INFO("wm8753", 0x1a),
+ },
+};
+
+static struct s3c2410_nand_set gta02_nand_sets[] = {
+ [0] = {
+ /*
+ * This name is also hard-coded in the boot loaders, so
+ * changing it would require all users to upgrade
+ * their boot loaders, some of which are stored in a NOR
+ * that is considered to be immutable.
+ */
+ .name = "neo1973-nand",
+ .nr_chips = 1,
+ .use_bbt = 1,
+ .force_soft_ecc = 1,
+ },
+};
+
+/*
+ * Choose a set of timings derived from the S3C2442B MCP54
+ * data sheet (K5D2G13ACM-D075 MCP Memory).
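+ * Timings are given in nanoseconds; the NAND driver converts them to HCLK cycles.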
+ */
+
+static struct s3c2410_platform_nand gta02_nand_info = {
+ .tacls = 0,
+ .twrph0 = 25,
+ .twrph1 = 15,
+ .nr_sets = ARRAY_SIZE(gta02_nand_sets),
+ .sets = gta02_nand_sets,
+};
+
+
+static void gta02_udc_command(enum s3c2410_udc_cmd_e cmd)
+{
+ switch (cmd) {
+ case S3C2410_UDC_P_ENABLE:
+ pr_debug("%s S3C2410_UDC_P_ENABLE\n", __func__);
+ gpio_direction_output(GTA02_GPIO_USB_PULLUP, 1);
+ break;
+ case S3C2410_UDC_P_DISABLE:
+ pr_debug("%s S3C2410_UDC_P_DISABLE\n", __func__);
+ gpio_direction_output(GTA02_GPIO_USB_PULLUP, 0);
+ break;
+ case S3C2410_UDC_P_RESET:
+ pr_debug("%s S3C2410_UDC_P_RESET\n", __func__);
+ /* FIXME: Do something here. */
+ }
+}
+
+/* Get PMU to set USB current limit accordingly. */
+static struct s3c2410_udc_mach_info gta02_udc_cfg = {
+ .vbus_draw = gta02_udc_vbus_draw,
+ .udc_command = gta02_udc_command,
+
+};
+
+
+
+static void gta02_bl_set_intensity(int intensity)
+{
+ struct pcf50633 *pcf = gta02_pcf;
+ int old_intensity;
+
+ /* We map 8-bit intensity to 6-bit intensity in hardware. */
+ intensity >>= 2;
+
+ /*
+ * This can happen during, e.g., a panic print on a blanked console,
+ * but we can't service I2C without interrupts active, so abort.
+ */
+ if (in_atomic()) {
+ printk(KERN_ERR "gta02_bl_set_intensity called while atomic\n");
+ return;
+ }
+
+ old_intensity = pcf50633_reg_read(pcf, PCF50633_REG_LEDOUT);
+ if (intensity == old_intensity)
+ return;
+
+ /* We can't do this anywhere else. */
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDDIM, 5);
+
+ if (!(pcf50633_reg_read(pcf, PCF50633_REG_LEDENA) & 3))
+ old_intensity = 0;
+
+ /*
+ * The PCF50633 cannot handle LEDOUT = 0 (datasheet p60); if that state
+ * is seen, the LED unit has to be re-enabled.
+ */
+ if (!intensity || !old_intensity)
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 0);
+
+ /* Illegal to set LEDOUT to 0. */
+ if (!intensity)
+ pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f, 2);
+ else
+ pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_LEDOUT, 0x3f,
+ intensity);
+
+ if (intensity)
+ pcf50633_reg_write(pcf, PCF50633_REG_LEDENA, 2);
+
+}
+
+static struct generic_bl_info gta02_bl_info = {
+ .name = "gta02-bl",
+ .max_intensity = 0xff,
+ .default_intensity = 0xff,
+ .set_bl_intensity = gta02_bl_set_intensity,
+};
+
+static struct platform_device gta02_bl_dev = {
+ .name = "generic-bl",
+ .id = 1,
+ .dev = {
+ .platform_data = &gta02_bl_info,
+ },
+};
+
+
+
+/* USB */
+static struct s3c2410_hcd_info gta02_usb_info = {
+ .port[0] = {
+ .flags = S3C_HCDFLG_USED,
+ },
+ .port[1] = {
+ .flags = 0,
+ },
+};
+
+
+static void __init gta02_map_io(void)
+{
+ s3c24xx_init_io(gta02_iodesc, ARRAY_SIZE(gta02_iodesc));
+ s3c24xx_init_clocks(12000000);
+ s3c24xx_init_uarts(gta02_uartcfgs, ARRAY_SIZE(gta02_uartcfgs));
+}
+
+
+/* These are the guys that don't need to be children of PMU. */
+
+static struct platform_device *gta02_devices[] __initdata = {
+ &s3c_device_usb,
+ &s3c_device_wdt,
+ &s3c_device_sdi,
+ &s3c_device_usbgadget,
+ &s3c_device_nand,
+ &gta02_nor_flash,
+ &s3c24xx_pwm_device,
+ &s3c_device_iis,
+ &s3c_device_i2c0,
+};
+
+/* These guys DO need to be children of PMU. */
+
+static struct platform_device *gta02_devices_pmu_children[] = {
+ &gta02_bl_dev,
+};
+
+
+/*
+ * This is called when the pcf50633 is probed, quite late in the day since it
+ * is an I2C bus device. Here we can define platform devices with the
+ * advantage that we can mark the pcf50633 as their parent, so they get
+ * suspended and resumed while the pcf50633 is still around. All devices whose
+ * operation depends on something from the pcf50633 must have this
+ * relationship made explicit like this, or suspend and resume will become an
+ * unreliable hellworld.
+ */
+
+static void gta02_pmu_attach_child_devices(struct pcf50633 *pcf)
+{
+ int n;
+
+ /* Grab a copy of the now probed PMU pointer. */
+ gta02_pcf = pcf;
+
+ for (n = 0; n < ARRAY_SIZE(gta02_devices_pmu_children); n++)
+ gta02_devices_pmu_children[n]->dev.parent = pcf->dev;
+
+ platform_add_devices(gta02_devices_pmu_children,
+ ARRAY_SIZE(gta02_devices_pmu_children));
+}
+
+static void gta02_poweroff(void)
+{
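+ /* Setting the GOSTDBY bit in OOCSHDWN tells the PMU to shut the system down. */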
+ pcf50633_reg_set_bit_mask(gta02_pcf, PCF50633_REG_OOCSHDWN, 1, 1);
+}
+
+static void __init gta02_machine_init(void)
+{
+ /* Set the panic callback to make AUX LED blink at ~5Hz. */
+ panic_blink = gta02_panic_blink;
+
+ s3c_pm_init();
+
+#ifdef CONFIG_CHARGER_PCF50633
+ INIT_DELAYED_WORK(&gta02_charger_work, gta02_charger_worker);
+#endif
+
+ s3c_device_usb.dev.platform_data = &gta02_usb_info;
+ s3c_device_nand.dev.platform_data = &gta02_nand_info;
+
+ s3c24xx_udc_set_platdata(&gta02_udc_cfg);
+ s3c_i2c0_set_platdata(NULL);
+
+ i2c_register_board_info(0, gta02_i2c_devs, ARRAY_SIZE(gta02_i2c_devs));
+
+ platform_add_devices(gta02_devices, ARRAY_SIZE(gta02_devices));
+ pm_power_off = gta02_poweroff;
+}
+
+
+MACHINE_START(NEO1973_GTA02, "GTA02")
+ /* Maintainer: Nelson Castillo <arhuaco@freaks-unidos.net> */
+ .phys_io = S3C2410_PA_UART,
+ .io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
+ .boot_params = S3C2410_SDRAM_PA + 0x100,
+ .map_io = gta02_map_io,
+ .init_irq = s3c24xx_init_irq,
+ .init_machine = gta02_machine_init,
+ .timer = &s3c24xx_timer,
+MACHINE_END
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 3a398befed41..03cd27d917b9 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -62,6 +62,12 @@
#define SHIFT_ASR 0x40
#define SHIFT_RORRRX 0x60
+#define BAD_INSTR 0xdeadc0de
+
+/* Thumb-2 32-bit format per ARMv7 DDI0406A A6.3: first halfword in the e800h, f000h or f800h range */
+#define IS_T32(hi16) \
+ (((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
+
static unsigned long ai_user;
static unsigned long ai_sys;
static unsigned long ai_skipped;
@@ -332,38 +338,48 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
struct pt_regs *regs)
{
unsigned int rd = RD_BITS(instr);
-
- if (((rd & 1) == 1) || (rd == 14))
+ unsigned int rd2;
+ int load;
+
+ if ((instr & 0xfe000000) == 0xe8000000) {
+ /* ARMv7 Thumb-2 32-bit LDRD/STRD */
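+ /* Rt2 is encoded in bits [11:8]; the ARM encoding below implies Rt and Rt+1. */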
+ rd2 = (instr >> 8) & 0xf;
+ load = !!(LDST_L_BIT(instr));
+ } else if (((rd & 1) == 1) || (rd == 14))
goto bad;
+ else {
+ load = ((instr & 0xf0) == 0xd0);
+ rd2 = rd + 1;
+ }
ai_dword += 1;
if (user_mode(regs))
goto user;
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32_unaligned_check(regs->uregs[rd], addr);
- put32_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
user:
- if ((instr & 0xf0) == 0xd0) {
+ if (load) {
unsigned long val;
get32t_unaligned_check(val, addr);
regs->uregs[rd] = val;
get32t_unaligned_check(val, addr + 4);
- regs->uregs[rd + 1] = val;
+ regs->uregs[rd2] = val;
} else {
put32t_unaligned_check(regs->uregs[rd], addr);
- put32t_unaligned_check(regs->uregs[rd + 1], addr + 4);
+ put32t_unaligned_check(regs->uregs[rd2], addr + 4);
}
return TYPE_LDST;
@@ -616,8 +632,72 @@ thumb2arm(u16 tinstr)
/* Else fall through for illegal instruction case */
default:
- return 0xdeadc0de;
+ return BAD_INSTR;
+ }
+}
+
+/*
+ * Convert a Thumb-2 32-bit LDM, STM, LDRD or STRD instruction into an
+ * equivalent instruction the ARM alignment handler can process, and find the
+ * corresponding handler, so that the ARM userland alignment fault fixups can
+ * be reused for Thumb.
+ *
+ * @pinstr: original Thumb-2 instruction; returns the new handleable instruction
+ * @regs: register context
+ * @poffset: returns the offset from the faulted address for later writeback
+ *
+ * NOTES:
+ * 1. Comments below refer to ARMv7 DDI0406A Thumb instruction sections.
+ * 2. The ARMv7 register name Rt is the same as Rd in ARMv6 (Rd is Rt).
+ */
+static void *
+do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+ union offset_union *poffset)
+{
+ unsigned long instr = *pinstr;
+ u16 tinst1 = (instr >> 16) & 0xffff;
+ u16 tinst2 = instr & 0xffff;
+ poffset->un = 0;
+
+ switch (tinst1 & 0xffe0) {
+ /* A6.3.5 Load/Store multiple */
+ case 0xe880: /* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
+ case 0xe8a0: /* ...above writeback version */
+ case 0xe900: /* STMDB/STMFD, LDMDB/LDMEA */
+ case 0xe920: /* ...above writeback version */
+ /* No offset decision needed; the handler calculates it itself. */
+ return do_alignment_ldmstm;
+
+ case 0xf840: /* POP/PUSH T3 (single register) */
+ if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
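+ /* Single-register PUSH/POP: synthesize an ARM STMDB/LDMIA on sp! so the LDM/STM fixup can handle it. */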
+ u32 L = !!(LDST_L_BIT(instr));
+ const u32 subset[2] = {
+ 0xe92d0000, /* STMDB sp!,{registers} */
+ 0xe8bd0000, /* LDMIA sp!,{registers} */
+ };
+ *pinstr = subset[L] | (1<<RD_BITS(instr));
+ return do_alignment_ldmstm;
+ }
+ /* Else an unhandled encoding: fall out and return NULL below. */
+ break;
+
+ /* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
+ case 0xe860:
+ case 0xe960:
+ case 0xe8e0:
+ case 0xe9e0:
+ poffset->un = (tinst2 & 0xff) << 2;
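+ /* Fall through: all of these forms are fixed up by the LDRD/STRD handler. */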
+ case 0xe940:
+ case 0xe9c0:
+ return do_alignment_ldrdstrd;
+
+ /*
+ * No need to handle load/store instructions up to word size
+ * since ARMv6 and later CPUs can perform unaligned accesses.
+ */
+ default:
+ break;
}
+ return NULL;
}
static int
@@ -630,6 +710,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
mm_segment_t fs;
unsigned int fault;
u16 tinstr = 0;
+ int isize = 4;
+ int thumb2_32b = 0;
instrptr = instruction_pointer(regs);
@@ -637,8 +719,19 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
set_fs(KERNEL_DS);
if (thumb_mode(regs)) {
fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
- if (!(fault))
- instr = thumb2arm(tinstr);
+ if (!fault) {
+ if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+ IS_T32(tinstr)) {
+ /* Thumb-2 32-bit */
+ u16 tinst2 = 0;
+ fault = __get_user(tinst2, (u16 *)(instrptr+2));
+ instr = (tinstr << 16) | tinst2;
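+ /* The first halfword goes in the upper 16 bits so the handlers see the full 32-bit encoding. */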
+ thumb2_32b = 1;
+ } else {
+ isize = 2;
+ instr = thumb2arm(tinstr);
+ }
+ }
} else
fault = __get_user(instr, (u32 *)instrptr);
set_fs(fs);
@@ -655,7 +748,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
fixup:
- regs->ARM_pc += thumb_mode(regs) ? 2 : 4;
+ regs->ARM_pc += isize;
switch (CODING_BITS(instr)) {
case 0x00000000: /* 3.13.4 load/store instruction extensions */
@@ -714,18 +807,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
handler = do_alignment_ldrstr;
break;
- case 0x08000000: /* ldm or stm */
- handler = do_alignment_ldmstm;
+ case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */
+ if (thumb2_32b)
+ handler = do_alignment_t32_to_handler(&instr, regs, &offset);
+ else
+ handler = do_alignment_ldmstm;
break;
default:
goto bad;
}
+ if (!handler)
+ goto bad;
type = handler(addr, instr, regs);
- if (type == TYPE_ERROR || type == TYPE_FAULT)
+ if (type == TYPE_ERROR || type == TYPE_FAULT) {
+ regs->ARM_pc -= isize;
goto bad_or_fault;
+ }
if (type == TYPE_LDST)
do_alignment_finish_ldst(addr, instr, regs, offset);
@@ -735,7 +835,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
bad_or_fault:
if (type == TYPE_ERROR)
goto bad;
- regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
/*
* We got a fault - fix it up, or die.
*/
@@ -751,8 +850,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
*/
printk(KERN_ERR "Alignment trap: not handling instruction "
"%0*lx at [<%08lx>]\n",
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr, instrptr);
+ isize << 1,
+ isize == 2 ? tinstr : instr, instrptr);
ai_skipped += 1;
return 1;
@@ -763,8 +862,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
"Address=0x%08lx FSR 0x%03x\n", current->comm,
task_pid_nr(current), instrptr,
- thumb_mode(regs) ? 4 : 8,
- thumb_mode(regs) ? tinstr : instr,
+ isize << 1,
+ isize == 2 ? tinstr : instr,
addr, fsr);
if (ai_usermode & UM_FIXUP)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fdaa9bb87c16..4722582b17b8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -836,6 +836,13 @@ void __init reserve_node_zero(pg_data_t *pgdat)
BOOTMEM_EXCLUSIVE);
}
+ if (machine_is_treo680()) {
+ reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
+ BOOTMEM_EXCLUSIVE);
+ }
+
if (machine_is_palmt5())
reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
BOOTMEM_EXCLUSIVE);
diff --git a/arch/arm/plat-omap/include/mach/sram.h b/arch/arm/plat-omap/include/mach/sram.h
index dca7c16ae903..4d53cc59d7a3 100644
--- a/arch/arm/plat-omap/include/mach/sram.h
+++ b/arch/arm/plat-omap/include/mach/sram.h
@@ -24,7 +24,8 @@ extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
extern u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll);
+ u32 unlock_dll, u32 f, u32 sdrc_mr,
+ u32 inc);
/* Do not use these */
extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl);
@@ -62,7 +63,8 @@ extern unsigned long omap243x_sram_reprogram_sdrc_sz;
extern u32 omap3_sram_configure_core_dpll(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll);
+ u32 unlock_dll, u32 f, u32 sdrc_mr,
+ u32 inc);
extern unsigned long omap3_sram_configure_core_dpll_sz;
#endif
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a5b9bcd6b108..65006df3f1b7 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -371,15 +371,17 @@ static inline int omap243x_sram_init(void)
static u32 (*_omap3_sram_configure_core_dpll)(u32 sdrc_rfr_ctrl,
u32 sdrc_actim_ctrla,
u32 sdrc_actim_ctrlb,
- u32 m2, u32 unlock_dll);
+ u32 m2, u32 unlock_dll,
+ u32 f, u32 sdrc_mr, u32 inc);
u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, u32 sdrc_actim_ctrla,
- u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll)
+ u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll,
+ u32 f, u32 sdrc_mr, u32 inc)
{
BUG_ON(!_omap3_sram_configure_core_dpll);
return _omap3_sram_configure_core_dpll(sdrc_rfr_ctrl,
sdrc_actim_ctrla,
sdrc_actim_ctrlb, m2,
- unlock_dll);
+ unlock_dll, f, sdrc_mr, inc);
}
/* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */
diff --git a/arch/arm/plat-s3c/Makefile b/arch/arm/plat-s3c/Makefile
index 610651455a78..74bb7cb5da49 100644
--- a/arch/arm/plat-s3c/Makefile
+++ b/arch/arm/plat-s3c/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_S3C_DEV_HSMMC) += dev-hsmmc.o
obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
obj-y += dev-i2c0.o
obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
+obj-$(CONFIG_SND_S3C24XX_SOC) += dev-audio.o
obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
diff --git a/arch/arm/plat-s3c/dev-audio.c b/arch/arm/plat-s3c/dev-audio.c
new file mode 100644
index 000000000000..1322beb40dd7
--- /dev/null
+++ b/arch/arm/plat-s3c/dev-audio.c
@@ -0,0 +1,68 @@
+/* linux/arch/arm/plat-s3c/dev-audio.c
+ *
+ * Copyright 2009 Wolfson Microelectronics
+ * Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+
+#include <mach/irqs.h>
+#include <mach/map.h>
+
+#include <plat/devs.h>
+
+
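+/* The S3C64xx SoCs provide two standard IIS ports and, on some parts, an IIS-v4 port. */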
+static struct resource s3c64xx_iis0_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IIS0,
+ .end = S3C64XX_PA_IIS0 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iis0 = {
+ .name = "s3c64xx-iis",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(s3c64xx_iis0_resource),
+ .resource = s3c64xx_iis0_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iis0);
+
+static struct resource s3c64xx_iis1_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IIS1,
+ .end = S3C64XX_PA_IIS1 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iis1 = {
+ .name = "s3c64xx-iis",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(s3c64xx_iis1_resource),
+ .resource = s3c64xx_iis1_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iis1);
+
+static struct resource s3c64xx_iisv4_resource[] = {
+ [0] = {
+ .start = S3C64XX_PA_IISV4,
+ .end = S3C64XX_PA_IISV4 + 0x100 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device s3c64xx_device_iisv4 = {
+ .name = "s3c64xx-iis-v4",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(s3c64xx_iisv4_resource),
+ .resource = s3c64xx_iisv4_resource,
+};
+EXPORT_SYMBOL(s3c64xx_device_iisv4);
diff --git a/arch/arm/plat-s3c/gpio-config.c b/arch/arm/plat-s3c/gpio-config.c
index 08044dec9731..456969b6fa0d 100644
--- a/arch/arm/plat-s3c/gpio-config.c
+++ b/arch/arm/plat-s3c/gpio-config.c
@@ -119,7 +119,7 @@ int s3c_gpio_setcfg_s3c64xx_4bit(struct s3c_gpio_chip *chip,
unsigned int shift = (off & 7) * 4;
u32 con;
- if (off < 8 && chip->chip.ngpio >= 8)
+ if (off < 8 && chip->chip.ngpio > 8)
reg -= 4;
if (s3c_gpio_is_cfg_special(cfg)) {
diff --git a/arch/arm/plat-s3c/include/plat/devs.h b/arch/arm/plat-s3c/include/plat/devs.h
index a0b6768fddcf..b5b9c4d46e9a 100644
--- a/arch/arm/plat-s3c/include/plat/devs.h
+++ b/arch/arm/plat-s3c/include/plat/devs.h
@@ -24,13 +24,16 @@ extern struct platform_device *s3c24xx_uart_src[];
extern struct platform_device s3c_device_timer[];
+extern struct platform_device s3c64xx_device_iis0;
+extern struct platform_device s3c64xx_device_iis1;
+extern struct platform_device s3c64xx_device_iisv4;
+
extern struct platform_device s3c_device_fb;
extern struct platform_device s3c_device_usb;
extern struct platform_device s3c_device_lcd;
extern struct platform_device s3c_device_wdt;
extern struct platform_device s3c_device_i2c0;
extern struct platform_device s3c_device_i2c1;
-extern struct platform_device s3c_device_iis;
extern struct platform_device s3c_device_rtc;
extern struct platform_device s3c_device_adc;
extern struct platform_device s3c_device_sdi;
diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h
index f4dcd14af059..18f958801e64 100644
--- a/arch/arm/plat-s3c/include/plat/nand.h
+++ b/arch/arm/plat-s3c/include/plat/nand.h
@@ -10,19 +10,26 @@
* published by the Free Software Foundation.
*/
-/* struct s3c2410_nand_set
+/**
+ * struct s3c2410_nand_set - define a set of one or more nand chips
+ * @disable_ecc: Entirely disable ECC - Dangerous
+ * @flash_bbt: Openmoko U-Boot can create a Bad Block Table; setting this
+ * flag lets the kernel look for it at boot time and skip the
+ * full NAND scan.
+ * @nr_chips: Number of chips in this set
+ * @nr_partitions: Number of partitions pointed to by @partitions
+ * @name: Name of set (optional)
+ * @nr_map: Map for low-layer logical to physical chip numbers (option)
+ * @partitions: The mtd partition list
*
- * define an set of one or more nand chips registered with an unique mtd
- *
- * nr_chips = number of chips in this set
- * nr_partitions = number of partitions pointed to be partitoons (or zero)
- * name = name of set (optional)
- * nr_map = map for low-layer logical to physical chip numbers (option)
- * partitions = mtd partition list
-*/
-
+ * Defines a set of one or more NAND chips registered with a single mtd. Also
+ * allows flags to be passed to the underlying NAND layer; 'disable_ecc' will
+ * trigger a warning at boot time.
+ */
struct s3c2410_nand_set {
- unsigned int disable_ecc : 1;
+ unsigned int disable_ecc:1;
+ unsigned int flash_bbt:1;
int nr_chips;
int nr_partitions;
@@ -39,7 +46,7 @@ struct s3c2410_platform_nand {
int twrph0; /* active time for nWE/nOE */
int twrph1; /* time for release CLE/ALE from nWE/nOE inactive */
- unsigned int ignore_unset_ecc : 1;
+ unsigned int ignore_unset_ecc:1;
int nr_sets;
struct s3c2410_nand_set *sets;
diff --git a/arch/arm/plat-s3c64xx/Makefile b/arch/arm/plat-s3c64xx/Makefile
index 2ed5df34f9ea..3c8882cd6268 100644
--- a/arch/arm/plat-s3c64xx/Makefile
+++ b/arch/arm/plat-s3c64xx/Makefile
@@ -23,6 +23,7 @@ obj-y += gpiolib.o
obj-$(CONFIG_CPU_S3C6400_INIT) += s3c6400-init.o
obj-$(CONFIG_CPU_S3C6400_CLOCK) += s3c6400-clock.o
+obj-$(CONFIG_CPU_FREQ_S3C64XX) += cpufreq.o
# PM support
diff --git a/arch/arm/plat-s3c64xx/clock.c b/arch/arm/plat-s3c64xx/clock.c
index 0bc2fa1dfc40..7a36e899360d 100644
--- a/arch/arm/plat-s3c64xx/clock.c
+++ b/arch/arm/plat-s3c64xx/clock.c
@@ -191,7 +191,7 @@ static struct clk init_clocks[] = {
.id = -1,
.parent = &clk_h,
.enable = s3c64xx_hclk_ctrl,
- .ctrlbit = S3C_CLKCON_SCLK_UHOST,
+ .ctrlbit = S3C_CLKCON_HCLK_UHOST,
}, {
.name = "hsmmc",
.id = 0,
diff --git a/arch/arm/plat-s3c64xx/cpufreq.c b/arch/arm/plat-s3c64xx/cpufreq.c
new file mode 100644
index 000000000000..e6e0843215df
--- /dev/null
+++ b/arch/arm/plat-s3c64xx/cpufreq.c
@@ -0,0 +1,262 @@
+/* linux/arch/arm/plat-s3c64xx/cpufreq.c
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * S3C64xx CPUfreq Support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+
+static struct clk *armclk;
+static struct regulator *vddarm;
+
+#ifdef CONFIG_CPU_S3C6410
+struct s3c64xx_dvfs {
+ unsigned int vddarm_min;
+ unsigned int vddarm_max;
+};
+
+static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
+ [0] = { 1000000, 1000000 },
+ [1] = { 1000000, 1050000 },
+ [2] = { 1050000, 1100000 },
+ [3] = { 1050000, 1150000 },
+ [4] = { 1250000, 1350000 },
+};
+
+static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
+ { 0, 66000 },
+ { 0, 133000 },
+ { 1, 222000 },
+ { 1, 266000 },
+ { 2, 333000 },
+ { 2, 400000 },
+ { 3, 532000 },
+ { 3, 533000 },
+ { 4, 667000 },
+ { 0, CPUFREQ_TABLE_END },
+};
+#endif
+
+static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
+}
+
+static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
+{
+ if (cpu != 0)
+ return 0;
+
+ return clk_get_rate(armclk) / 1000;
+}
+
+static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int ret;
+ unsigned int i;
+ struct cpufreq_freqs freqs;
+ struct s3c64xx_dvfs *dvfs;
+
+ ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
+ target_freq, relation, &i);
+ if (ret != 0)
+ return ret;
+
+ freqs.cpu = 0;
+ freqs.old = clk_get_rate(armclk) / 1000;
+ freqs.new = s3c64xx_freq_table[i].frequency;
+ freqs.flags = 0;
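+ /* The table's index field selects the matching VDDARM range in s3c64xx_dvfs_table. */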
+ dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
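+ /* Raise VDDARM before speeding up; the matching block below lowers it after slowing down. */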
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new > freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+ }
+#endif
+
+ ret = clk_set_rate(armclk, freqs.new * 1000);
+ if (ret < 0) {
+ pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+ freqs.new, ret);
+ goto err;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (vddarm && freqs.new < freqs.old) {
+ ret = regulator_set_voltage(vddarm,
+ dvfs->vddarm_min,
+ dvfs->vddarm_max);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ freqs.new, ret);
+ goto err_clk;
+ }
+ }
+#endif
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ pr_debug("cpufreq: Set actual frequency %lukHz\n",
+ clk_get_rate(armclk) / 1000);
+
+ return 0;
+
+err_clk:
+ if (clk_set_rate(armclk, freqs.old * 1000) < 0)
+ pr_err("Failed to restore original clock rate\n");
+err:
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+#ifdef CONFIG_REGULATOR
+static void __init s3c64xx_cpufreq_constrain_voltages(void)
+{
+ int count, v, i, found;
+ struct cpufreq_frequency_table *freq;
+ struct s3c64xx_dvfs *dvfs;
+
+ count = regulator_count_voltages(vddarm);
+ if (count < 0) {
+ pr_err("cpufreq: Unable to check supported voltages\n");
+ return;
+ }
+
+ freq = s3c64xx_freq_table;
+ while (freq->frequency != CPUFREQ_TABLE_END) {
+ if (freq->frequency == CPUFREQ_ENTRY_INVALID) {
+ freq++;
+ continue;
+ }
+
+ dvfs = &s3c64xx_dvfs_table[freq->index];
+ found = 0;
+
+ for (i = 0; i < count; i++) {
+ v = regulator_list_voltage(vddarm, i);
+ if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
+ found = 1;
+ }
+
+ if (!found) {
+ pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ freq->frequency);
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+ }
+
+ freq++;
+ }
+}
+#endif
+
+static int __init s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ int ret;
+ struct cpufreq_frequency_table *freq;
+
+ if (policy->cpu != 0)
+ return -EINVAL;
+
+ if (s3c64xx_freq_table == NULL) {
+ pr_err("cpufreq: No frequency information for this CPU\n");
+ return -ENODEV;
+ }
+
+ armclk = clk_get(NULL, "armclk");
+ if (IS_ERR(armclk)) {
+ pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+ PTR_ERR(armclk));
+ return PTR_ERR(armclk);
+ }
+
+#ifdef CONFIG_REGULATOR
+ vddarm = regulator_get(NULL, "vddarm");
+ if (IS_ERR(vddarm)) {
+ ret = PTR_ERR(vddarm);
+ pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
+ pr_err("cpufreq: Only frequency scaling available\n");
+ vddarm = NULL;
+ } else {
+ s3c64xx_cpufreq_constrain_voltages();
+ }
+#endif
+
+ freq = s3c64xx_freq_table;
+ while (freq->frequency != CPUFREQ_TABLE_END) {
+ unsigned long r;
+
+ /* Check for frequencies we can generate */
+ r = clk_round_rate(armclk, freq->frequency * 1000);
+ r /= 1000;
+ if (r != freq->frequency)
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+
+ /* If we have no regulator then assume startup
+ * frequency is the maximum we can support. */
+ if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
+ freq->frequency = CPUFREQ_ENTRY_INVALID;
+
+ freq++;
+ }
+
+ policy->cur = clk_get_rate(armclk) / 1000;
+
+ /* Pick a conservative guess in ns: we'll need ~1 I2C/SPI
+ * write plus clock reprogramming. */
+ policy->cpuinfo.transition_latency = 2 * 1000 * 1000;
+
+ ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+ if (ret != 0) {
+ pr_err("cpufreq: Failed to configure frequency table: %d\n",
+ ret);
+ regulator_put(vddarm);
+ clk_put(armclk);
+ }
+
+ return ret;
+}
+
+static struct cpufreq_driver s3c64xx_cpufreq_driver = {
+ .owner = THIS_MODULE,
+ .flags = 0,
+ .verify = s3c64xx_cpufreq_verify_speed,
+ .target = s3c64xx_cpufreq_set_target,
+ .get = s3c64xx_cpufreq_get_speed,
+ .init = s3c64xx_cpufreq_driver_init,
+ .name = "s3c",
+};
+
+static int __init s3c64xx_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
+}
+module_init(s3c64xx_cpufreq_init);
diff --git a/arch/arm/plat-s3c64xx/gpiolib.c b/arch/arm/plat-s3c64xx/gpiolib.c
index da7b60ee5e67..92859290ea33 100644
--- a/arch/arm/plat-s3c64xx/gpiolib.c
+++ b/arch/arm/plat-s3c64xx/gpiolib.c
@@ -321,6 +321,11 @@ static struct s3c_gpio_cfg gpio_2bit_cfg_eint11 = {
.get_pull = s3c_gpio_getpull_updown,
};
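+/* Bank GPN pins map 1:1 onto the external interrupt lines, so gpio_to_irq() is a fixed offset. */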
+int s3c64xx_gpio2int_gpn(struct gpio_chip *chip, unsigned pin)
+{
+ return IRQ_EINT(0) + pin;
+}
+
static struct s3c_gpio_chip gpio_2bit[] = {
{
.base = S3C64XX_GPF_BASE,
@@ -353,6 +358,7 @@ static struct s3c_gpio_chip gpio_2bit[] = {
.base = S3C64XX_GPN(0),
.ngpio = S3C64XX_GPIO_N_NR,
.label = "GPN",
+ .to_irq = s3c64xx_gpio2int_gpn,
},
}, {
.base = S3C64XX_GPO_BASE,
diff --git a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
index 52836d41e333..a8777a755dfa 100644
--- a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
+++ b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
@@ -88,11 +88,11 @@
#define S3C6400_CLKDIV2_SPI0_SHIFT (0)
/* HCLK GATE Registers */
-#define S3C_CLKCON_HCLK_BUS (1<<30)
-#define S3C_CLKCON_HCLK_SECUR (1<<29)
-#define S3C_CLKCON_HCLK_SDMA1 (1<<28)
-#define S3C_CLKCON_HCLK_SDMA2 (1<<27)
-#define S3C_CLKCON_HCLK_UHOST (1<<26)
+#define S3C_CLKCON_HCLK_3DSE (1<<31)
+#define S3C_CLKCON_HCLK_UHOST (1<<29)
+#define S3C_CLKCON_HCLK_SECUR (1<<28)
+#define S3C_CLKCON_HCLK_SDMA1 (1<<27)
+#define S3C_CLKCON_HCLK_SDMA0 (1<<26)
#define S3C_CLKCON_HCLK_IROM (1<<25)
#define S3C_CLKCON_HCLK_DDR1 (1<<24)
#define S3C_CLKCON_HCLK_DDR0 (1<<23)
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index fec64678a63a..33026eff2aa4 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
-# Last update: Fri May 29 10:14:20 2009
+# Last update: Sat Jun 20 22:28:39 2009
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -1455,7 +1455,7 @@ gba MACH_GBA GBA 1457
h6044 MACH_H6044 H6044 1458
app MACH_APP APP 1459
tct_hammer MACH_TCT_HAMMER TCT_HAMMER 1460
-herald MACH_HERMES HERMES 1461
+herald MACH_HERALD HERALD 1461
artemis MACH_ARTEMIS ARTEMIS 1462
htctitan MACH_HTCTITAN HTCTITAN 1463
qranium MACH_QRANIUM QRANIUM 1464
@@ -2245,3 +2245,38 @@ str9 MACH_STR9 STR9 2257
omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
simcom MACH_SIMCOM SIMCOM 2259
mcwebio MACH_MCWEBIO MCWEBIO 2260
+omap3_phrazer MACH_OMAP3_PHRAZER OMAP3_PHRAZER 2261
+darwin MACH_DARWIN DARWIN 2262
+oratiscomu MACH_ORATISCOMU ORATISCOMU 2263
+rtsbc20 MACH_RTSBC20 RTSBC20 2264
+i780 MACH_I780 I780 2265
+gemini324 MACH_GEMINI324 GEMINI324 2266
+oratislan MACH_ORATISLAN ORATISLAN 2267
+oratisalog MACH_ORATISALOG ORATISALOG 2268
+oratismadi MACH_ORATISMADI ORATISMADI 2269
+oratisot16 MACH_ORATISOT16 ORATISOT16 2270
+oratisdesk MACH_ORATISDESK ORATISDESK 2271
+v2p_ca9 MACH_V2P_CA9 V2P_CA9 2272
+sintexo MACH_SINTEXO SINTEXO 2273
+cm3389 MACH_CM3389 CM3389 2274
+omap3_cio MACH_OMAP3_CIO OMAP3_CIO 2275
+sgh_i900 MACH_SGH_I900 SGH_I900 2276
+bst100 MACH_BST100 BST100 2277
+passion MACH_PASSION PASSION 2278
+indesign_at91sam MACH_INDESIGN_AT91SAM INDESIGN_AT91SAM 2279
+c4_badger MACH_C4_BADGER C4_BADGER 2280
+c4_viper MACH_C4_VIPER C4_VIPER 2281
+d2net MACH_D2NET D2NET 2282
+bigdisk MACH_BIGDISK BIGDISK 2283
+notalvision MACH_NOTALVISION NOTALVISION 2284
+omap3_kboc MACH_OMAP3_KBOC OMAP3_KBOC 2285
+cyclone MACH_CYCLONE CYCLONE 2286
+ninja MACH_NINJA NINJA 2287
+at91sam9g20ek_2mmc MACH_AT91SAM9G20EK_2MMC AT91SAM9G20EK_2MMC 2288
+bcmring MACH_BCMRING BCMRING 2289
+resol_dl2 MACH_RESOL_DL2 RESOL_DL2 2290
+ifosw MACH_IFOSW IFOSW 2291
+htcrhodium MACH_HTCRHODIUM HTCRHODIUM 2292
+htctopaz MACH_HTCTOPAZ HTCTOPAZ 2293
+matrix504 MACH_MATRIX504 MATRIX504 2294
+mrfsa MACH_MRFSA MRFSA 2295
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 8ea0d942cdea..7faa2f554ab1 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -274,7 +274,7 @@ config BF_REV_0_0
config BF_REV_0_1
bool "0.1"
- depends on (BF52x || (BF54x && !BF54xM))
+ depends on (BF51x || BF52x || (BF54x && !BF54xM))
config BF_REV_0_2
bool "0.2"
@@ -358,7 +358,7 @@ config MEM_MT48LC8M32B2B5_7
config MEM_MT48LC32M16A2TG_75
bool
- depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP || BFIN526_EZBRD)
+ depends on (BFIN527_EZKIT || BFIN532_IP0X || BLACKSTAMP)
default y
config MEM_MT48LC32M8A2_75
@@ -366,6 +366,11 @@ config MEM_MT48LC32M8A2_75
depends on (BFIN518F_EZBRD)
default y
+config MEM_MT48H32M16LFCJ_75
+ bool
+ depends on (BFIN526_EZBRD)
+ default y
+
source "arch/blackfin/mach-bf518/Kconfig"
source "arch/blackfin/mach-bf527/Kconfig"
source "arch/blackfin/mach-bf533/Kconfig"
@@ -623,7 +628,6 @@ choice
config TICKSOURCE_GPTMR0
bool "Gptimer0 (SCLK domain)"
select BFIN_GPTIMERS
- depends on !IPIPE
config TICKSOURCE_CORETMR
bool "Core timer (CCLK domain)"
@@ -644,6 +648,7 @@ config CYCLES_CLOCKSOURCE
config GPTMR0_CLOCKSOURCE
bool "Use GPTimer0 as a clocksource (higher rating)"
+ select BFIN_GPTIMERS
depends on GENERIC_CLOCKEVENTS
depends on !TICKSOURCE_GPTMR0
@@ -908,23 +913,41 @@ endchoice
comment "Cache Support"
+
config BFIN_ICACHE
bool "Enable ICACHE"
+ default y
+config BFIN_ICACHE_LOCK
+ bool "Enable Instruction Cache Locking"
+ depends on BFIN_ICACHE
+ default n
+config BFIN_EXTMEM_ICACHEABLE
+ bool "Enable ICACHE for external memory"
+ depends on BFIN_ICACHE
+ default y
+config BFIN_L2_ICACHEABLE
+ bool "Enable ICACHE for L2 SRAM"
+ depends on BFIN_ICACHE
+ depends on BF54x || BF561
+ default n
+
config BFIN_DCACHE
bool "Enable DCACHE"
+ default y
config BFIN_DCACHE_BANKA
bool "Enable only 16k BankA DCACHE - BankB is SRAM"
depends on BFIN_DCACHE && !BF531
default n
-config BFIN_ICACHE_LOCK
- bool "Enable Instruction Cache Locking"
-
-choice
- prompt "External memory cache policy"
+config BFIN_EXTMEM_DCACHEABLE
+ bool "Enable DCACHE for external memory"
depends on BFIN_DCACHE
- default BFIN_WB if !SMP
- default BFIN_WT if SMP
-config BFIN_WB
+ default y
+choice
+ prompt "External memory DCACHE policy"
+ depends on BFIN_EXTMEM_DCACHEABLE
+ default BFIN_EXTMEM_WRITEBACK if !SMP
+ default BFIN_EXTMEM_WRITETHROUGH if SMP
+config BFIN_EXTMEM_WRITEBACK
bool "Write back"
depends on !SMP
help
@@ -942,7 +965,7 @@ config BFIN_WB
If you are unsure of the options and you want to be safe,
then go with Write Through.
-config BFIN_WT
+config BFIN_EXTMEM_WRITETHROUGH
bool "Write through"
help
Write Back Policy:
@@ -961,23 +984,26 @@ config BFIN_WT
endchoice
+config BFIN_L2_DCACHEABLE
+ bool "Enable DCACHE for L2 SRAM"
+ depends on BFIN_DCACHE
+ depends on BF54x || BF561
+ default n
choice
- prompt "L2 SRAM cache policy"
- depends on (BF54x || BF561)
- default BFIN_L2_WT
-config BFIN_L2_WB
+ prompt "L2 SRAM DCACHE policy"
+ depends on BFIN_L2_DCACHEABLE
+ default BFIN_L2_WRITEBACK
+config BFIN_L2_WRITEBACK
bool "Write back"
depends on !SMP
-config BFIN_L2_WT
+config BFIN_L2_WRITETHROUGH
bool "Write through"
depends on !SMP
-
-config BFIN_L2_NOT_CACHED
- bool "Not cached"
-
endchoice
+
+comment "Memory Protection Unit"
config MPU
bool "Enable the memory protection unit (EXPERIMENTAL)"
default n
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 3ab6f23561dd..fd9ccc5fea10 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -13,7 +13,7 @@ extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma
quiet_cmd_uimage = UIMAGE $@
cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
- -C $(2) -n 'Linux-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
+ -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' -a $(CONFIG_BOOT_LOAD) \
-e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
-d $< $@
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index baec1337f282..dcfb4889559a 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -326,11 +326,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -413,11 +419,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -916,7 +922,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
# CONFIG_MMC_SDHCI is not set
CONFIG_SDH_BFIN=m
CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND=y
-CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ=y
+# CONFIG_SDH_BFIN_ENABLE_SDIO_IRQ is not set
# CONFIG_MMC_SPI is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
@@ -1147,7 +1153,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index c06262e41f7c..48a3a7a9099c 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -331,16 +331,18 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_MPU is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
#
-# Asynchonous Memory Configuration
+# Memory Protection Unit
#
+# CONFIG_MPU is not set
#
# EBIU_AMGCTL Global Control
@@ -418,11 +420,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1424,7 +1426,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index e9175c608aa7..dd8352791daf 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -331,11 +331,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -418,11 +424,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1505,7 +1511,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index 5aa63bafdd62..4c044805cb5c 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -292,12 +292,21 @@ CONFIG_DMA_UNCACHED_1M=y
#
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -391,11 +400,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1052,7 +1061,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index fed25329e13c..c99bbcd09a68 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -293,11 +293,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -391,11 +397,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1216,7 +1222,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index f9ac20d55799..092ffda80e68 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -300,11 +300,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -399,11 +405,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1269,7 +1275,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index ee98e227b887..fa698a89f6fe 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -311,11 +311,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -398,11 +404,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1203,7 +1209,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index deeabef8ab80..b3d3cab81cfe 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -366,14 +366,19 @@ CONFIG_DMA_UNCACHED_2M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_BFIN_L2_WB is not set
-CONFIG_BFIN_L2_WT=y
-# CONFIG_BFIN_L2_NOT_CACHED is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -459,11 +464,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1606,7 +1611,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index dcfbe2e2931e..0313cd1d9824 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -331,14 +331,19 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-# CONFIG_BFIN_L2_WB is not set
-CONFIG_BFIN_L2_WT=y
-# CONFIG_BFIN_L2_NOT_CACHED is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -425,11 +430,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1044,7 +1049,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index 174c578b8ec4..5d944ffd4ab0 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -285,11 +285,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index e17875e8abe8..648a31d01bf4 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -329,11 +329,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -417,11 +423,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1246,7 +1252,7 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
# CONFIG_DEBUG_KOBJECT is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index fafd95e84b28..ae665b93b875 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -262,12 +262,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -353,10 +358,10 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -873,7 +878,7 @@ CONFIG_ENABLE_MUST_CHECK=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index e73aa5af58b9..d74b6f4db35d 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -297,11 +297,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -383,11 +389,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -861,7 +867,7 @@ CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 80211303f6b9..7fc8dfa1719f 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -270,12 +270,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -361,10 +366,10 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
-CONFIG_INET_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -901,7 +906,7 @@ CONFIG_ENABLE_MUST_CHECK=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
CONFIG_DEBUG_BFIN_HWTRACE_ON=y
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index dd815f0d1517..acca4e51a45a 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -333,12 +333,19 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -428,11 +435,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -1334,7 +1341,7 @@ CONFIG_ENABLE_MUST_CHECK=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_SAMPLES is not set
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index 16c198bd40c5..bae4ee6e68bb 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -308,12 +308,19 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+# CONFIG_BFIN_L2_ICACHEABLE is not set
+# CONFIG_BFIN_L2_DCACHEABLE is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -395,11 +402,11 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -837,7 +844,7 @@ CONFIG_ENABLE_MUST_CHECK=y
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_SAMPLES is not set
CONFIG_DEBUG_MMRS=y
CONFIG_DEBUG_HUNT_FOR_ZERO=y
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 6b4c1a982383..a6a7c8ede705 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -258,12 +258,18 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-CONFIG_BFIN_ICACHE_LOCK=y
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
+# CONFIG_MPU is not set
#
# Asynchronous Memory Configuration
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index 09701f907e9b..ff377fae061b 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -295,11 +295,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -382,11 +388,11 @@ CONFIG_IP_PNP=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=y
-CONFIG_INET_XFRM_MODE_TUNNEL=y
-CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG=y
+# CONFIG_INET_DIAG is not set
CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index ec84a53daae9..814f9cacf407 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -279,12 +279,18 @@ CONFIG_DMA_UNCACHED_2M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
-CONFIG_L1_MAX_PIECE=16
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
+# CONFIG_MPU is not set
#
# Asynchronous Memory Configuration
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 6e2796240fdc..375e75a27abc 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -287,11 +287,17 @@ CONFIG_DMA_UNCACHED_1M=y
# Cache Support
#
CONFIG_BFIN_ICACHE=y
+# CONFIG_BFIN_ICACHE_LOCK is not set
CONFIG_BFIN_DCACHE=y
# CONFIG_BFIN_DCACHE_BANKA is not set
-# CONFIG_BFIN_ICACHE_LOCK is not set
-CONFIG_BFIN_WB=y
-# CONFIG_BFIN_WT is not set
+CONFIG_BFIN_EXTMEM_ICACHEABLE=y
+CONFIG_BFIN_EXTMEM_DCACHEABLE=y
+CONFIG_BFIN_EXTMEM_WRITEBACK=y
+# CONFIG_BFIN_EXTMEM_WRITETHROUGH is not set
+
+#
+# Memory Protection Unit
+#
# CONFIG_MPU is not set
#
@@ -709,7 +715,7 @@ CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_FS=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
diff --git a/arch/blackfin/include/asm/blackfin.h b/arch/blackfin/include/asm/blackfin.h
index 8bb2cb139756..4d4439583396 100644
--- a/arch/blackfin/include/asm/blackfin.h
+++ b/arch/blackfin/include/asm/blackfin.h
@@ -86,6 +86,7 @@ static inline void CSYNC(void)
#endif /* __ASSEMBLY__ */
+#include <asm/mem_map.h>
#include <mach/blackfin.h>
#include <asm/bfin-global.h>
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 2ef669ed9222..477050ad5c53 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -35,10 +35,10 @@
#if defined(CONFIG_SMP) && \
!defined(CONFIG_BFIN_CACHE_COHERENT)
-# if defined(CONFIG_BFIN_ICACHE)
+# if defined(CONFIG_BFIN_ICACHEABLE) || defined(CONFIG_BFIN_L2_ICACHEABLE)
# define __ARCH_SYNC_CORE_ICACHE
# endif
-# if defined(CONFIG_BFIN_DCACHE)
+# if defined(CONFIG_BFIN_DCACHEABLE) || defined(CONFIG_BFIN_L2_DCACHEABLE)
# define __ARCH_SYNC_CORE_DCACHE
# endif
#ifndef __ASSEMBLY__
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 5c17dee53b5d..7e55549e180f 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -56,7 +56,7 @@ extern void blackfin_invalidate_entire_icache(void);
static inline void flush_icache_range(unsigned start, unsigned end)
{
-#if defined(CONFIG_BFIN_WB)
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
blackfin_dcache_flush_range(start, end);
#endif
@@ -87,9 +87,9 @@ do { memcpy(dst, src, len); \
#else
# define invalidate_dcache_range(start,end) do { } while (0)
#endif
-#if defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_WB)
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
# define flush_dcache_range(start,end) blackfin_dcache_flush_range((start), (end))
-# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
+# define flush_dcache_page(page) blackfin_dflush_page(page_address(page))
#else
# define flush_dcache_range(start,end) do { } while (0)
# define flush_dcache_page(page) do { } while (0)
@@ -100,7 +100,7 @@ extern unsigned long reserved_mem_icache_on;
static inline int bfin_addr_dcacheable(unsigned long addr)
{
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
if (addr < (_ramend - DMA_UNCACHED_REGION))
return 1;
#endif
@@ -109,7 +109,7 @@ static inline int bfin_addr_dcacheable(unsigned long addr)
addr >= _ramend && addr < physical_mem_end)
return 1;
-#ifndef CONFIG_BFIN_L2_NOT_CACHED
+#ifdef CONFIG_BFIN_L2_DCACHEABLE
if (addr >= L2_START && addr < L2_START + L2_LENGTH)
return 1;
#endif
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index a75a6a9f0949..c5dacf8f8cf9 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -37,8 +37,6 @@
#define L1_IMEMORY ( CPLB_USER_RD | CPLB_VALID | CPLB_LOCK)
#define SDRAM_INON_CHBL ( CPLB_USER_RD | CPLB_VALID)
-/*Use the menuconfig cache policy here - CONFIG_BFIN_WT/CONFIG_BFIN_WB*/
-
#if ANOMALY_05000158
#define ANOMALY_05000158_WORKAROUND 0x200
#else
@@ -47,10 +45,12 @@
#define CPLB_COMMON (CPLB_DIRTY | CPLB_SUPV_WR | CPLB_USER_WR | CPLB_USER_RD | CPLB_VALID | ANOMALY_05000158_WORKAROUND)
-#ifdef CONFIG_BFIN_WB /*Write Back Policy */
+#ifdef CONFIG_BFIN_EXTMEM_WRITEBACK
#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_COMMON)
-#else /*Write Through */
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
#define SDRAM_DGENERIC (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
+#else
+#define SDRAM_DGENERIC (CPLB_COMMON)
#endif
#define SDRAM_DNON_CHBL (CPLB_COMMON)
@@ -61,21 +61,23 @@
#ifdef CONFIG_SMP
#define L2_ATTR (INITIAL_T | I_CPLB | D_CPLB)
-#define L2_IMEMORY (CPLB_COMMON)
-#define L2_DMEMORY (CPLB_LOCK | CPLB_COMMON)
+#define L2_IMEMORY (CPLB_COMMON | PAGE_SIZE_1MB)
+#define L2_DMEMORY (CPLB_LOCK | CPLB_COMMON | PAGE_SIZE_1MB)
#else
#define L2_ATTR (INITIAL_T | SWITCH_T | I_CPLB | D_CPLB)
-#define L2_IMEMORY (SDRAM_IGENERIC)
-
-# if defined(CONFIG_BFIN_L2_WB)
-# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_COMMON)
-# elif defined(CONFIG_BFIN_L2_WT)
-# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON)
-# elif defined(CONFIG_BFIN_L2_NOT_CACHED)
-# define L2_DMEMORY (CPLB_COMMON)
+# if defined(CONFIG_BFIN_L2_ICACHEABLE)
+# define L2_IMEMORY (CPLB_L1_CHBL | CPLB_USER_RD | CPLB_VALID | PAGE_SIZE_1MB)
+# else
+# define L2_IMEMORY ( CPLB_USER_RD | CPLB_VALID | PAGE_SIZE_1MB)
+# endif
+
+# if defined(CONFIG_BFIN_L2_WRITEBACK)
+# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_COMMON | PAGE_SIZE_1MB)
+# elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+# define L2_DMEMORY (CPLB_L1_CHBL | CPLB_WT | CPLB_L1_AOW | CPLB_COMMON | PAGE_SIZE_1MB)
# else
-# define L2_DMEMORY (0)
+# define L2_DMEMORY (CPLB_COMMON | PAGE_SIZE_1MB)
# endif
#endif /* CONFIG_SMP */
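
Editor's note: PAGE_SIZE_1MB is now folded into the L2 CPLB macros themselves, so call sites stop OR-ing it in. A minimal sketch of how a CPLB slot is filled with these macros (this mirrors the cplbinit.c hunk later in this patch; the table and index names are taken from that file and are shown here only for orientation):

    /* one 1MB CPLB entry each for L2; the page size is already baked into the macro */
    dcplb_tbl[cpu][i_d].addr   = L2_START;
    dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
    icplb_tbl[cpu][i_i].addr   = L2_START;
    icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
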
diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
index d7d9148e433c..ed6b1f3cccce 100644
--- a/arch/blackfin/include/asm/dma-mapping.h
+++ b/arch/blackfin/include/asm/dma-mapping.h
@@ -95,4 +95,17 @@ static inline void dma_sync_single_for_device(struct device *dev,
enum dma_data_direction dir)
{
}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+}
+
#endif /* _BLACKFIN_DMA_MAPPING_H */
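
Editor's note: the new scatterlist sync hooks are empty on this port, presumably so drivers following the generic streaming-DMA pattern build unchanged. A minimal usage sketch against the standard DMA API (the helper, its error handling, and the surrounding driver context are illustrative, not from the patch):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* illustrative helper: map a receive scatterlist, let the device fill it,
     * then hand the buffers back to the CPU */
    static int rx_map_and_sync(struct device *dev, struct scatterlist *sgl, int nents)
    {
            int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
            if (!mapped)
                    return -ENOMEM;
            /* ... start the transfer and wait for completion ... */
            dma_sync_sg_for_cpu(dev, sgl, mapped, DMA_FROM_DEVICE); /* no-op on this port */
            /* CPU may read the data here */
            dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
            return 0;
    }
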
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index bbe1c3726b69..87ba9ad399cb 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -35,9 +35,9 @@
#include <asm/atomic.h>
#include <asm/traps.h>
-#define IPIPE_ARCH_STRING "1.10-00"
+#define IPIPE_ARCH_STRING "1.11-00"
#define IPIPE_MAJOR_NUMBER 1
-#define IPIPE_MINOR_NUMBER 10
+#define IPIPE_MINOR_NUMBER 11
#define IPIPE_PATCH_NUMBER 0
#ifdef CONFIG_SMP
@@ -207,7 +207,7 @@ void ipipe_init_irq_threads(void);
int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
-#ifdef CONFIG_GENERIC_CLOCKEVENTS
+#ifdef CONFIG_TICKSOURCE_CORETMR
#define IRQ_SYSTMR IRQ_CORETMR
#define IRQ_PRIOTMR IRQ_CORETMR
#else
@@ -240,8 +240,13 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
#define ipipe_init_irq_threads() do { } while (0)
#define ipipe_start_irq_thread(irq, desc) 0
+#ifndef CONFIG_TICKSOURCE_GPTMR0
#define IRQ_SYSTMR IRQ_CORETMR
#define IRQ_PRIOTMR IRQ_CORETMR
+#else
+#define IRQ_SYSTMR IRQ_TIMER0
+#define IRQ_PRIOTMR CONFIG_IRQ_TIMER0
+#endif
#define __ipipe_root_tick_p(regs) 1
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index 3e8acbd1a3be..490098f532a7 100644
--- a/arch/blackfin/include/asm/ipipe_base.h
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -51,23 +51,23 @@
extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
-static inline void __ipipe_stall_root(void)
-{
- volatile unsigned long *p = &__ipipe_root_status;
- set_bit(0, p);
-}
+#define __ipipe_stall_root() \
+ do { \
+ volatile unsigned long *p = &__ipipe_root_status; \
+ set_bit(0, p); \
+ } while (0)
-static inline unsigned long __ipipe_test_and_stall_root(void)
-{
- volatile unsigned long *p = &__ipipe_root_status;
- return test_and_set_bit(0, p);
-}
+#define __ipipe_test_and_stall_root() \
+ ({ \
+ volatile unsigned long *p = &__ipipe_root_status; \
+ test_and_set_bit(0, p); \
+ })
-static inline unsigned long __ipipe_test_root(void)
-{
- const unsigned long *p = &__ipipe_root_status;
- return test_bit(0, p);
-}
+#define __ipipe_test_root() \
+ ({ \
+ const unsigned long *p = &__ipipe_root_status; \
+ test_bit(0, p); \
+ })
#endif /* !__ASSEMBLY__ */
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 9a7f63a83c47..42a15f5ce0d0 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -22,13 +22,6 @@
/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
#include <mach/irq.h>
-/* Xenomai IPIPE helpers */
-#define local_irq_restore_hw(x) local_irq_restore(x)
-#define local_irq_save_hw(x) local_irq_save(x)
-#define local_irq_enable_hw(x) local_irq_enable(x)
-#define local_irq_disable_hw(x) local_irq_disable(x)
-#define irqs_disabled_hw(x) irqs_disabled(x)
-
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
#else
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 139cba4651b1..9b19a19d9ae9 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -31,6 +31,150 @@ static inline unsigned long bfin_cli(void)
return flags;
}
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_base.h>
+#include <linux/ipipe_trace.h>
+
+#ifdef CONFIG_DEBUG_HWERR
+# define bfin_no_irqs 0x3f
+#else
+# define bfin_no_irqs 0x1f
+#endif
+
+#define raw_local_irq_disable() \
+ do { \
+ ipipe_check_context(ipipe_root_domain); \
+ __ipipe_stall_root(); \
+ barrier(); \
+ } while (0)
+
+static inline void raw_local_irq_enable(void)
+{
+ barrier();
+ ipipe_check_context(ipipe_root_domain);
+ __ipipe_unstall_root();
+}
+
+#define raw_local_save_flags_ptr(x) \
+ do { \
+ *(x) = __ipipe_test_root() ? bfin_no_irqs : bfin_irq_flags; \
+ } while (0)
+
+#define raw_local_save_flags(x) raw_local_save_flags_ptr(&(x))
+
+#define raw_irqs_disabled_flags(x) ((x) == bfin_no_irqs)
+
+#define raw_local_irq_save_ptr(x) \
+ do { \
+ *(x) = __ipipe_test_and_stall_root() ? bfin_no_irqs : bfin_irq_flags; \
+ barrier(); \
+ } while (0)
+
+#define raw_local_irq_save(x) \
+ do { \
+ ipipe_check_context(ipipe_root_domain); \
+ raw_local_irq_save_ptr(&(x)); \
+ } while (0)
+
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
+{
+ /*
+ * Merge virtual and real interrupt mask bits into a single
+ * 32bit word.
+ */
+ return (real & ~(1 << 31)) | ((virt != 0) << 31);
+}
+
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+ int virt = (*x & (1 << 31)) != 0;
+ *x &= ~(1L << 31);
+ return virt;
+}
+
+static inline void local_irq_disable_hw_notrace(void)
+{
+ bfin_cli();
+}
+
+static inline void local_irq_enable_hw_notrace(void)
+{
+ bfin_sti(bfin_irq_flags);
+}
+
+#define local_save_flags_hw(flags) \
+ do { \
+ (flags) = bfin_read_IMASK(); \
+ } while (0)
+
+#define irqs_disabled_flags_hw(flags) (((flags) & ~0x3f) == 0)
+
+#define irqs_disabled_hw() \
+ ({ \
+ unsigned long flags; \
+ local_save_flags_hw(flags); \
+ irqs_disabled_flags_hw(flags); \
+ })
+
+static inline void local_irq_save_ptr_hw(unsigned long *flags)
+{
+ *flags = bfin_cli();
+#ifdef CONFIG_DEBUG_HWERR
+ bfin_sti(0x3f);
+#endif
+}
+
+#define local_irq_save_hw_notrace(flags) \
+ do { \
+ local_irq_save_ptr_hw(&(flags)); \
+ } while (0)
+
+static inline void local_irq_restore_hw_notrace(unsigned long flags)
+{
+ if (!irqs_disabled_flags_hw(flags))
+ local_irq_enable_hw_notrace();
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+# define local_irq_disable_hw() \
+ do { \
+ if (!irqs_disabled_hw()) { \
+ local_irq_disable_hw_notrace(); \
+ ipipe_trace_begin(0x80000000); \
+ } \
+ } while (0)
+# define local_irq_enable_hw() \
+ do { \
+ if (irqs_disabled_hw()) { \
+ ipipe_trace_end(0x80000000); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+# define local_irq_save_hw(flags) \
+ do { \
+ local_save_flags_hw(flags); \
+ if (!irqs_disabled_flags_hw(flags)) { \
+ local_irq_disable_hw_notrace(); \
+ ipipe_trace_begin(0x80000001); \
+ } \
+ } while (0)
+# define local_irq_restore_hw(flags) \
+ do { \
+ if (!irqs_disabled_flags_hw(flags)) { \
+ ipipe_trace_end(0x80000001); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+# define local_irq_disable_hw() local_irq_disable_hw_notrace()
+# define local_irq_enable_hw() local_irq_enable_hw_notrace()
+# define local_irq_save_hw(flags) local_irq_save_hw_notrace(flags)
+# define local_irq_restore_hw(flags) local_irq_restore_hw_notrace(flags)
+#endif /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#else /* CONFIG_IPIPE */
+
static inline void raw_local_irq_disable(void)
{
bfin_cli();
@@ -44,12 +188,6 @@ static inline void raw_local_irq_enable(void)
#define raw_irqs_disabled_flags(flags) (((flags) & ~0x3f) == 0)
-static inline void raw_local_irq_restore(unsigned long flags)
-{
- if (!raw_irqs_disabled_flags(flags))
- raw_local_irq_enable();
-}
-
static inline unsigned long __raw_local_irq_save(void)
{
unsigned long flags = bfin_cli();
@@ -60,4 +198,18 @@ static inline unsigned long __raw_local_irq_save(void)
}
#define raw_local_irq_save(flags) do { (flags) = __raw_local_irq_save(); } while (0)
+#define local_irq_save_hw(flags) raw_local_irq_save(flags)
+#define local_irq_restore_hw(flags) raw_local_irq_restore(flags)
+#define local_irq_enable_hw() raw_local_irq_enable()
+#define local_irq_disable_hw() raw_local_irq_disable()
+#define irqs_disabled_hw() irqs_disabled()
+
+#endif /* !CONFIG_IPIPE */
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+ if (!raw_irqs_disabled_flags(flags))
+ raw_local_irq_enable();
+}
+
#endif
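
Editor's note: a quick round-trip illustration of the new raw_mangle_irq_bits()/raw_demangle_irq_bits() pair above (the wrapper function is only a sketch; the values are worked out from the definitions in this hunk): bit 31 carries the virtual (pipeline) disable state while the low bits keep the real hardware mask.

    /* returns 1: bit 31 round-trips the virtual state, low bits keep the real mask */
    static int mangle_roundtrip(void)
    {
            unsigned long w = raw_mangle_irq_bits(1, 0x1f);  /* w == 0x8000001f */
            return raw_demangle_irq_bits(&w) == 1 && w == 0x1f;
    }
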
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 61f7487fbf12..4179e329b9c9 100644
--- a/arch/blackfin/include/asm/mem_init.h
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -59,7 +59,7 @@
#define SDRAM_tRP TRP_1
#define SDRAM_tRP_num 1
#define SDRAM_tRAS TRAS_4
-#define SDRAM_tRAS_num 3
+#define SDRAM_tRAS_num 4
#define SDRAM_tRCD TRCD_1
#define SDRAM_tWR TWR_2
#endif
@@ -89,6 +89,85 @@
#endif
#endif
+/*
+ * The BF526-EZ-Board changed SDRAM chips between revisions,
+ * so we use the timings below to accommodate both.
+ */
+#if defined(CONFIG_MEM_MT48H32M16LFCJ_75)
+#if (CONFIG_SCLK_HZ > 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_8
+#define SDRAM_tRAS_num 8
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 104477612) && (CONFIG_SCLK_HZ <= 119402985)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_7
+#define SDRAM_tRAS_num 7
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 89552239) && (CONFIG_SCLK_HZ <= 104477612)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_6
+#define SDRAM_tRAS_num 6
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 74626866) && (CONFIG_SCLK_HZ <= 89552239)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_5
+#define SDRAM_tRAS_num 5
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 66666667) && (CONFIG_SCLK_HZ <= 74626866)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 4
+#define SDRAM_tRCD TRCD_2
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 59701493) && (CONFIG_SCLK_HZ <= 66666667)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_4
+#define SDRAM_tRAS_num 4
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 44776119) && (CONFIG_SCLK_HZ <= 59701493)
+#define SDRAM_tRP TRP_2
+#define SDRAM_tRP_num 2
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ > 29850746) && (CONFIG_SCLK_HZ <= 44776119)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_3
+#define SDRAM_tRAS_num 3
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#if (CONFIG_SCLK_HZ <= 29850746)
+#define SDRAM_tRP TRP_1
+#define SDRAM_tRP_num 1
+#define SDRAM_tRAS TRAS_2
+#define SDRAM_tRAS_num 2
+#define SDRAM_tRCD TRCD_1
+#define SDRAM_tWR TWR_2
+#endif
+#endif
+
#if defined(CONFIG_MEM_MT48LC16M8A2TG_75) || \
defined(CONFIG_MEM_MT48LC8M32B2B5_7)
/*SDRAM INFORMATION: */
@@ -109,6 +188,13 @@
#define SDRAM_CL CL_3
#endif
+#if defined(CONFIG_MEM_MT48H32M16LFCJ_75)
+ /*SDRAM INFORMATION: */
+#define SDRAM_Tref 64 /* Refresh period in milliseconds */
+#define SDRAM_NRA 8192 /* Number of row addresses in SDRAM */
+#define SDRAM_CL CL_2
+#endif
+
#ifdef CONFIG_BFIN_KERNEL_CLOCK_MEMINIT_CALC
/* Equation from section 17 (p17-46) of BF533 HRM */
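
Editor's note: the SCLK breakpoints in the MT48H32M16LFCJ-75 table above appear to be whole-cycle counts of a fixed ~67 ns spec (8/67 ns ≈ 119402985 Hz, 7/67 ns ≈ 104477612 Hz, and so on), i.e. each band programs the smallest tRAS cycle count that still covers roughly 67 ns at that SCLK. A rough sketch of that conversion (the helper name and the 67 ns figure are inferred from the table, not taken from the patch):

    /* smallest number of SCLK cycles that is at least 'ns' nanoseconds long */
    static inline unsigned int ns_to_sclk_cycles(unsigned int ns, unsigned long sclk_hz)
    {
            return ((unsigned long long)ns * sclk_hz + 999999999ULL) / 1000000000ULL;
    }
    /* e.g. ns_to_sclk_cycles(67, 100000000) == 7, matching TRAS_7 in the band above 89552239 Hz */
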
diff --git a/arch/blackfin/include/asm/mem_map.h b/arch/blackfin/include/asm/mem_map.h
index e92b31051bb7..5e21627c9ba2 100644
--- a/arch/blackfin/include/asm/mem_map.h
+++ b/arch/blackfin/include/asm/mem_map.h
@@ -1,87 +1,84 @@
/*
- * mem_map.h
- * Common header file for blackfin family of processors.
+ * Common Blackfin memory map
*
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_H_
-#define _MEM_MAP_H_
+#ifndef __BFIN_MEM_MAP_H__
+#define __BFIN_MEM_MAP_H__
#include <mach/mem_map.h>
-#ifndef __ASSEMBLY__
+/* Every Blackfin so far has MMRs like this */
+#ifndef COREMMR_BASE
+# define COREMMR_BASE 0xFFE00000
+#endif
+#ifndef SYSMMR_BASE
+# define SYSMMR_BASE 0xFFC00000
+#endif
-#ifdef CONFIG_SMP
-static inline ulong get_l1_scratch_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
-}
-static inline ulong get_l1_code_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START;
-}
-static inline ulong get_l1_data_a_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
-}
-static inline ulong get_l1_data_b_start_cpu(int cpu)
-{
- return (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
-}
+/* Every Blackfin so far has on-chip Scratch Pad SRAM like this */
+#ifndef L1_SCRATCH_START
+# define L1_SCRATCH_START 0xFFB00000
+# define L1_SCRATCH_LENGTH 0x1000
+#endif
-static inline ulong get_l1_scratch_start(void)
-{
- return get_l1_scratch_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_code_start(void)
-{
- return get_l1_code_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_data_a_start(void)
-{
- return get_l1_data_a_start_cpu(blackfin_core_id());
-}
-static inline ulong get_l1_data_b_start(void)
-{
- return get_l1_data_b_start_cpu(blackfin_core_id());
-}
+/* Most parts lack on-chip L2 SRAM */
+#ifndef L2_START
+# define L2_START 0
+# define L2_LENGTH 0
+#endif
+
+/* Most parts lack on-chip L1 ROM */
+#ifndef L1_ROM_START
+# define L1_ROM_START 0
+# define L1_ROM_LENGTH 0
+#endif
+
+/* Allow wonky SMP ports to override this */
+#ifndef GET_PDA_SAFE
+# define GET_PDA_SAFE(preg) \
+ preg.l = _cpu_pda; \
+ preg.h = _cpu_pda;
+# define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-#else /* !CONFIG_SMP */
+# ifndef __ASSEMBLY__
-static inline ulong get_l1_scratch_start_cpu(int cpu)
+static inline unsigned long get_l1_scratch_start_cpu(int cpu)
{
return L1_SCRATCH_START;
}
-static inline ulong get_l1_code_start_cpu(int cpu)
+static inline unsigned long get_l1_code_start_cpu(int cpu)
{
return L1_CODE_START;
}
-static inline ulong get_l1_data_a_start_cpu(int cpu)
+static inline unsigned long get_l1_data_a_start_cpu(int cpu)
{
return L1_DATA_A_START;
}
-static inline ulong get_l1_data_b_start_cpu(int cpu)
+static inline unsigned long get_l1_data_b_start_cpu(int cpu)
{
return L1_DATA_B_START;
}
-static inline ulong get_l1_scratch_start(void)
+static inline unsigned long get_l1_scratch_start(void)
{
return get_l1_scratch_start_cpu(0);
}
-static inline ulong get_l1_code_start(void)
+static inline unsigned long get_l1_code_start(void)
{
return get_l1_code_start_cpu(0);
}
-static inline ulong get_l1_data_a_start(void)
+static inline unsigned long get_l1_data_a_start(void)
{
return get_l1_data_a_start_cpu(0);
}
-static inline ulong get_l1_data_b_start(void)
+static inline unsigned long get_l1_data_b_start(void)
{
return get_l1_data_b_start_cpu(0);
}
-#endif /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
+# endif /* __ASSEMBLY__ */
+#endif /* !GET_PDA_SAFE */
-#endif /* _MEM_MAP_H_ */
+#endif
diff --git a/arch/blackfin/include/asm/system.h b/arch/blackfin/include/asm/system.h
index 294dbda24164..85e8f16cf8c2 100644
--- a/arch/blackfin/include/asm/system.h
+++ b/arch/blackfin/include/asm/system.h
@@ -135,11 +135,13 @@ struct __xchg_dummy {
};
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
+#include <mach/blackfin.h>
+
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
unsigned long tmp = 0;
- unsigned long flags = 0;
+ unsigned long flags;
local_irq_save_hw(flags);
diff --git a/arch/blackfin/include/asm/traps.h b/arch/blackfin/include/asm/traps.h
index 34f7295fb070..3cdc454cde23 100644
--- a/arch/blackfin/include/asm/traps.h
+++ b/arch/blackfin/include/asm/traps.h
@@ -111,9 +111,7 @@
level " bits in the Watchpoint Instruction Address Control register (WPIACTL) is set.\n"
#define EXC_0x2A(level) \
"Instruction fetch misaligned address violation\n" \
- level " - Attempted misaligned instruction cache fetch. On a misaligned instruction fetch\n" \
- level " exception, the return address provided in RETX is the destination address which is\n" \
- level " misaligned, rather than the address of the offending instruction.\n"
+ level " - Attempted misaligned instruction cache fetch.\n"
#define EXC_0x2B(level) \
"CPLB protection violation\n" \
level " - Illegal instruction fetch access (memory protection violation).\n"
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 8894e9ffbb57..2f469a1f80fb 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -265,4 +265,26 @@ __clear_user(void *to, unsigned long n)
#define clear_user(to, n) __clear_user(to, n)
+/* How to interpret these return values:
+ * CORE: can be accessed by core load or dma memcpy
+ * CORE_ONLY: can only be accessed by core load
+ * DMA: can only be accessed by dma memcpy
+ * IDMA: can only be accessed by interprocessor dma memcpy (BF561)
+ * ITEST: can be accessed by isram memcpy or dma memcpy
+ */
+enum {
+ BFIN_MEM_ACCESS_CORE = 0,
+ BFIN_MEM_ACCESS_CORE_ONLY,
+ BFIN_MEM_ACCESS_DMA,
+ BFIN_MEM_ACCESS_IDMA,
+ BFIN_MEM_ACCESS_ITEST,
+};
+/**
+ * bfin_mem_access_type() - what kind of memory access is required
+ * @addr: the address to check
+ * @size: number of bytes needed
+ * @return: <0 is error, >=0 is BFIN_MEM_ACCESS_xxx enum (see above)
+ */
+int bfin_mem_access_type(unsigned long addr, unsigned long size);
+
#endif /* _BLACKFIN_UACCESS_H */
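
Editor's note: the kgdb rework later in this patch uses this helper to pick a copy routine; for orientation, a minimal sketch of how a caller might dispatch on the return value (the wrapper and its error handling are illustrative only — dma_memcpy()/isram_memcpy() returning NULL on failure is the convention the kgdb hunks below rely on):

    static int copy_from_any_mem(void *dst, void *src, int size)
    {
            int type = bfin_mem_access_type((unsigned long)src, size);

            switch (type) {
            case BFIN_MEM_ACCESS_CORE:
            case BFIN_MEM_ACCESS_CORE_ONLY:
                    memcpy(dst, src, size);         /* plain core load is fine */
                    return 0;
            case BFIN_MEM_ACCESS_DMA:
                    return dma_memcpy(dst, src, size) ? 0 : -EFAULT;
            case BFIN_MEM_ACCESS_ITEST:
                    return isram_memcpy(dst, src, size) ? 0 : -EFAULT;
            default:
                    return type < 0 ? type : -EFAULT;
            }
    }
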
diff --git a/arch/blackfin/include/asm/unistd.h b/arch/blackfin/include/asm/unistd.h
index da35133c171d..c8e7ee4768cd 100644
--- a/arch/blackfin/include/asm/unistd.h
+++ b/arch/blackfin/include/asm/unistd.h
@@ -381,8 +381,9 @@
#define __NR_preadv 366
#define __NR_pwritev 367
#define __NR_rt_tgsigqueueinfo 368
+#define __NR_perf_counter_open 369
-#define __NR_syscall 369
+#define __NR_syscall 370
#define NR_syscalls __NR_syscall
/* Old optional stuff no one actually uses */
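
Editor's note: a sketch of invoking the newly numbered syscall from userspace; the argument order (attr, pid, cpu, group_fd, flags) is the perf_counter_open convention of this kernel generation and is assumed here rather than taken from the patch:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_counter.h>

    /* 'attr' is a struct perf_counter_attr the caller has filled in */
    static int open_counter(struct perf_counter_attr *attr)
    {
            /* pid 0 = current task, cpu -1 = any, no group, no flags */
            return syscall(__NR_perf_counter_open, attr, 0, -1, -1, 0);
    }
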
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 3731088e181b..141d9281e4b0 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
CFLAGS_REMOVE_ftrace.o = -pg
obj-$(CONFIG_IPIPE) += ipipe.o
-obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index c006a44527bf..36193eed9a1f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -46,13 +46,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_cache |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif
@@ -91,9 +91,9 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
/* Cover L2 memory */
#if L2_LENGTH > 0
dcplb_tbl[cpu][i_d].addr = L2_START;
- dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+ dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
icplb_tbl[cpu][i_i].addr = L2_START;
- icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+ icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
#endif
first_mask_dcplb = i_d;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 784923e52a9a..bcdfe9b0b71f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -150,15 +150,19 @@ static noinline int dcplb_miss(unsigned int cpu)
nr_dcplb_miss[cpu]++;
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
if (bfin_addr_dcacheable(addr)) {
d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BFIN_WT
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+# endif
}
#endif
- if (addr >= physical_mem_end) {
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
&& (status & FAULT_USERSUPV)) {
addr &= ~0x3fffff;
@@ -235,7 +239,7 @@ static noinline int icplb_miss(unsigned int cpu)
i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
/*
* Normal RAM, and possibly the reserved memory area, are
* cacheable.
@@ -245,7 +249,10 @@ static noinline int icplb_miss(unsigned int cpu)
i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
- if (addr >= physical_mem_end) {
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ i_data = L2_IMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & FAULT_USERSUPV)) {
addr &= ~(1 * 1024 * 1024 - 1);
@@ -365,13 +372,18 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
local_irq_save_hw(flags);
current_rwx_mask[cpu] = masks;
- d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
- d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
- d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else {
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
#endif
+ }
disable_dcplb();
for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index d8cde1fc5cb9..b8d22034b9a6 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -342,8 +342,3 @@ void ___ipipe_sync_pipeline(unsigned long syncmask)
}
EXPORT_SYMBOL(show_stack);
-
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 6e31e935bb31..4b5fd36187d9 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,38 +38,15 @@
#include <asm/pda.h>
static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
void ack_bad_irq(unsigned int irq)
{
atomic_inc(&irq_err_count);
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
-static struct irq_chip bad_chip = {
- .ack = dummy_mask_unmask_irq,
- .mask = dummy_mask_unmask_irq,
- .unmask = dummy_mask_unmask_irq,
-};
-
-static int bad_stats;
static struct irq_desc bad_irq_desc = {
- .status = IRQ_DISABLED,
- .chip = &bad_chip,
.handle_irq = handle_bad_irq,
- .depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
- .kstat_irqs = &bad_stats,
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -77,6 +54,7 @@ static struct irq_desc bad_irq_desc = {
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif
+#ifdef CONFIG_PROC_FS
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
@@ -108,50 +86,29 @@ int show_interrupts(struct seq_file *p, void *v)
}
return 0;
}
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
-#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
- struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
- unsigned short pending, other_ints;
#endif
- old_regs = set_irq_regs(regs);
- /*
- * Some hardware gives randomly wrong interrupts. Rather
- * than crashing, do something sensible.
- */
- if (irq >= NR_IRQS)
- desc = &bad_irq_desc;
-
- irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
/* Debugging check for stack overflow: is there less than STACK_WARN free? */
- {
- long sp;
-
- sp = __get_SP() & (THREAD_SIZE-1);
+ long sp = __get_SP() & (THREAD_SIZE - 1);
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- dump_stack();
- printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
- " only %ld bytes free\n",
- __func__, irq, sp - sizeof(struct thread_info));
- }
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ dump_stack();
+ pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+ irq, sp - sizeof(struct thread_info));
}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
#endif
- generic_handle_irq(irq);
#ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+ unsigned short pending, other_ints;
+
/*
* If we're the only interrupt running (ignoring IRQ15 which
* is for syscalls), lower our priority to IRQ14 so that
@@ -165,7 +122,38 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
other_ints = pending & (pending - 1);
if (other_ints == 0)
lower_to_irq14();
-#endif /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ check_stack_overflow(irq);
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (irq >= NR_IRQS)
+ handle_bad_irq(irq, &bad_irq_desc);
+ else
+ generic_handle_irq(irq);
+
+ maybe_lower_to_irq14();
+
irq_exit();
set_irq_regs(old_regs);
@@ -173,14 +161,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
void __init init_IRQ(void)
{
- struct irq_desc *desc;
- int irq;
-
- spin_lock_init(&irq_controller_lock);
- for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
- *desc = bad_irq_desc;
- }
-
init_arch_irq();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index da28f796ad78..cce79d05b90b 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,15 +34,6 @@ int gdb_bfin_vector = -1;
#error change the definition of slavecpulocks
#endif
-#define IN_MEM(addr, size, l1_addr, l1_size) \
-({ \
- unsigned long __addr = (unsigned long)(addr); \
- (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
-})
-#define ASYNC_BANK_SIZE \
- (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
- ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
-
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
gdb_regs[BFIN_R0] = regs->r0;
@@ -463,41 +454,88 @@ static int hex(char ch)
static int validate_memory_access_address(unsigned long addr, int size)
{
- int cpu = raw_smp_processor_id();
-
- if (size < 0)
+ if (size < 0 || addr == 0)
return -EFAULT;
- if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
- return 0;
- if (addr >= SYSMMR_BASE)
- return 0;
- if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
- return 0;
- if (cpu == 0) {
- if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
- return 0;
-#ifdef CONFIG_SMP
- } else if (cpu == 1) {
- if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return bfin_mem_access_type(addr, size);
+}
+
+static int bfin_probe_kernel_read(char *dst, char *src, int size)
+{
+ unsigned long lsrc = (unsigned long)src;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(lsrc, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (lsrc >= SYSMMR_BASE) {
+ if (size == 2 && lsrc % 2 == 0) {
+ u16 mmr = bfin_read16(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ } else if (size == 4 && lsrc % 4 == 0) {
+ u32 mmr = bfin_read32(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_read(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static int bfin_probe_kernel_write(char *dst, char *src, int size)
+{
+ unsigned long ldst = (unsigned long)dst;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(ldst, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (ldst >= SYSMMR_BASE) {
+ if (size == 2 && ldst % 2 == 0) {
+ u16 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write16(dst, mmr);
return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+ } else if (size == 4 && ldst % 4 == 0) {
+ u32 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write32(dst, mmr);
return 0;
-#endif
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_write(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
}
- if (IN_MEM(addr, size, L2_START, L2_LENGTH))
- return 0;
-
return -EFAULT;
}
@@ -509,14 +547,6 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
{
char *tmp;
int err;
- unsigned char *pch;
- unsigned short mmr16;
- unsigned long mmr32;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -524,44 +554,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
*/
tmp = buf + count;
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = *(unsigned short *)mem;
- pch = (unsigned char *)&mmr16;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 2;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = *(unsigned long *)mem;
- pch = (unsigned char *)&mmr32;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 4;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM*/
- if (dma_memcpy(tmp, mem, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_read(tmp, mem, count);
-
+ err = bfin_probe_kernel_read(tmp, mem, count);
if (!err) {
while (count > 0) {
buf = pack_hex_byte(buf, *tmp);
@@ -582,13 +575,8 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
*/
int kgdb_ebin2mem(char *buf, char *mem, int count)
{
- char *tmp_old;
- char *tmp_new;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
+ char *tmp_old, *tmp_new;
int size;
- int cpu = raw_smp_processor_id();
tmp_old = tmp_new = buf;
@@ -601,41 +589,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
tmp_old++;
}
- err = validate_memory_access_address((unsigned long)mem, size);
- if (err)
- return err;
-
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (size) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)buf;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)buf;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, buf, size) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, buf, size);
-
- return err;
+ return bfin_probe_kernel_write(mem, buf, count);
}
/*
@@ -645,16 +599,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
*/
int kgdb_hex2mem(char *buf, char *mem, int count)
{
- char *tmp_raw;
- char *tmp_hex;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
+ char *tmp_raw, *tmp_hex;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -669,39 +614,18 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
*tmp_raw |= hex(*tmp_hex--) << 4;
}
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)tmp_raw;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)tmp_raw;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, tmp_raw, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, tmp_raw, count);
-
- return err;
+ return bfin_probe_kernel_write(mem, tmp_raw, count);
}
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+ unsigned long __addr = (unsigned long)(addr); \
+ (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+ (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
int kgdb_validate_break_address(unsigned long addr)
{
int cpu = raw_smp_processor_id();
@@ -724,46 +648,17 @@ int kgdb_validate_break_address(unsigned long addr)
int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
- int err;
- int cpu = raw_smp_processor_id();
-
- if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
- == NULL)
- return -EFAULT;
-
- if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else {
- err = probe_kernel_read(saved_instr, (char *)addr,
- BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- return probe_kernel_write((char *)addr,
- arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
- }
+ int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+ return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
}
int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
- if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
- /* access L1 instruction SRAM */
- if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else
- return probe_kernel_write((char *)addr,
- (char *)bundle, BREAK_INSTR_SIZE);
+ return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
}
int kgdb_arch_init(void)
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
deleted file mode 100644
index edcfb3865f46..000000000000
--- a/arch/blackfin/kernel/mcount.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * linux/arch/blackfin/mcount.S
- *
- * Copyright (C) 2006 Analog Devices Inc.
- *
- * 2007/04/12 Save index, length, modify and base registers. --rpm
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-.align 4 /* just in case */
-
-ENTRY(__mcount)
- [--sp] = i0;
- [--sp] = i1;
- [--sp] = i2;
- [--sp] = i3;
- [--sp] = l0;
- [--sp] = l1;
- [--sp] = l2;
- [--sp] = l3;
- [--sp] = m0;
- [--sp] = m1;
- [--sp] = m2;
- [--sp] = m3;
- [--sp] = b0;
- [--sp] = b1;
- [--sp] = b2;
- [--sp] = b3;
- [--sp] = ( r7:0, p5:0 );
- [--sp] = ASTAT;
-
- p1.L = _ipipe_trace_enable;
- p1.H = _ipipe_trace_enable;
- r7 = [p1];
- CC = r7 == 0;
- if CC jump out;
- link 0x10;
- r0 = 0x0;
- [sp + 0xc] = r0; /* v */
- r0 = 0x0; /* type: IPIPE_TRACE_FN */
- r1 = rets;
- p0 = [fp]; /* p0: Prior FP */
- r2 = [p0 + 4]; /* r2: Prior RETS */
- call ___ipipe_trace;
- unlink;
-out:
- ASTAT = [sp++];
- ( r7:0, p5:0 ) = [sp++];
- b3 = [sp++];
- b2 = [sp++];
- b1 = [sp++];
- b0 = [sp++];
- m3 = [sp++];
- m2 = [sp++];
- m1 = [sp++];
- m0 = [sp++];
- l3 = [sp++];
- l2 = [sp++];
- l1 = [sp++];
- l0 = [sp++];
- i3 = [sp++];
- i2 = [sp++];
- i1 = [sp++];
- i0 = [sp++];
- rts;
-ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e1d86e456f6..79cad0ac5892 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -344,6 +344,87 @@ void finish_atomic_sections (struct pt_regs *regs)
}
}
+static inline
+int in_mem(unsigned long addr, unsigned long size,
+ unsigned long start, unsigned long end)
+{
+ return addr >= start && addr + size <= end;
+}
+static inline
+int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return const_size &&
+ in_mem(addr, size, const_addr + off, const_addr + const_size);
+}
+static inline
+int in_mem_const(unsigned long addr, unsigned long size,
+ unsigned long const_addr, unsigned long const_size)
+{
+	return in_mem_const_off(addr, size, 0, const_addr, const_size);
+}
+#define IN_ASYNC(bnum, bctlnum) \
+({ \
+ (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
+ bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
+ BFIN_MEM_ACCESS_CORE; \
+})
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size)
+{
+ int cpu = raw_smp_processor_id();
+
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
+ return -EFAULT;
+
+ if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#endif
+ if (in_mem_const(addr, size, L2_START, L2_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (addr >= SYSMMR_BASE)
+ return BFIN_MEM_ACCESS_CORE_ONLY;
+
+ /* We can't read EBIU banks that aren't enabled or we end up hanging
+ * on the access to the async space.
+ */
+ if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
+ return IN_ASYNC(0, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
+ return IN_ASYNC(1, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
+ return IN_ASYNC(2, 1);
+ if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
+ return IN_ASYNC(3, 1);
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_DMA;
+
+ return -EFAULT;
+}
+
#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
@@ -353,51 +434,61 @@ int _access_ok(unsigned long addr, unsigned long size)
{
if (size == 0)
return 1;
- if (addr > (addr + size))
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
return 0;
if (segment_eq(get_fs(), KERNEL_DS))
return 1;
#ifdef CONFIG_MTD_UCLINUX
- if (addr >= memory_start && (addr + size) <= memory_end)
- return 1;
- if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
+ if (1)
+#else
+ if (0)
+#endif
+ {
+ if (in_mem(addr, size, memory_start, memory_end))
+ return 1;
+ if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
+ return 1;
+# ifndef CONFIG_ROMFS_ON_MTD
+ if (0)
+# endif
+ /* For XIP, allow user space to use pointers within the ROMFS. */
+ if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
+ return 1;
+ } else {
+ if (in_mem(addr, size, memory_start, physical_mem_end))
+ return 1;
+ }
+
+ if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
return 1;
-#ifdef CONFIG_ROMFS_ON_MTD
- /* For XIP, allow user space to use pointers within the ROMFS. */
- if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
return 1;
-#endif
-#else
- if (addr >= memory_start && (addr + size) <= physical_mem_end)
+ if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
return 1;
-#endif
- if (addr >= (unsigned long)__init_begin &&
- addr + size <= (unsigned long)__init_end)
+ if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
return 1;
- if (addr >= get_l1_scratch_start()
- && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
+ if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
return 1;
-#if L1_CODE_LENGTH != 0
- if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
- && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
return 1;
-#endif
-#if L1_DATA_A_LENGTH != 0
- if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
- && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
return 1;
-#endif
-#if L1_DATA_B_LENGTH != 0
- if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
- && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
return 1;
-#endif
-#if L2_LENGTH != 0
- if (addr >= L2_START + (_ebss_l2 - _stext_l2)
- && addr + size <= L2_START + L2_LENGTH)
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
return 1;
#endif
+ if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
+ return 1;
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return 1;
+
return 0;
}
EXPORT_SYMBOL(_access_ok);
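
The in_mem*() helpers above replace a pile of per-region #if/&& chains with a single readable predicate. As a sanity check, expanding one of the new calls in _access_ok() by hand gives back exactly the old test it replaced; a sketch, using the L1_CODE_* macros and the _stext_l1/_etext_l1 linker symbols from the Blackfin headers:

	/* in_mem_const_off(addr, size, _etext_l1 - _stext_l1,
	 *                  L1_CODE_START, L1_CODE_LENGTH) expands to: */
	static int l1_code_user_ok(unsigned long addr, unsigned long size)
	{
		unsigned long off = _etext_l1 - _stext_l1;	/* skip kernel-owned L1 text */

		return L1_CODE_LENGTH != 0 &&			/* old "#if L1_CODE_LENGTH != 0" */
		       addr >= L1_CODE_START + off &&
		       addr + size <= L1_CODE_START + L1_CODE_LENGTH;
	}
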
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 6454babdfaff..298f023bcc09 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -117,15 +117,49 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
*/
#ifdef CONFIG_BFIN_ICACHE
printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# ifdef CONFIG_BFIN_L2_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+
+#else
+ printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif
+
#ifdef CONFIG_BFIN_DCACHE
- printk(KERN_INFO "Data Cache Enabled for CPU%u"
-# if defined CONFIG_BFIN_WB
- " (write-back)"
-# elif defined CONFIG_BFIN_WT
- " (write-through)"
+ printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
# endif
- "\n", cpu);
+ " in data cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# if defined CONFIG_BFIN_L2_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_L2_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+#else
+ printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
@@ -443,9 +477,11 @@ static __init void parse_cmdline_early(char *cmdline_p)
} else if (!memcmp(to, "clkin_hz=", 9)) {
to += 9;
early_init_clkin_hz(to);
+#ifdef CONFIG_EARLY_PRINTK
} else if (!memcmp(to, "earlyprintk=", 12)) {
to += 12;
setup_early_printk(to);
+#endif
} else if (!memcmp(to, "memmap=", 7)) {
to += 7;
parse_memmap(to);
@@ -516,7 +552,7 @@ static __init void memory_setup(void)
&& ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
mtd_size =
PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
-# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -544,7 +580,7 @@ static __init void memory_setup(void)
dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
#endif /* CONFIG_MTD_UCLINUX */
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -764,6 +800,11 @@ void __init setup_arch(char **cmdline_p)
{
unsigned long sclk, cclk;
+ /* Check to make sure we are running on the right processor */
+ if (unlikely(CPUID != bfin_cpuid()))
+ printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+ CPU, bfin_cpuid(), bfin_revid());
+
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
@@ -778,14 +819,17 @@ void __init setup_arch(char **cmdline_p)
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
- /* setup memory defaults from the user config */
- physical_mem_end = 0;
- _ramend = get_mem_size() * 1024 * 1024;
-
memset(&bfin_memmap, 0, sizeof(bfin_memmap));
+ /* If the user does not specify things on the command line, use
+ * what the bootloader set things up as
+ */
+ physical_mem_end = 0;
parse_cmdline_early(&command_line[0]);
+ if (_ramend == 0)
+ _ramend = get_mem_size() * 1024 * 1024;
+
if (physical_mem_end == 0)
physical_mem_end = _ramend;
@@ -837,7 +881,8 @@ void __init setup_arch(char **cmdline_p)
defined(CONFIG_BF538) || defined(CONFIG_BF539)
_bfin_swrst = bfin_read_SWRST();
#else
- _bfin_swrst = bfin_read_SYSCR();
+ /* Clear boot mode field */
+ _bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -875,10 +920,7 @@ void __init setup_arch(char **cmdline_p)
else
printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
- if (unlikely(CPUID != bfin_cpuid()))
- printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
- CPU, bfin_cpuid(), bfin_revid());
- else {
+ if (likely(CPUID == bfin_cpuid())) {
if (bfin_revid() != bfin_compiled_revid()) {
if (bfin_compiled_revid() == -1)
printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
@@ -1157,16 +1199,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
icache_size = 0;
seq_printf(m, "cache size\t: %d KB(L1 icache) "
- "%d KB(L1 dcache%s) %d KB(L2 cache)\n",
- icache_size, dcache_size,
-#if defined CONFIG_BFIN_WB
- "-wb"
-#elif defined CONFIG_BFIN_WT
- "-wt"
-#endif
- "", 0);
-
+ "%d KB(L1 dcache) %d KB(L2 cache)\n",
+ icache_size, dcache_size, 0);
seq_printf(m, "%s\n", cache);
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
if (icache_size)
seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
@@ -1239,8 +1290,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_num != num_possible_cpus() - 1)
return 0;
- if (L2_LENGTH)
+ if (L2_LENGTH) {
seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
+ }
seq_printf(m, "board name\t: %s\n", bfin_board_name);
seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
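
One behavioural change in setup.c is easy to miss: _ramend is no longer derived from get_mem_size() before the command line is parsed, so a command-line memory override now wins and the bootloader-programmed size is only the fallback, per the new comment. Condensed, the new ordering is (a sketch; the actual option parsing stays inside parse_cmdline_early()):

	physical_mem_end = 0;			/* nothing decided yet */
	parse_cmdline_early(&command_line[0]);	/* may set _ramend / physical_mem_end */

	if (_ramend == 0)			/* no override on the command line... */
		_ramend = get_mem_size() * 1024 * 1024;	/* ...keep the bootloader's setup */

	if (physical_mem_end == 0)
		physical_mem_end = _ramend;

The CPUID sanity check also moves to the very top of setup_arch(), so a wrong-processor build now warns as early as possible instead of after most of the hardware setup.
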
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d279552fe9b0..8eeb457ce5d5 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -37,6 +37,7 @@
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
+#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
@@ -636,57 +637,30 @@ asmlinkage void trap_c(struct pt_regs *fp)
*/
static bool get_instruction(unsigned short *val, unsigned short *address)
{
-
- unsigned long addr;
-
- addr = (unsigned long)address;
+ unsigned long addr = (unsigned long)address;
/* Check for odd addresses */
if (addr & 0x1)
return false;
- /* Check that things do not wrap around */
- if (addr > (addr + 2))
+ /* MMR region will never have instructions */
+ if (addr >= SYSMMR_BASE)
return false;
- /*
- * Since we are in exception context, we need to do a little address checking
- * We need to make sure we are only accessing valid memory, and
- * we don't read something in the async space that can hang forever
- */
- if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#if L2_LENGTH != 0
- (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
-#endif
- (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
-#if L1_DATA_A_LENGTH != 0
- (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) ||
-#endif
-#if L1_DATA_B_LENGTH != 0
- (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) ||
-#endif
- (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) &&
- addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) &&
- addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) &&
- addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) &&
- addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) {
- *val = *address;
- return true;
+ switch (bfin_mem_access_type(addr, 2)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ *val = *address;
+ return true;
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy(val, address, 2);
+ return true;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy(val, address, 2);
+ return true;
+ default: /* invalid access */
+ return false;
}
-
-#if L1_CODE_LENGTH != 0
- if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) {
- isram_memcpy(val, address, 2);
- return true;
- }
-#endif
-
-
- return false;
}
/*
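
Centralising the region checks also tidies a wart visible in the removed lines above: the old bank 2 and bank 3 tests reused ASYNC_BANK1_SIZE for their bounds, whereas bfin_mem_access_type() now sizes each bank with its own macro and the IN_ASYNC() helper from process.c. For reference, IN_ASYNC(2, 1) expands to roughly:

	static int async_bank2_access(void)
	{
		/* bank 2 must be enabled in EBIU_AMGCTL (AMBEN field)... */
		if ((bfin_read_EBIU_AMGCTL() & 0xe) < ((2 + 1) << 1))
			return -EFAULT;
		/* ...and must not be gated on ARDY, or a core read can stall forever */
		if (bfin_read_EBIU_AMBCTL1() & B2RDYEN)
			return -EFAULT;
		return BFIN_MEM_ACCESS_CORE;
	}
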
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index 1382f0382359..d9791106be9f 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -119,13 +119,19 @@ static struct platform_device bfin_mac_device = {
};
#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
-static struct dsa_platform_data ksz8893m_switch_data = {
+static struct dsa_chip_data ksz8893m_switch_chip_data = {
.mii_bus = &bfin_mii_bus.dev,
+ .port_names = {
+ NULL,
+ "eth%d",
+ "eth%d",
+ "cpu",
+ },
+};
+static struct dsa_platform_data ksz8893m_switch_data = {
+ .nr_chips = 1,
.netdev = &bfin_mac_device.dev,
- .port_names[0] = NULL,
- .port_names[1] = "eth%d",
- .port_names[2] = "eth%d",
- .port_names[3] = "cpu",
+ .chip = &ksz8893m_switch_chip_data,
};
static struct platform_device ksz8893m_switch_device = {
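
This follows the DSA core's move from a flat dsa_platform_data to a per-chip dsa_chip_data array reached through .chip/.nr_chips. The single KSZ8893M here only needs one entry, but the same layout scales to several switches; a hypothetical two-chip variant (names invented, and the .sw_addr MDIO address field is an assumption about the dsa_chip_data of this era) might look like:

	static struct dsa_chip_data example_chips[] = {
		{
			.mii_bus    = &bfin_mii_bus.dev,
			.port_names = { NULL, "lan1", "lan2", "cpu" },
		},
		{
			.mii_bus    = &bfin_mii_bus.dev,
			.sw_addr    = 1,	/* second switch on the same MDIO bus */
			.port_names = { NULL, "lan3", "lan4", "cpu" },
		},
	};

	static struct dsa_platform_data example_switch_data = {
		.netdev   = &bfin_mac_device.dev,
		.nr_chips = ARRAY_SIZE(example_chips),
		.chip     = example_chips,
	};
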
diff --git a/arch/blackfin/mach-bf518/include/mach/anomaly.h b/arch/blackfin/mach-bf518/include/mach/anomaly.h
index b69bd9af38dd..426e064062a0 100644
--- a/arch/blackfin/mach-bf518/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf518/include/mach/anomaly.h
@@ -7,7 +7,7 @@
*/
/* This file should be up to date with:
- * - Revision B, 02/03/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
+ * - Revision C, 06/12/2009; ADSP-BF512/BF514/BF516/BF518 Blackfin Processor Anomaly List
*/
/* We plan on not supporting 0.0 silicon, but 0.1 isn't out yet - sorry */
@@ -18,7 +18,7 @@
#ifndef _MACH_ANOMALY_H_
#define _MACH_ANOMALY_H_
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
@@ -45,29 +45,31 @@
/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
#define ANOMALY_05000426 (1)
/* Software System Reset Corrupts PLL_LOCKCNT Register */
-#define ANOMALY_05000430 (1)
+#define ANOMALY_05000430 (__SILICON_REVISION__ < 1)
/* Incorrect Use of Stack in Lockbox Firmware During Authentication */
#define ANOMALY_05000431 (1)
/* Certain SIC Registers are not Reset After Soft or Core Double Fault Reset */
-#define ANOMALY_05000435 (1)
+#define ANOMALY_05000435 (__SILICON_REVISION__ < 1)
/* PORTx_DRIVE and PORTx_HYSTERESIS Registers Read Back Incorrect Values */
-#define ANOMALY_05000438 (1)
+#define ANOMALY_05000438 (__SILICON_REVISION__ < 1)
/* Preboot Cannot be Used to Alter the PLL_DIV Register */
-#define ANOMALY_05000439 (1)
+#define ANOMALY_05000439 (__SILICON_REVISION__ < 1)
/* bfrom_SysControl() Cannot be Used to Write the PLL_DIV Register */
-#define ANOMALY_05000440 (1)
+#define ANOMALY_05000440 (__SILICON_REVISION__ < 1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
/* Incorrect L1 Instruction Bank B Memory Map Location */
-#define ANOMALY_05000444 (1)
+#define ANOMALY_05000444 (__SILICON_REVISION__ < 1)
/* Incorrect Default Hysteresis Setting for RESET, NMI, and BMODE Signals */
-#define ANOMALY_05000452 (1)
+#define ANOMALY_05000452 (__SILICON_REVISION__ < 1)
/* PWM_TRIPB Signal Not Available on PG10 */
-#define ANOMALY_05000453 (1)
+#define ANOMALY_05000453 (__SILICON_REVISION__ < 1)
/* PPI_FS3 is Driven One Half Cycle Later Than PPI Data */
-#define ANOMALY_05000455 (1)
-/* False Hardware Error when RETI points to invalid memory */
+#define ANOMALY_05000455 (__SILICON_REVISION__ < 1)
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
+#define ANOMALY_05000462 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -78,24 +80,30 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000281 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000285 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
@@ -103,10 +111,13 @@
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000312 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000353 (0)
+#define ANOMALY_05000357 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
+#define ANOMALY_05000371 (0)
#define ANOMALY_05000380 (0)
#define ANOMALY_05000386 (0)
#define ANOMALY_05000389 (0)
@@ -117,5 +128,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
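
Beyond the wording updates, the substantive change in this header is that anomalies 05000430/435/438/439/440/444/452/453/455 are now keyed to __SILICON_REVISION__ < 1, so they compile away on rev 0.1 and later parts, and 05000462 is newly listed. Workaround code can therefore stay unconditional at the source level; an illustrative (not from this patch) use of the macros:

	static void __init report_sport_anomaly(void)
	{
		/* ANOMALY_05000462 is a compile-time constant; on parts and
		 * revisions where it is 0 the branch is discarded entirely. */
		if (ANOMALY_05000462)
			pr_warning("SPORT TX channels may misalign at startup; "
				   "see anomaly 05000462\n");
	}
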
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 267bb7c8bfb5..e8e14c2769ed 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf518.h"
-#include "mem_map.h"
#include "defBF512.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf518/include/mach/mem_map.h b/arch/blackfin/mach-bf518/include/mach/mem_map.h
index 62bcc781bfaa..3c6777cb3532 100644
--- a/arch/blackfin/mach-bf518/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf518/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf518/mem_map.h
- * based on: include/asm-blackfin/mach-bf527/mem_map.h
- * author: Bryan Wu <cooloney@kernel.org>
+ * BF51x memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF518/6/4/2 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_518_H_
-#define _MEM_MAP_518_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -89,20 +67,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE */
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_518_H_ */
+#endif
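
All of the mach mem_map.h headers in this series get the same treatment: the license boilerplate, the duplicated L2/scratchpad defaults and the GET_PDA() helpers move out, and the header refuses to be included directly. The #error guard expects a common wrapper, presumably shaped roughly like this (a sketch only; the real file is arch/blackfin/include/asm/mem_map.h, added elsewhere in the series):

	#ifndef __BFIN_MEM_MAP_H__
	#define __BFIN_MEM_MAP_H__

	#include <mach/mem_map.h>	/* per-machine bases and lengths, as above */

	/* plus the bits every machine used to copy: default L2_START/L2_LENGTH,
	 * the L1 scratchpad window, and the GET_PDA()/GET_PDA_SAFE() helpers */

	#endif
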
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 1eaf27ff722e..f4867ce0c618 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -78,7 +78,6 @@ static struct resource bfin_isp1760_resources[] = {
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index 9f9c0005dcf1..b2f30f06b73e 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -237,10 +237,10 @@ static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
- .type = "m25p16",
+ .type = "sst25wf040",
};
-/* SPI flash chip (m25p64) */
+/* SPI flash chip (sst25wf040) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
.bits_per_word = 8,
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 3e5b7db6b065..799a1d1fa890 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -77,7 +77,6 @@ static struct resource bfin_isp1760_resources[] = {
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf527/include/mach/anomaly.h b/arch/blackfin/mach-bf527/include/mach/anomaly.h
index c84ddea95749..0d63f7406168 100644
--- a/arch/blackfin/mach-bf527/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf527/include/mach/anomaly.h
@@ -34,7 +34,7 @@
#define _ANOMALY_BF527(rev527) (ANOMALY_BF527 && __SILICON_REVISION__ rev527)
#define _ANOMALY_BF526_BF527(rev526, rev527) (_ANOMALY_BF526(rev526) || _ANOMALY_BF527(rev527))
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
@@ -184,8 +184,12 @@
#define ANOMALY_05000456 (1)
/* Host DMA Port Responds to Certain Bus Activity Without HOST_CE Assertion */
#define ANOMALY_05000457 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* USB Rx DMA hang */
+#define ANOMALY_05000465 (1)
+/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+#define ANOMALY_05000467 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -195,24 +199,30 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000281 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000285 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
@@ -220,6 +230,7 @@
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000312 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index 417abcd61f4d..03665a8e16be 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf527.h"
-#include "mem_map.h"
#include "defBF522.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf527/include/mach/mem_map.h b/arch/blackfin/mach-bf527/include/mach/mem_map.h
index 019e0017ad81..d96e894afd2c 100644
--- a/arch/blackfin/mach-bf527/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf527/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf527/mem_map.h
- * based on: include/asm-blackfin/mach-bf537/mem_map.h
- * author: Michael Hennerich (michael.hennerich@analog.com)
+ * BF52x memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF527/5/2 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_527_H_
-#define _MEM_MAP_527_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -89,20 +67,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE */
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_527_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index 89a5ec4ca048..4e3e511bf146 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -32,6 +32,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
@@ -86,6 +87,101 @@ static struct platform_device smc91x_device = {
};
#endif
+#if defined(CONFIG_MTD_PSD4256G) || defined(CONFIG_MTD_PSD4256G_MODULE)
+static const char *map_probes[] = {
+ "stm_flash",
+ NULL,
+};
+
+static struct platdata_mtd_ram stm_pri_data_a = {
+ .mapname = "Flash A Primary",
+ .map_probes = map_probes,
+ .bankwidth = 2,
+};
+
+static struct resource stm_pri_resource_a = {
+ .start = 0x20000000,
+ .end = 0x200fffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device stm_pri_device_a = {
+ .name = "mtd-ram",
+ .id = 0,
+ .dev = {
+ .platform_data = &stm_pri_data_a,
+ },
+ .num_resources = 1,
+ .resource = &stm_pri_resource_a,
+};
+
+static struct platdata_mtd_ram stm_pri_data_b = {
+ .mapname = "Flash B Primary",
+ .map_probes = map_probes,
+ .bankwidth = 2,
+};
+
+static struct resource stm_pri_resource_b = {
+ .start = 0x20100000,
+ .end = 0x201fffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device stm_pri_device_b = {
+ .name = "mtd-ram",
+ .id = 4,
+ .dev = {
+ .platform_data = &stm_pri_data_b,
+ },
+ .num_resources = 1,
+ .resource = &stm_pri_resource_b,
+};
+#endif
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+static struct platdata_mtd_ram sram_data_a = {
+ .mapname = "Flash A SRAM",
+ .bankwidth = 2,
+};
+
+static struct resource sram_resource_a = {
+ .start = 0x20240000,
+ .end = 0x2024ffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device sram_device_a = {
+ .name = "mtd-ram",
+ .id = 8,
+ .dev = {
+ .platform_data = &sram_data_a,
+ },
+ .num_resources = 1,
+ .resource = &sram_resource_a,
+};
+
+static struct platdata_mtd_ram sram_data_b = {
+ .mapname = "Flash B SRAM",
+ .bankwidth = 2,
+};
+
+static struct resource sram_resource_b = {
+ .start = 0x202c0000,
+ .end = 0x202cffff,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device sram_device_b = {
+ .name = "mtd-ram",
+ .id = 9,
+ .dev = {
+ .platform_data = &sram_data_b,
+ },
+ .num_resources = 1,
+ .resource = &sram_resource_b,
+};
+#endif
+
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
@@ -357,6 +453,16 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bfin_dpmc,
+#if defined(CONFIG_MTD_PSD4256G) || defined(CONFIG_MTD_PSD4256G_MODULE)
+ &stm_pri_device_a,
+ &stm_pri_device_b,
+#endif
+
+#if defined(CONFIG_MTD_PLATRAM) || defined(CONFIG_MTD_PLATRAM_MODULE)
+ &sram_device_a,
+ &sram_device_b,
+#endif
+
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
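
The new parallel-flash and SRAM mtd-ram devices only take effect because they are appended to ezkit_devices[], which the board init hands to the platform core in one call, along these lines (simplified; the real ezkit_init() does a bit more than this):

	static int __init ezkit_init(void)
	{
		/* registers every entry in the array above, including the new
		 * stm_pri_device_* and sram_device_* devices when configured */
		return platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices));
	}
	arch_initcall(ezkit_init);
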
diff --git a/arch/blackfin/mach-bf533/include/mach/anomaly.h b/arch/blackfin/mach-bf533/include/mach/anomaly.h
index 31145b509e20..70a0ad69c610 100644
--- a/arch/blackfin/mach-bf533/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf533/include/mach/anomaly.h
@@ -34,7 +34,7 @@
# define ANOMALY_BF533 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
@@ -46,7 +46,7 @@
#define ANOMALY_05000122 (1)
/* Instruction DMA Can Cause Data Cache Fills to Fail (Boot Implications) */
#define ANOMALY_05000158 (__SILICON_REVISION__ < 5)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
#define ANOMALY_05000167 (1)
@@ -56,13 +56,13 @@
#define ANOMALY_05000180 (1)
/* Timer Pin Limitations for PPI TX Modes with External Frame Syncs */
#define ANOMALY_05000183 (__SILICON_REVISION__ < 4)
-/* False Protection Exceptions */
+/* False Protection Exceptions when Speculative Fetch Is Cancelled */
#define ANOMALY_05000189 (__SILICON_REVISION__ < 4)
/* False I/O Pin Interrupts on Edge-Sensitive Inputs When Polarity Setting Is Changed */
#define ANOMALY_05000193 (__SILICON_REVISION__ < 4)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
#define ANOMALY_05000194 (__SILICON_REVISION__ < 4)
-/* Failing MMR Accesses When Stalled by Preceding Memory Read */
+/* Failing MMR Accesses when Preceding Memory Read Stalls */
#define ANOMALY_05000198 (__SILICON_REVISION__ < 5)
/* Current DMA Address Shows Wrong Value During Carry Fix */
#define ANOMALY_05000199 (__SILICON_REVISION__ < 4)
@@ -74,7 +74,7 @@
#define ANOMALY_05000202 (__SILICON_REVISION__ < 5)
/* Specific Sequence That Can Cause DMA Error or DMA Stopping */
#define ANOMALY_05000203 (__SILICON_REVISION__ < 4)
-/* Incorrect data read with write-through cache and allocate cache lines on reads only mode */
+/* Incorrect Data Read with Writethrough "Allocate Cache Lines on Reads Only" Cache Mode */
#define ANOMALY_05000204 (__SILICON_REVISION__ < 4 && ANOMALY_BF533)
/* Recovery from "Brown-Out" Condition */
#define ANOMALY_05000207 (__SILICON_REVISION__ < 4)
@@ -106,7 +106,7 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* Data CPLBs Should Prevent Spurious Hardware Errors */
+/* Data CPLBs Should Prevent False Hardware Errors */
#define ANOMALY_05000246 (__SILICON_REVISION__ < 5)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ == 4)
@@ -148,21 +148,21 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 6)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 6)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 6)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 6)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 6)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 6)
/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
#define ANOMALY_05000301 (__SILICON_REVISION__ < 6)
-/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
+/* SSYNCs after Writes to DMA MMR Registers May Not Be Handled Correctly */
#define ANOMALY_05000302 (__SILICON_REVISION__ < 5)
/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
-/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available On Older Silicon) */
+/* ALT_TIMING Bit in PPI_CONTROL Register Is Not Functional */
#define ANOMALY_05000306 (__SILICON_REVISION__ < 5)
/* SCKELOW Bit Does Not Maintain State Through Hibernate */
#define ANOMALY_05000307 (1) /* note: brokenness is noted in documentation, not anomaly sheet */
@@ -170,11 +170,11 @@
#define ANOMALY_05000310 (1)
/* Erroneous Flag (GPIO) Pin Operations under Specific Sequences */
#define ANOMALY_05000311 (__SILICON_REVISION__ < 6)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 6)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (__SILICON_REVISION__ < 6)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 6)
/* Internal Voltage Regulator Values of 1.05V, 1.10V and 1.15V Not Allowed for LQFP Packages */
#define ANOMALY_05000319 ((ANOMALY_BF531 || ANOMALY_BF532) && __SILICON_REVISION__ < 6)
@@ -200,7 +200,7 @@
#define ANOMALY_05000426 (1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* These anomalies have been "phased" out of analog.com anomaly sheets and are
@@ -215,17 +215,17 @@
#define ANOMALY_05000070 (__SILICON_REVISION__ < 2)
/* Writing FIO_DIR can corrupt a programmable flag's data */
#define ANOMALY_05000079 (__SILICON_REVISION__ < 2)
-/* Timer Auto-Baud Mode requires the UART clock to be enabled */
+/* Timer Auto-Baud Mode requires the UART clock to be enabled. */
#define ANOMALY_05000086 (__SILICON_REVISION__ < 2)
/* Internal Clocking Modes on SPORT0 not supported */
#define ANOMALY_05000088 (__SILICON_REVISION__ < 2)
/* Internal voltage regulator does not wake up from an RTC wakeup */
#define ANOMALY_05000092 (__SILICON_REVISION__ < 2)
-/* The IFLUSH instruction must be preceded by a CSYNC instruction */
+/* The IFLUSH Instruction Must Be Preceded by a CSYNC Instruction */
#define ANOMALY_05000093 (__SILICON_REVISION__ < 2)
-/* Vectoring to an instruction that is presently being filled into the instruction cache may cause erroneous behavior */
+/* Vectoring to instruction that is being filled into the i-cache may cause erroneous behavior */
#define ANOMALY_05000095 (__SILICON_REVISION__ < 2)
-/* PREFETCH, FLUSH, and FLUSHINV must be followed by a CSYNC */
+/* PREFETCH, FLUSH, and FLUSHINV Instructions Must Be Followed by a CSYNC Instruction */
#define ANOMALY_05000096 (__SILICON_REVISION__ < 2)
/* Performance Monitor 0 and 1 are swapped when monitoring memory events */
#define ANOMALY_05000097 (__SILICON_REVISION__ < 2)
@@ -235,45 +235,45 @@
#define ANOMALY_05000100 (__SILICON_REVISION__ < 2)
/* Reading X_MODIFY or Y_MODIFY while DMA channel is active */
#define ANOMALY_05000101 (__SILICON_REVISION__ < 2)
-/* Descriptor-based MemDMA may lock up with 32-bit transfers or if transfers span 64KB buffers */
+/* Descriptor MemDMA may lock up with 32-bit transfers or if transfers span 64KB buffers */
#define ANOMALY_05000102 (__SILICON_REVISION__ < 2)
-/* Incorrect value written to the cycle counters */
+/* Incorrect Value Written to the Cycle Counters */
#define ANOMALY_05000103 (__SILICON_REVISION__ < 2)
-/* Stores to L1 Data memory incorrect when a specific sequence is followed */
+/* Stores to L1 Data Memory Incorrect when a Specific Sequence Is Followed */
#define ANOMALY_05000104 (__SILICON_REVISION__ < 2)
/* Programmable Flag (PF3) functionality not supported in all PPI modes */
#define ANOMALY_05000106 (__SILICON_REVISION__ < 2)
/* Data store can be lost when targeting a cache line fill */
#define ANOMALY_05000107 (__SILICON_REVISION__ < 2)
-/* Reserved bits in SYSCFG register not set at power on */
+/* Reserved Bits in SYSCFG Register Not Set at Power-On */
#define ANOMALY_05000109 (__SILICON_REVISION__ < 3)
/* Infinite Core Stall */
#define ANOMALY_05000114 (__SILICON_REVISION__ < 2)
-/* PPI_FSx may glitch when generated by the on chip Timers */
+/* PPI_FSx may glitch when generated by the on chip Timers. */
#define ANOMALY_05000115 (__SILICON_REVISION__ < 2)
-/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
/* DTEST registers allow access to Data Cache when DTEST_COMMAND< 14 >= 0 */
#define ANOMALY_05000117 (__SILICON_REVISION__ < 2)
/* Booting from an 8-bit or 24-bit Addressable SPI device is not supported */
#define ANOMALY_05000118 (__SILICON_REVISION__ < 2)
-/* DTEST_COMMAND initiated memory access may be incorrect if data cache or DMA is active */
+/* DTEST_COMMAND Initiated Memory Access May Be Incorrect If Data Cache or DMA Is Active */
#define ANOMALY_05000123 (__SILICON_REVISION__ < 3)
/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
#define ANOMALY_05000124 (__SILICON_REVISION__ < 3)
-/* Erroneous exception when enabling cache */
+/* Erroneous Exception when Enabling Cache */
#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
/* SPI clock polarity and phase bits incorrect during booting */
#define ANOMALY_05000126 (__SILICON_REVISION__ < 3)
-/* DMEM_CONTROL is not set on Reset */
+/* DMEM_CONTROL<12> Is Not Set on Reset */
#define ANOMALY_05000137 (__SILICON_REVISION__ < 3)
/* SPI boot will not complete if there is a zero fill block in the loader file */
#define ANOMALY_05000138 (__SILICON_REVISION__ == 2)
-/* Timerx_Config must be set for using the PPI in GP output mode with internal Frame Syncs */
+/* TIMERx_CONFIG[5] must be set for PPI in GP output mode with internal Frame Syncs */
#define ANOMALY_05000139 (__SILICON_REVISION__ < 2)
/* Allowing the SPORT RX FIFO to fill will cause an overflow */
#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
@@ -287,7 +287,7 @@
#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
-/* When booting from a 16-bit asynchronous memory device, the upper 8-bits of each word must be 0x00 */
+/* When booting from 16-bit asynchronous memory, the upper 8 bits of each word must be 0x00 */
#define ANOMALY_05000148 (__SILICON_REVISION__ < 3)
/* Frame Delay in SPORT Multichannel Mode */
#define ANOMALY_05000153 (__SILICON_REVISION__ < 3)
@@ -295,13 +295,13 @@
#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timer1 can not be used for PWMOUT mode when a certain PPI mode is in use */
#define ANOMALY_05000155 (__SILICON_REVISION__ < 3)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
-/* SPORT transmit data is not gated by external frame sync in certain conditions */
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
-/* SDRAM auto-refresh and subsequent Power Ups */
+/* Undefined Behavior when Power-Up Sequence Is Issued to SDRAM during Auto-Refresh */
#define ANOMALY_05000168 (__SILICON_REVISION__ < 3)
-/* DATA CPLB page miss can result in lost write-through cache data writes */
+/* DATA CPLB Page Miss Can Result in Lost Write-Through Data Cache Writes */
#define ANOMALY_05000169 (__SILICON_REVISION__ < 3)
/* DMA vs Core accesses to external memory */
#define ANOMALY_05000173 (__SILICON_REVISION__ < 3)
@@ -309,15 +309,15 @@
#define ANOMALY_05000174 (__SILICON_REVISION__ < 3)
/* Overlapping Sequencer and Memory Stalls */
#define ANOMALY_05000175 (__SILICON_REVISION__ < 3)
-/* Multiplication of (-1) by (-1) followed by an accumulator saturation */
+/* Overflow Bit Asserted when Multiplication of -1 by -1 Followed by Accumulator Saturation */
#define ANOMALY_05000176 (__SILICON_REVISION__ < 3)
-/* Disabling the PPI resets the PPI configuration registers */
+/* Disabling the PPI Resets the PPI Configuration Registers */
#define ANOMALY_05000181 (__SILICON_REVISION__ < 3)
-/* PPI TX Mode with 2 External Frame Syncs */
+/* Early PPI Transmit when FS1 Asserts before FS2 in TX Mode with 2 External Frame Syncs */
#define ANOMALY_05000185 (__SILICON_REVISION__ < 3)
/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
-/* In PPI Transmit Modes with External Frame Syncs POLC */
+/* In PPI Transmit Modes with External Frame Syncs POLC bit must be set to 1 */
#define ANOMALY_05000192 (__SILICON_REVISION__ < 3)
/* Internal Voltage Regulator may not start up */
#define ANOMALY_05000206 (__SILICON_REVISION__ < 3)
@@ -326,6 +326,7 @@
#define ANOMALY_05000120 (0)
#define ANOMALY_05000149 (0)
#define ANOMALY_05000171 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000266 (0)
@@ -345,5 +346,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index 045184f81a29..39aa175f19f5 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -34,7 +34,6 @@
#define BF533_FAMILY
#include "bf533.h"
-#include "mem_map.h"
#include "defBF532.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf533/include/mach/mem_map.h b/arch/blackfin/mach-bf533/include/mach/mem_map.h
index fc33b7cb9937..197af1a398ac 100644
--- a/arch/blackfin/mach-bf533/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf533/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * File: include/asm-blackfin/mach-bf533/mem_map.h
- * Based on:
- * Author:
+ * BF533 memory map
*
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_533_H_
-#define _MEM_MAP_533_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -158,20 +136,4 @@
#endif
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_533_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index ff7228caa7da..c1f76dd2c4ed 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -79,7 +79,6 @@ static struct resource bfin_isp1760_resources[] = {
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf537/include/mach/anomaly.h b/arch/blackfin/mach-bf537/include/mach/anomaly.h
index fc9663425465..57c128cc3b64 100644
--- a/arch/blackfin/mach-bf537/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf537/include/mach/anomaly.h
@@ -34,13 +34,13 @@
# define ANOMALY_BF537 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 2)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
@@ -50,11 +50,11 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 3)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (1)
-/* CLKIN Buffer Output Enable Reset Behavior Is Changed */
+/* Buffered CLKIN Output Is Disabled by Default */
#define ANOMALY_05000247 (1)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ < 3)
-/* EMAC Tx DMA error after an early frame abort */
+/* EMAC TX DMA Error After an Early Frame Abort */
#define ANOMALY_05000252 (__SILICON_REVISION__ < 3)
/* Maximum External Clock Speed for Timers */
#define ANOMALY_05000253 (__SILICON_REVISION__ < 3)
@@ -62,7 +62,7 @@
#define ANOMALY_05000254 (__SILICON_REVISION__ > 2)
/* Entering Hibernate State with RTC Seconds Interrupt Not Functional */
#define ANOMALY_05000255 (__SILICON_REVISION__ < 3)
-/* EMAC MDIO input latched on wrong MDC edge */
+/* EMAC MDIO Input Latched on Wrong MDC Edge */
#define ANOMALY_05000256 (__SILICON_REVISION__ < 3)
/* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
#define ANOMALY_05000257 (__SILICON_REVISION__ < 3)
@@ -80,7 +80,7 @@
#define ANOMALY_05000264 (__SILICON_REVISION__ < 3)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (1)
-/* Memory DMA error when peripheral DMA is running with non-zero DEB_TRAFFIC_PERIOD */
+/* Memory DMA Error when Peripheral DMA Is Running with Non-Zero DEB_TRAFFIC_PERIOD */
#define ANOMALY_05000268 (__SILICON_REVISION__ < 3)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Decrease */
#define ANOMALY_05000270 (__SILICON_REVISION__ < 3)
@@ -92,15 +92,15 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (((ANOMALY_BF536 || ANOMALY_BF537) && __SILICON_REVISION__ < 3) || (ANOMALY_BF534 && __SILICON_REVISION__ < 2))
-/* SPI Master boot mode does not work well with Atmel Data flash devices */
+/* SPI Master Boot Mode Does Not Work Well with Atmel Data Flash Devices */
#define ANOMALY_05000280 (1)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 3)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 3)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 3)
-/* New Feature: EMAC TX DMA Word Alignment (Not Available On Older Silicon) */
+/* TXDWA Bit in EMAC_SYSCTL Register Is Not Functional */
#define ANOMALY_05000285 (__SILICON_REVISION__ < 3)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 3)
@@ -112,25 +112,25 @@
#define ANOMALY_05000305 (__SILICON_REVISION__ < 3)
/* SCKELOW Bit Does Not Maintain State Through Hibernate */
#define ANOMALY_05000307 (__SILICON_REVISION__ < 3)
-/* Writing UART_THR while UART clock is disabled sends erroneous start bit */
+/* Writing UART_THR While UART Clock Is Disabled Sends Erroneous Start Bit */
#define ANOMALY_05000309 (__SILICON_REVISION__ < 3)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (1)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (1)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode: collisions occur in Full Duplex mode */
+/* EMAC RMII Mode: Collisions Occur in Full Duplex Mode */
#define ANOMALY_05000316 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode: TX frames in half duplex fail with status No Carrier */
+/* EMAC RMII Mode: TX Frames in Half Duplex Fail with Status "No Carrier" */
#define ANOMALY_05000321 (__SILICON_REVISION__ < 3)
-/* EMAC RMII mode at 10-Base-T speed: RX frames not received properly */
+/* EMAC RMII Mode at 10-Base-T Speed: RX Frames Not Received Properly */
#define ANOMALY_05000322 (1)
/* Ethernet MAC MDIO Reads Do Not Meet IEEE Specification */
#define ANOMALY_05000341 (__SILICON_REVISION__ >= 3)
-/* New Feature: UART Remains Enabled after UART Boot */
+/* UART Gets Disabled after UART Boot */
#define ANOMALY_05000350 (__SILICON_REVISION__ >= 3)
/* Regulator Programming Blocked when Hibernate Wakeup Source Remains Active */
#define ANOMALY_05000355 (1)
@@ -154,7 +154,7 @@
#define ANOMALY_05000426 (1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
@@ -165,14 +165,17 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000266 (0)
@@ -195,5 +198,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index 7d6069c886f1..f5e5015ad831 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -35,7 +35,6 @@
#define BF537_FAMILY
#include "bf537.h"
-#include "mem_map.h"
#include "defBF534.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf537/include/mach/mem_map.h b/arch/blackfin/mach-bf537/include/mach/mem_map.h
index f9010c4b4bf3..942f08de306b 100644
--- a/arch/blackfin/mach-bf537/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf537/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf537/mem_map.h
- * based on:
- * author:
+ * BF537 memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF537/6/4 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_537_H_
-#define _MEM_MAP_537_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -166,20 +144,4 @@
#endif
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_537_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/anomaly.h b/arch/blackfin/mach-bf538/include/mach/anomaly.h
index 175ca9ef7232..c97acdf85cd3 100644
--- a/arch/blackfin/mach-bf538/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf538/include/mach/anomaly.h
@@ -30,13 +30,13 @@
# define ANOMALY_BF539 0
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
#define ANOMALY_05000179 (1)
@@ -70,11 +70,11 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 4)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 4)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 4)
/* Memory DMA Corruption with 32-Bit Data and Traffic Control */
#define ANOMALY_05000282 (__SILICON_REVISION__ < 4)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (__SILICON_REVISION__ < 4)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 4)
@@ -92,11 +92,11 @@
#define ANOMALY_05000307 (__SILICON_REVISION__ < 4)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 5)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (__SILICON_REVISION__ < 4)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (__SILICON_REVISION__ < 4)
/* PFx Glitch on Write to FIO_FLAG_D or FIO_FLAG_T */
#define ANOMALY_05000318 (ANOMALY_BF539 && __SILICON_REVISION__ < 4)
@@ -110,7 +110,7 @@
#define ANOMALY_05000371 (__SILICON_REVISION__ < 5)
/* Entering Hibernate State with Peripheral Wakeups Enabled Draws Excess Current */
#define ANOMALY_05000374 (__SILICON_REVISION__ == 4)
-/* New Feature: Open-Drain GPIO Outputs on PC1 and PC4 (Not Available on Older Silicon) */
+/* GPIO Pins PC1 and PC4 Can Function as Normal Outputs */
#define ANOMALY_05000375 (__SILICON_REVISION__ < 4)
/* SSYNC Stalls Processor when Executed from Non-Cacheable Memory */
#define ANOMALY_05000402 (__SILICON_REVISION__ < 4)
@@ -126,26 +126,32 @@
#define ANOMALY_05000436 (__SILICON_REVISION__ > 3)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
#define ANOMALY_05000120 (0)
+#define ANOMALY_05000125 (0)
#define ANOMALY_05000149 (0)
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
#define ANOMALY_05000254 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000263 (0)
+#define ANOMALY_05000266 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000305 (0)
@@ -166,5 +172,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 6f628353dde3..9496196ac164 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -35,7 +35,6 @@
#define BF538_FAMILY
#include "bf538.h"
-#include "mem_map.h"
#include "defBF539.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf538/include/mach/mem_map.h b/arch/blackfin/mach-bf538/include/mach/mem_map.h
index 76811966690e..aff00f453e9e 100644
--- a/arch/blackfin/mach-bf538/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf538/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * File: include/asm-blackfin/mach-bf538/mem_map.h
- * Based on:
- * Author:
+ * BF538 memory map
*
- * Created:
- * Description:
- *
- * Rev:
- *
- * Modified:
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_538_H_
-#define _MEM_MAP_538_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x20300000 /* Async Bank 3 */
@@ -93,21 +71,4 @@
#define BFIN_DSUPBANKS 0
#endif /*CONFIG_BFIN_DCACHE*/
-
-/* Level 2 Memory - none */
-
-#define L2_START 0
-#define L2_LENGTH 0
-
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif /* _MEM_MAP_538_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 805a57b5e650..81f5b95cc361 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -76,7 +76,6 @@ static struct resource bfin_isp1760_resources[] = {
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf548/include/mach/anomaly.h b/arch/blackfin/mach-bf548/include/mach/anomaly.h
index c510ae688e28..18a4cd24f673 100644
--- a/arch/blackfin/mach-bf548/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf548/include/mach/anomaly.h
@@ -18,7 +18,7 @@
# error will not work on BF548 silicon version 0.0, or 0.1
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
#define ANOMALY_05000119 (1)
@@ -30,17 +30,17 @@
#define ANOMALY_05000265 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 1)
/* SSYNCs After Writes To CAN/DMA MMR Registers Are Not Always Handled Correctly */
#define ANOMALY_05000304 (__SILICON_REVISION__ < 1)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (__SILICON_REVISION__ < 1)
/* TWI Slave Boot Mode Is Not Functional */
#define ANOMALY_05000324 (__SILICON_REVISION__ < 1)
-/* External FIFO Boot Mode Is Not Functional */
+/* FIFO Boot Mode Not Functional */
#define ANOMALY_05000325 (__SILICON_REVISION__ < 2)
/* Data Lost When Core and DMA Accesses Are Made to the USB FIFO Simultaneously */
#define ANOMALY_05000327 (__SILICON_REVISION__ < 1)
@@ -178,8 +178,12 @@
#define ANOMALY_05000450 (1)
/* USB Receive Interrupt Is Not Generated in DMA Mode 1 */
#define ANOMALY_05000456 (__SILICON_REVISION__ < 3)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
+/* USB Rx DMA hang */
+#define ANOMALY_05000465 (1)
+/* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
+#define ANOMALY_05000467 (1)
/* Anomalies that don't exist on this proc */
#define ANOMALY_05000099 (0)
@@ -189,30 +193,36 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000171 (0)
#define ANOMALY_05000179 (0)
+#define ANOMALY_05000182 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000198 (0)
+#define ANOMALY_05000202 (0)
#define ANOMALY_05000215 (0)
#define ANOMALY_05000220 (0)
#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
#define ANOMALY_05000248 (0)
#define ANOMALY_05000250 (0)
#define ANOMALY_05000254 (0)
+#define ANOMALY_05000257 (0)
#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
+#define ANOMALY_05000283 (0)
#define ANOMALY_05000287 (0)
#define ANOMALY_05000301 (0)
#define ANOMALY_05000305 (0)
#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
+#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index cf6c1500222a..6b97396d817f 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -33,7 +33,6 @@
#define _MACH_BLACKFIN_H_
#include "bf548.h"
-#include "mem_map.h"
#include "anomaly.h"
#ifdef CONFIG_BF542
diff --git a/arch/blackfin/mach-bf548/include/mach/mem_map.h b/arch/blackfin/mach-bf548/include/mach/mem_map.h
index 70b9c1194024..caac2dfb41eb 100644
--- a/arch/blackfin/mach-bf548/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf548/include/mach/mem_map.h
@@ -1,38 +1,16 @@
/*
- * file: include/asm-blackfin/mach-bf548/mem_map.h
- * based on:
- * author:
+ * BF548 memory map
*
- * created:
- * description:
- * Memory MAP Common header file for blackfin BF537/6/4 of processors.
- * rev:
- *
- * modified:
- *
- * bugs: enter bugs at http://blackfin.uclinux.org/
- *
- * this program is free software; you can redistribute it and/or modify
- * it under the terms of the gnu general public license as published by
- * the free software foundation; either version 2, or (at your option)
- * any later version.
- *
- * this program is distributed in the hope that it will be useful,
- * but without any warranty; without even the implied warranty of
- * merchantability or fitness for a particular purpose. see the
- * gnu general public license for more details.
- *
- * you should have received a copy of the gnu general public license
- * along with this program; see the file copying.
- * if not, write to the free software foundation,
- * 59 temple place - suite 330, boston, ma 02111-1307, usa.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_548_H_
-#define _MEM_MAP_548_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x2C000000 /* Async Bank 3 */
@@ -103,15 +81,4 @@
# define L2_LENGTH 0x20000
#endif
-/* Scratch Pad Memory */
-
-#define L1_SCRATCH_START 0xFFB00000
-#define L1_SCRATCH_LENGTH 0x1000
-
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
-
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
-
-#endif/* _MEM_MAP_548_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index b5ef7ff7b7bd..4df904f9e90a 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -62,7 +62,6 @@ static struct resource bfin_isp1760_resources[] = {
static struct isp1760_platform_data isp1760_priv = {
.is_isp1761 = 0,
- .port1_disable = 0,
.bus_width_16 = 1,
.port1_otg = 0,
.analog_oc = 0,
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index dccd396cd931..94b8e277f09d 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -18,19 +18,19 @@
# error will not work on BF561 silicon version 0.0, 0.1, 0.2, or 0.4
#endif
-/* Multi-issue instruction with dsp32shiftimm in slot1 and P-reg store in slot 2 not supported */
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
#define ANOMALY_05000074 (1)
/* UART Line Status Register (UART_LSR) Bits Are Not Updated at the Same Time */
#define ANOMALY_05000099 (__SILICON_REVISION__ < 5)
-/* Trace Buffers may record discontinuities into emulation mode and/or exception, NMI, reset handlers */
+/* Trace Buffers May Contain Errors in Emulation Mode and/or Exception, NMI, Reset Handlers */
#define ANOMALY_05000116 (__SILICON_REVISION__ < 3)
-/* Testset instructions restricted to 32-bit aligned memory locations */
+/* TESTSET Instructions Restricted to 32-Bit Aligned Memory Locations */
#define ANOMALY_05000120 (1)
/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
#define ANOMALY_05000122 (1)
-/* Erroneous exception when enabling cache */
+/* Erroneous Exception when Enabling Cache */
#define ANOMALY_05000125 (__SILICON_REVISION__ < 3)
-/* Signbits instruction not functional under certain conditions */
+/* SIGNBITS Instruction Not Functional under Certain Conditions */
#define ANOMALY_05000127 (1)
/* Two bits in the Watchpoint Status Register (WPSTAT) are swapped */
#define ANOMALY_05000134 (__SILICON_REVISION__ < 3)
@@ -40,7 +40,7 @@
#define ANOMALY_05000136 (__SILICON_REVISION__ < 3)
/* Allowing the SPORT RX FIFO to fill will cause an overflow */
#define ANOMALY_05000140 (__SILICON_REVISION__ < 3)
-/* An Infinite Stall occurs with a particular sequence of consecutive dual dag events */
+/* Infinite Stall may occur with a particular sequence of consecutive dual dag events */
#define ANOMALY_05000141 (__SILICON_REVISION__ < 3)
/* Interrupts may be lost when a programmable input flag is configured to be edge sensitive */
#define ANOMALY_05000142 (__SILICON_REVISION__ < 3)
@@ -52,7 +52,7 @@
#define ANOMALY_05000146 (__SILICON_REVISION__ < 3)
/* Source MDMA descriptor may stop with a DMA Error near beginning of descriptor fetch */
#define ANOMALY_05000147 (__SILICON_REVISION__ < 3)
-/* IMDMA S1/D1 channel may stall */
+/* IMDMA S1/D1 Channel May Stall */
#define ANOMALY_05000149 (1)
/* DMA engine may lose data due to incorrect handshaking */
#define ANOMALY_05000150 (__SILICON_REVISION__ < 3)
@@ -66,7 +66,7 @@
#define ANOMALY_05000154 (__SILICON_REVISION__ < 3)
/* Timers in PWM-Out Mode with PPI GP Receive (Input) Mode with 0 Frame Syncs */
#define ANOMALY_05000156 (__SILICON_REVISION__ < 4)
-/* Killed 32-bit MMR write leads to next system MMR access thinking it should be 32-bit */
+/* Killed 32-Bit MMR Write Leads to Next System MMR Access Thinking It Should Be 32-Bit */
#define ANOMALY_05000157 (__SILICON_REVISION__ < 3)
/* DMA Lock-up at CCLK to SCLK ratios of 4:1, 2:1, or 1:1 */
#define ANOMALY_05000159 (__SILICON_REVISION__ < 3)
@@ -76,17 +76,17 @@
#define ANOMALY_05000161 (__SILICON_REVISION__ < 3)
/* DMEM_CONTROL<12> is not set on Reset */
#define ANOMALY_05000162 (__SILICON_REVISION__ < 3)
-/* SPORT transmit data is not gated by external frame sync in certain conditions */
+/* SPORT Transmit Data Is Not Gated by External Frame Sync in Certain Conditions */
#define ANOMALY_05000163 (__SILICON_REVISION__ < 3)
-/* PPI Data Lengths Between 8 and 16 Do Not Zero Out Upper Bits */
+/* PPI Data Lengths between 8 and 16 Do Not Zero Out Upper Bits */
#define ANOMALY_05000166 (1)
/* Turning SPORTs on while External Frame Sync Is Active May Corrupt Data */
#define ANOMALY_05000167 (1)
-/* SDRAM auto-refresh and subsequent Power Ups */
+/* Undefined Behavior when Power-Up Sequence Is Issued to SDRAM during Auto-Refresh */
#define ANOMALY_05000168 (__SILICON_REVISION__ < 5)
-/* DATA CPLB page miss can result in lost write-through cache data writes */
+/* DATA CPLB Page Miss Can Result in Lost Write-Through Data Cache Writes */
#define ANOMALY_05000169 (__SILICON_REVISION__ < 5)
-/* Boot-ROM code modifies SICA_IWRx wakeup registers */
+/* Boot-ROM Modifies SICA_IWRx Wakeup Registers */
#define ANOMALY_05000171 (__SILICON_REVISION__ < 5)
/* DSPID register values incorrect */
#define ANOMALY_05000172 (__SILICON_REVISION__ < 3)
@@ -96,29 +96,29 @@
#define ANOMALY_05000174 (__SILICON_REVISION__ < 5)
/* Overlapping Sequencer and Memory Stalls */
#define ANOMALY_05000175 (__SILICON_REVISION__ < 5)
-/* Multiplication of (-1) by (-1) followed by an accumulator saturation */
+/* Overflow Bit Asserted when Multiplication of -1 by -1 Followed by Accumulator Saturation */
#define ANOMALY_05000176 (__SILICON_REVISION__ < 5)
/* PPI_COUNT Cannot Be Programmed to 0 in General Purpose TX or RX Modes */
#define ANOMALY_05000179 (__SILICON_REVISION__ < 5)
/* PPI_DELAY Not Functional in PPI Modes with 0 Frame Syncs */
#define ANOMALY_05000180 (1)
-/* Disabling the PPI resets the PPI configuration registers */
+/* Disabling the PPI Resets the PPI Configuration Registers */
#define ANOMALY_05000181 (__SILICON_REVISION__ < 5)
-/* IMDMA does not operate to full speed for 600MHz and higher devices */
+/* Internal Memory DMA Does Not Operate at Full Speed */
#define ANOMALY_05000182 (1)
-/* Timer Pin limitations for PPI TX Modes with External Frame Syncs */
+/* Timer Pin Limitations for PPI TX Modes with External Frame Syncs */
#define ANOMALY_05000184 (__SILICON_REVISION__ < 5)
-/* PPI TX Mode with 2 External Frame Syncs */
+/* Early PPI Transmit when FS1 Asserts before FS2 in TX Mode with 2 External Frame Syncs */
#define ANOMALY_05000185 (__SILICON_REVISION__ < 5)
-/* PPI packing with Data Length greater than 8 bits (not a meaningful mode) */
+/* Upper PPI Pins Driven when PPI Packing Enabled and Data Length >8 Bits */
#define ANOMALY_05000186 (__SILICON_REVISION__ < 5)
/* IMDMA Corrupted Data after a Halt */
#define ANOMALY_05000187 (1)
/* IMDMA Restrictions on Descriptor and Buffer Placement in Memory */
#define ANOMALY_05000188 (__SILICON_REVISION__ < 5)
-/* False Protection Exceptions */
+/* False Protection Exceptions when Speculative Fetch Is Cancelled */
#define ANOMALY_05000189 (__SILICON_REVISION__ < 5)
-/* PPI not functional at core voltage < 1Volt */
+/* PPI Not Functional at Core Voltage < 1Volt */
#define ANOMALY_05000190 (1)
/* PPI does not invert the Driving PPICLK edge in Transmit Modes */
#define ANOMALY_05000191 (__SILICON_REVISION__ < 3)
@@ -126,7 +126,7 @@
#define ANOMALY_05000193 (__SILICON_REVISION__ < 5)
/* Restarting SPORT in Specific Modes May Cause Data Corruption */
#define ANOMALY_05000194 (__SILICON_REVISION__ < 5)
-/* Failing MMR Accesses When Stalled by Preceding Memory Read */
+/* Failing MMR Accesses when Preceding Memory Read Stalls */
#define ANOMALY_05000198 (__SILICON_REVISION__ < 5)
/* Current DMA Address Shows Wrong Value During Carry Fix */
#define ANOMALY_05000199 (__SILICON_REVISION__ < 5)
@@ -134,9 +134,9 @@
#define ANOMALY_05000200 (__SILICON_REVISION__ < 5)
/* Possible Infinite Stall with Specific Dual-DAG Situation */
#define ANOMALY_05000202 (__SILICON_REVISION__ < 5)
-/* Incorrect data read with write-through cache and allocate cache lines on reads only mode */
+/* Incorrect Data Read with Writethrough "Allocate Cache Lines on Reads Only" Cache Mode */
#define ANOMALY_05000204 (__SILICON_REVISION__ < 5)
-/* Specific sequence that can cause DMA error or DMA stopping */
+/* Specific Sequence that Can Cause DMA Error or DMA Stopping */
#define ANOMALY_05000205 (__SILICON_REVISION__ < 5)
/* Recovery from "Brown-Out" Condition */
#define ANOMALY_05000207 (__SILICON_REVISION__ < 5)
@@ -158,7 +158,7 @@
#define ANOMALY_05000230 (__SILICON_REVISION__ < 5)
/* UART STB Bit Incorrectly Affects Receiver Setting */
#define ANOMALY_05000231 (__SILICON_REVISION__ < 5)
-/* SPORT data transmit lines are incorrectly driven in multichannel mode */
+/* SPORT Data Transmit Lines Are Incorrectly Driven in Multichannel Mode */
#define ANOMALY_05000232 (__SILICON_REVISION__ < 5)
/* DF Bit in PLL_CTL Register Does Not Respond to Hardware Reset */
#define ANOMALY_05000242 (__SILICON_REVISION__ < 5)
@@ -166,7 +166,7 @@
#define ANOMALY_05000244 (__SILICON_REVISION__ < 5)
/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
#define ANOMALY_05000245 (__SILICON_REVISION__ < 5)
-/* TESTSET operation forces stall on the other core */
+/* TESTSET Operation Forces Stall on the Other Core */
#define ANOMALY_05000248 (__SILICON_REVISION__ < 5)
/* Incorrect Bit Shift of Data Word in Multichannel (TDM) Mode in Certain Conditions */
#define ANOMALY_05000250 (__SILICON_REVISION__ > 2 && __SILICON_REVISION__ < 5)
@@ -192,9 +192,9 @@
#define ANOMALY_05000264 (__SILICON_REVISION__ < 5)
/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
#define ANOMALY_05000265 (__SILICON_REVISION__ < 5)
-/* IMDMA destination IRQ status must be read prior to using IMDMA */
+/* IMDMA Destination IRQ Status Must Be Read Prior to Using IMDMA */
#define ANOMALY_05000266 (__SILICON_REVISION__ > 3)
-/* IMDMA may corrupt data under certain conditions */
+/* IMDMA May Corrupt Data under Certain Conditions */
#define ANOMALY_05000267 (1)
/* High I/O Activity Causes Output Voltage of Internal Voltage Regulator (Vddint) to Increase */
#define ANOMALY_05000269 (1)
@@ -202,7 +202,7 @@
#define ANOMALY_05000270 (1)
/* Certain Data Cache Writethrough Modes Fail for Vddint <= 0.9V */
#define ANOMALY_05000272 (1)
-/* Data cache write back to external synchronous memory may be lost */
+/* Data Cache Write Back to External Synchronous Memory May Be Lost */
#define ANOMALY_05000274 (1)
/* PPI Timing and Sampling Information Updates */
#define ANOMALY_05000275 (__SILICON_REVISION__ > 2)
@@ -212,17 +212,17 @@
#define ANOMALY_05000277 (__SILICON_REVISION__ < 3)
/* Disabling Peripherals with DMA Running May Cause DMA System Instability */
#define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
-/* False Hardware Error Exception When ISR Context Is Not Restored */
+/* False Hardware Error Exception when ISR Context Is Not Restored */
#define ANOMALY_05000281 (__SILICON_REVISION__ < 5)
-/* System MMR Write Is Stalled Indefinitely When Killed in a Particular Stage */
+/* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
#define ANOMALY_05000283 (1)
-/* A read will receive incorrect data under certain conditions */
+/* Reads Will Receive Incorrect Data under Certain Conditions */
#define ANOMALY_05000287 (__SILICON_REVISION__ < 5)
/* SPORTs May Receive Bad Data If FIFOs Fill Up */
#define ANOMALY_05000288 (__SILICON_REVISION__ < 5)
/* Memory-To-Memory DMA Source/Destination Descriptors Must Be in Same Memory Space */
#define ANOMALY_05000301 (1)
-/* SSYNCs After Writes To DMA MMR Registers May Not Be Handled Correctly */
+/* SSYNCs after Writes to DMA MMR Registers May Not Be Handled Correctly */
#define ANOMALY_05000302 (1)
/* SPORT_HYS Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000305 (__SILICON_REVISION__ < 5)
@@ -230,25 +230,25 @@
#define ANOMALY_05000307 (__SILICON_REVISION__ < 5)
/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
#define ANOMALY_05000310 (1)
-/* Errors When SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
+/* Errors when SSYNC, CSYNC, or Loads to LT, LB and LC Registers Are Interrupted */
#define ANOMALY_05000312 (1)
/* PPI Is Level-Sensitive on First Transfer In Single Frame Sync Modes */
#define ANOMALY_05000313 (1)
-/* Killed System MMR Write Completes Erroneously On Next System MMR Access */
+/* Killed System MMR Write Completes Erroneously on Next System MMR Access */
#define ANOMALY_05000315 (1)
-/* PF2 Output Remains Asserted After SPI Master Boot */
+/* PF2 Output Remains Asserted after SPI Master Boot */
#define ANOMALY_05000320 (__SILICON_REVISION__ > 3)
-/* Erroneous GPIO Flag Pin Operations Under Specific Sequences */
+/* Erroneous GPIO Flag Pin Operations under Specific Sequences */
#define ANOMALY_05000323 (1)
-/* SPORT Secondary Receive Channel Not Functional When Word Length Exceeds 16 Bits */
+/* SPORT Secondary Receive Channel Not Functional when Word Length >16 Bits */
#define ANOMALY_05000326 (__SILICON_REVISION__ > 3)
-/* New Feature: 24-Bit SPI Boot Mode Support (Not Available On Older Silicon) */
+/* 24-Bit SPI Boot Mode Is Not Functional */
#define ANOMALY_05000331 (__SILICON_REVISION__ < 5)
-/* New Feature: Slave SPI Boot Mode Supported (Not Available On Older Silicon) */
+/* Slave SPI Boot Mode Is Not Functional */
#define ANOMALY_05000332 (__SILICON_REVISION__ < 5)
-/* Flag Data Register Writes One SCLK Cycle After Edge Is Detected May Clear Interrupt Status */
+/* Flag Data Register Writes One SCLK Cycle after Edge Is Detected May Clear Interrupt Status */
#define ANOMALY_05000333 (__SILICON_REVISION__ < 5)
-/* New Feature: Additional PPI Frame Sync Sampling Options (Not Available on Older Silicon) */
+/* ALT_TIMING Bit in PLL_CTL Register Is Not Functional */
#define ANOMALY_05000339 (__SILICON_REVISION__ < 5)
/* Memory DMA FIFO Causes Throughput Degradation on Writes to External Memory */
#define ANOMALY_05000343 (__SILICON_REVISION__ < 5)
@@ -276,7 +276,7 @@
#define ANOMALY_05000428 (__SILICON_REVISION__ > 3)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
#define ANOMALY_05000443 (1)
-/* False Hardware Error when RETI points to invalid memory */
+/* False Hardware Error when RETI Points to Invalid Memory */
#define ANOMALY_05000461 (1)
/* Anomalies that don't exist on this proc */
@@ -284,6 +284,7 @@
#define ANOMALY_05000158 (0)
#define ANOMALY_05000183 (0)
#define ANOMALY_05000233 (0)
+#define ANOMALY_05000234 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000353 (1)
@@ -298,5 +299,7 @@
#define ANOMALY_05000448 (0)
#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
+#define ANOMALY_05000465 (0)
+#define ANOMALY_05000467 (0)
#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index f79f6626b7ec..8be31358ef88 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -34,7 +34,6 @@
#define BF561_FAMILY
#include "bf561.h"
-#include "mem_map.h"
#include "defBF561.h"
#include "anomaly.h"
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index 419dffdc96eb..a63e15c86d90 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -1,13 +1,16 @@
/*
- * Memory MAP
- * Common header file for blackfin BF561 of processors.
+ * BF561 memory map
+ *
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
*/
-#ifndef _MEM_MAP_561_H_
-#define _MEM_MAP_561_H_
+#ifndef __BFIN_MACH_MEM_MAP_H__
+#define __BFIN_MACH_MEM_MAP_H__
-#define COREMMR_BASE 0xFFE00000 /* Core MMRs */
-#define SYSMMR_BASE 0xFFC00000 /* System MMRs */
+#ifndef __BFIN_MEM_MAP_H__
+# error "do not include mach/mem_map.h directly -- use asm/mem_map.h"
+#endif
/* Async Memory Banks */
#define ASYNC_BANK3_BASE 0x2C000000 /* Async Bank 3 */
@@ -82,9 +85,6 @@
#define COREA_L1_SCRATCH_START 0xFFB00000
#define COREB_L1_SCRATCH_START 0xFF700000
-#define L1_SCRATCH_START COREA_L1_SCRATCH_START
-#define L1_SCRATCH_LENGTH 0x1000
-
#ifdef __ASSEMBLY__
/*
@@ -155,14 +155,42 @@
dreg = ROT dreg BY -1; \
dreg = CC;
-#else
-#define GET_PDA_SAFE(preg) \
- preg.l = _cpu_pda; \
- preg.h = _cpu_pda;
+static inline unsigned long get_l1_scratch_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
+}
+static inline unsigned long get_l1_code_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_CODE_START : COREA_L1_CODE_START;
+}
+static inline unsigned long get_l1_data_a_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;
+}
+static inline unsigned long get_l1_data_b_start_cpu(int cpu)
+{
+ return cpu ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;
+}
+
+static inline unsigned long get_l1_scratch_start(void)
+{
+ return get_l1_scratch_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_code_start(void)
+{
+ return get_l1_code_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_data_a_start(void)
+{
+ return get_l1_data_a_start_cpu(blackfin_core_id());
+}
+static inline unsigned long get_l1_data_b_start(void)
+{
+ return get_l1_data_b_start_cpu(blackfin_core_id());
+}
-#define GET_PDA(preg, dreg) GET_PDA_SAFE(preg)
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
-#endif /* _MEM_MAP_533_H_ */
+#endif
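/*
 * Editorial sketch (not part of the patch above): with the fixed
 * L1_SCRATCH_START and GET_PDA macros gone from this header, per-core
 * callers are expected to go through the new inline accessors added in
 * the hunk above.  A minimal, hypothetical caller might look like this:
 */
static unsigned long my_scratch_base(void)
{
	/* L1 scratchpad base of whichever core is executing this code */
	return get_l1_scratch_start();
}

static unsigned long coreb_scratch_base(void)
{
	/* L1 scratchpad base of a specific core: 0 = core A, 1 = core B */
	return get_l1_scratch_start_cpu(1);
}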
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index da93d9207165..5998d8632a73 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -74,7 +74,7 @@
/* if 220 exists, can not set External Memory WB and L2 not_cached, either External Memory not_cached and L2 WB */
#if ANOMALY_05000220 && \
- ((defined(CONFIG_BFIN_WB) && defined(CONFIG_BFIN_L2_NOT_CACHED)) || \
- (!defined(CONFIG_BFIN_DCACHE) && defined(CONFIG_BFIN_L2_WB)))
+ ((defined(CONFIG_BFIN_EXTMEM_WRITEBACK) && !defined(CONFIG_BFIN_L2_DCACHEABLE)) || \
+ (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
# error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
#endif
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 70e3411f558c..85c658083279 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -141,7 +141,7 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
sclk = get_sclk() / 1000;
#if ANOMALY_05000273 || ANOMALY_05000274 || \
- (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
+ (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 31fa313e81cf..5a4e7c7fd92c 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1609,6 +1609,7 @@ ENTRY(_sys_call_table)
.long _sys_preadv
.long _sys_pwritev
.long _sys_rt_tgsigqueueinfo
+ .long _sys_perf_counter_open
.rept NR_syscalls-(.-_sys_call_table)/4
.long _sys_ni_syscall
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index af70f09acd55..b42150190d0e 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1052,35 +1052,34 @@ int __init init_arch_irq(void)
set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
- case IRQ_TIMER0:
- set_irq_handler(irq, handle_percpu_irq);
- break;
-#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
set_irq_handler(irq, handle_percpu_irq);
break;
#endif
- default:
#ifdef CONFIG_IPIPE
- /*
- * We want internal interrupt sources to be
- * masked, because ISRs may trigger interrupts
- * recursively (e.g. DMA), but interrupts are
- * _not_ masked at CPU level. So let's handle
- * most of them as level interrupts, except
- * the timer interrupt which is special.
- */
- if (irq == IRQ_SYSTMR || irq == IRQ_CORETMR)
- set_irq_handler(irq, handle_simple_irq);
- else
- set_irq_handler(irq, handle_level_irq);
+#ifndef CONFIG_TICKSOURCE_CORETMR
+ case IRQ_TIMER0:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+#endif /* !CONFIG_TICKSOURCE_CORETMR */
+ case IRQ_CORETMR:
+ set_irq_handler(irq, handle_simple_irq);
+ break;
+ default:
+ set_irq_handler(irq, handle_level_irq);
+ break;
#else /* !CONFIG_IPIPE */
+#ifdef CONFIG_TICKSOURCE_GPTMR0
+ case IRQ_TIMER0:
+ set_irq_handler(irq, handle_percpu_irq);
+ break;
+#endif /* CONFIG_TICKSOURCE_GPTMR0 */
+ default:
set_irq_handler(irq, handle_simple_irq);
-#endif /* !CONFIG_IPIPE */
break;
+#endif /* !CONFIG_IPIPE */
}
}
@@ -1224,15 +1223,14 @@ __attribute__((l1_text))
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
- struct ipipe_domain *this_domain = ipipe_current_domain;
+ struct ipipe_domain *this_domain = __ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
int irq, s;
- if (likely(vec == EVT_IVTMR_P)) {
+ if (likely(vec == EVT_IVTMR_P))
irq = IRQ_CORETMR;
-
- } else {
+ else {
#if defined(SIC_ISR0) || defined(SICA_ISR0)
unsigned long sic_status[3];
@@ -1262,12 +1260,11 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
break;
}
#endif
-
irq = ivg->irqno;
}
if (irq == IRQ_SYSTMR) {
-#ifndef CONFIG_GENERIC_CLOCKEVENTS
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
/* This is basically what we need from the register frame. */
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index bce5a84be49f..9e7e27b7fc8d 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -132,7 +132,7 @@ int bf53x_resume_l1_mem(unsigned char *memptr)
return 0;
}
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
static void flushinv_all_dcache(void)
{
u32 way, bank, subbank, set;
@@ -175,7 +175,7 @@ static inline void dcache_disable(void)
#ifdef CONFIG_BFIN_DCACHE
unsigned long ctrl;
-#ifdef CONFIG_BFIN_WB
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
#endif
SSYNC();
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 014a55abd09a..68bd0bd680cd 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -160,7 +160,7 @@ void __init mem_init(void)
/* do not count in kernel image between _rambase and _ramstart */
reservedpages -= (_ramstart - _rambase) >> PAGE_SHIFT;
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
reservedpages += (_ramend - memory_end - DMA_UNCACHED_REGION) >> PAGE_SHIFT;
#endif
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index 0490794fe4aa..745e095fe82e 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -9,6 +9,11 @@ extern void pci_iommu_shutdown(void);
extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_detected;
+#ifdef CONFIG_DMAR
+extern int iommu_pass_through;
+#else
+#define iommu_pass_through (0)
+#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
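/*
 * Editorial note (sketch, not from the patch): defining iommu_pass_through
 * as the constant (0) when CONFIG_DMAR is off lets callers test it
 * unconditionally and rely on the compiler to drop the dead branch, as the
 * pci-swiotlb.c hunk further down does.  A hypothetical caller:
 */
static int want_software_bounce_buffers(void)
{
	/* true when no hardware IOMMU was found or pass-through is forced */
	return !iommu_detected || iommu_pass_through;
}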
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 1376da45fd08..05695962fe44 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,6 +32,8 @@ int force_iommu __read_mostly = 1;
int force_iommu __read_mostly;
#endif
+int iommu_pass_through;
+
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
to i386. */
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..223abb134105 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
void __init pci_swiotlb_init(void)
{
- if (!iommu_detected) {
+ if (!iommu_detected || iommu_pass_through) {
#ifdef CONFIG_IA64_GENERIC
swiotlb = 1;
printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 24de6b90f401..bcebcefb4ad7 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -38,14 +38,10 @@ SECTIONS
_etext = .; /* End of text section */
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
+ EXCEPTION_TABLE(16)
BUG_TABLE
- RODATA
+ RO_DATA(PAGE_SIZE)
/* writeable */
.data : { /* Data */
@@ -53,27 +49,19 @@ SECTIONS
CONSTRUCTORS
}
- . = ALIGN(PAGE_SIZE);
- __nosave_begin = .;
- .data_nosave : { *(.data.nosave) }
- . = ALIGN(PAGE_SIZE);
- __nosave_end = .;
-
- . = ALIGN(PAGE_SIZE);
- .data.page_aligned : { *(.data.idt) }
+ .data_nosave : { NOSAVE_DATA; }
- . = ALIGN(32);
- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+ .data.page_aligned : { PAGE_ALIGNED_DATA(PAGE_SIZE); }
+ .data.cacheline_aligned : { CACHELINE_ALIGNED_DATA(32); }
/* rarely changed data like cpu maps */
. = ALIGN(32);
.data.read_mostly : AT(ADDR(.data.read_mostly)) {
- *(.data.read_mostly)
+ READ_MOSTLY_DATA(32);
_edata = .; /* End of data section */
}
- . = ALIGN(THREAD_SIZE); /* init_task */
- .data.init_task : { *(.data.init_task) }
+ .data.init_task : { INIT_TASK(THREAD_SIZE); }
/* might get freed after init */
. = ALIGN(PAGE_SIZE);
@@ -88,23 +76,18 @@ SECTIONS
__init_begin = .;
.init.text : {
_sinittext = .;
- *(.init.text)
+ INIT_TEXT;
_einittext = .;
}
- .init.data : { *(.init.data) }
- . = ALIGN(16);
- __setup_start = .;
- .setup.init : { KEEP(*(.init.setup)) }
- __setup_end = .;
+ .init.data : { INIT_DATA; }
+ .setup.init : { INIT_SETUP(16); }
__initcall_start = .;
.initcall.init : {
INITCALLS
}
__initcall_end = .;
- __con_initcall_start = .;
- .con_initcall.init : { *(.con_initcall.init) }
- __con_initcall_end = .;
+ .con_initcall.init : { CON_INITCALL; }
SECURITY_INIT
. = ALIGN(4);
@@ -114,28 +97,17 @@ SECTIONS
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
- .exit.text : { *(.exit.text) }
- .exit.data : { *(.exit.data) }
+ .exit.text : { EXIT_TEXT; }
+ .exit.data : { EXIT_DATA; }
-#ifdef CONFIG_BLK_DEV_INITRD
- . = ALIGN(PAGE_SIZE);
- __initramfs_start = .;
- .init.ramfs : { *(.init.ramfs) }
- __initramfs_end = .;
-#endif
+ .init.ramfs : { INIT_RAM_FS; }
PERCPU(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
- __bss_start = .; /* BSS */
- .bss : {
- *(.bss.page_aligned)
- *(.bss)
- }
- . = ALIGN(4);
- __bss_stop = .;
+ BSS(4)
_end = . ;
@@ -145,7 +117,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
- *(.exitcall.exit)
+ EXIT_CALL
}
STABS_DEBUG
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 9a2a6e32f00f..0e8db6771252 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -122,7 +122,7 @@ static void eeh_enable_irq(struct pci_dev *dev)
* passed back in "userdata".
*/
-static void eeh_report_error(struct pci_dev *dev, void *userdata)
+static int eeh_report_error(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -130,19 +130,21 @@ static void eeh_report_error(struct pci_dev *dev, void *userdata)
dev->error_state = pci_channel_io_frozen;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
@@ -153,7 +155,7 @@ static void eeh_report_error(struct pci_dev *dev, void *userdata)
* Cumulative response passed back in "userdata".
*/
-static void eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
+static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
@@ -161,26 +163,28 @@ static void eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
if (!driver ||
!driver->err_handler ||
!driver->err_handler->mmio_enabled)
- return;
+ return 0;
rc = driver->err_handler->mmio_enabled (dev);
/* A driver that needs a reset trumps all others */
if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ return 0;
}
/**
* eeh_report_reset - tell device that slot has been reset
*/
-static void eeh_report_reset(struct pci_dev *dev, void *userdata)
+static int eeh_report_reset(struct pci_dev *dev, void *userdata)
{
enum pci_ers_result rc, *res = userdata;
struct pci_driver *driver = dev->driver;
if (!driver)
- return;
+ return 0;
dev->error_state = pci_channel_io_normal;
@@ -188,35 +192,39 @@ static void eeh_report_reset(struct pci_dev *dev, void *userdata)
if (!driver->err_handler ||
!driver->err_handler->slot_reset)
- return;
+ return 0;
rc = driver->err_handler->slot_reset(dev);
if ((*res == PCI_ERS_RESULT_NONE) ||
(*res == PCI_ERS_RESULT_RECOVERED)) *res = rc;
if (*res == PCI_ERS_RESULT_DISCONNECT &&
rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
+ return 0;
}
/**
* eeh_report_resume - tell device to resume normal operations
*/
-static void eeh_report_resume(struct pci_dev *dev, void *userdata)
+static int eeh_report_resume(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_normal;
if (!driver)
- return;
+ return 0;
eeh_enable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->resume)
- return;
+ return 0;
driver->err_handler->resume(dev);
+
+ return 0;
}
/**
@@ -226,22 +234,24 @@ static void eeh_report_resume(struct pci_dev *dev, void *userdata)
* dead, and that no further recovery attempts will be made on it.
*/
-static void eeh_report_failure(struct pci_dev *dev, void *userdata)
+static int eeh_report_failure(struct pci_dev *dev, void *userdata)
{
struct pci_driver *driver = dev->driver;
dev->error_state = pci_channel_io_perm_failure;
if (!driver)
- return;
+ return 0;
eeh_disable_irq(dev);
if (!driver->err_handler ||
!driver->err_handler->error_detected)
- return;
+ return 0;
driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
+ return 0;
}
/* ------------------------------------------------------- */
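/*
 * Editorial sketch (assumption, not stated in the hunks above): the
 * eeh_report_* helpers now return int, which matches the
 * int (*)(struct pci_dev *, void *) callback type used by PCI bus walkers
 * such as pci_walk_bus(); returning 0 keeps such a walk going.  A
 * hypothetical caller (assuming <linux/pci.h>) could then pass them to the
 * walker directly:
 */
static void example_report_error(struct pci_bus *bus,
				 enum pci_ers_result *result)
{
	/* invoke eeh_report_error() for every device below this bus */
	pci_walk_bus(bus, eeh_report_error, result);
}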
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a14dba0e4d67..e577839f3073 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -94,6 +94,7 @@ config S390
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE
+ select HAVE_PERF_COUNTERS
source "init/Kconfig"
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index d401d56c255f..fcba206529f3 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc3
-# Thu Apr 23 09:29:52 2009
+# Linux kernel version: 2.6.30
+# Mon Jun 22 11:08:16 2009
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y
@@ -25,6 +25,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_S390=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -90,7 +91,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set
-# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -103,7 +103,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+CONFIG_HAVE_PERF_COUNTERS=y
+
+#
+# Performance Counters
+#
+# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
@@ -119,6 +126,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
@@ -150,7 +162,7 @@ CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
CONFIG_PREEMPT_NOTIFIERS=y
-# CONFIG_FREEZER is not set
+CONFIG_FREEZER=y
#
# Base setup
@@ -199,6 +211,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -218,9 +231,9 @@ CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
# I/O subsystem configuration
@@ -257,6 +270,16 @@ CONFIG_KEXEC=y
# CONFIG_ZFCPDUMP is not set
CONFIG_S390_GUEST=y
CONFIG_SECCOMP=y
+
+#
+# Power Management
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP_SMP=y
+CONFIG_PM_SLEEP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION=""
CONFIG_NET=y
#
@@ -384,6 +407,7 @@ CONFIG_SCTP_HMAC_MD5=y
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y
#
@@ -446,6 +470,7 @@ CONFIG_CAN_BCM=m
# CAN Device Drivers
#
CONFIG_CAN_VCAN=m
+# CONFIG_CAN_DEV is not set
# CONFIG_CAN_DEBUG_DEVICES is not set
# CONFIG_AF_RXRPC is not set
# CONFIG_WIMAX is not set
@@ -524,10 +549,6 @@ CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
@@ -578,7 +599,6 @@ CONFIG_DM_MULTIPATH=m
# CONFIG_DM_DELAY is not set
# CONFIG_DM_UEVENT is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_IFB is not set
CONFIG_DUMMY=m
CONFIG_BONDING=m
@@ -595,6 +615,7 @@ CONFIG_NET_ETHERNET=y
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y
# CONFIG_TR is not set
@@ -674,6 +695,11 @@ CONFIG_S390_TAPE_34XX=m
# CONFIG_MONREADER is not set
CONFIG_MONWRITER=m
CONFIG_S390_VMUR=m
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set
@@ -683,6 +709,10 @@ CONFIG_S390_VMUR=m
# CONFIG_NEW_LEDS is not set
CONFIG_ACCESSIBILITY=y
# CONFIG_AUXDISPLAY is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
@@ -703,11 +733,12 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
-CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -865,19 +896,23 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_FTRACE_SYSCALLS=y
CONFIG_TRACING_SUPPORT=y
-
-#
-# Tracers
-#
+CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set
-# CONFIG_CONTEXT_SWITCH_TRACER is not set
-# CONFIG_EVENT_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set
-# CONFIG_TRACE_BRANCH_PROFILING is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set
@@ -886,6 +921,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_SAMPLES=y
# CONFIG_SAMPLE_KOBJECT is not set
# CONFIG_SAMPLE_KPROBES is not set
+# CONFIG_KMEMCHECK is not set
#
# Security options
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index ec917d42ee6d..7a3817a656df 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -178,7 +178,7 @@ cputime64_to_clock_t(cputime64_t cputime)
}
struct s390_idle_data {
- spinlock_t lock;
+ unsigned int sequence;
unsigned long long idle_count;
unsigned long long idle_enter;
unsigned long long idle_time;
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 9450ce6e32de..31ed5686a968 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -248,14 +248,5 @@ int debug_unregister_view(debug_info_t* id, struct debug_view* view);
#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#endif /* DASD_DEBUG */
-#undef DEBUG_MALLOC
-#ifdef DEBUG_MALLOC
-void *b;
-#define kmalloc(x...) (PRINT_INFO(" kmalloc %p\n",b=kmalloc(x)),b)
-#define kfree(x) PRINT_INFO(" kfree %p\n",x);kfree(x)
-#define get_zeroed_page(x...) (PRINT_INFO(" gfp %p\n",b=get_zeroed_page(x)),b)
-#define __get_free_pages(x...) (PRINT_INFO(" gfps %p\n",b=__get_free_pages(x)),b)
-#endif /* DEBUG_MALLOC */
-
#endif /* __KERNEL__ */
#endif /* DEBUG_H */
diff --git a/arch/s390/include/asm/perf_counter.h b/arch/s390/include/asm/perf_counter.h
new file mode 100644
index 000000000000..a7205a3828cb
--- /dev/null
+++ b/arch/s390/include/asm/perf_counter.h
@@ -0,0 +1,8 @@
+/*
+ * Performance counter support - s390 specific definitions.
+ *
+ * Copyright 2009 Martin Schwidefsky, IBM Corporation.
+ */
+
+static inline void set_perf_counter_pending(void) {}
+static inline void clear_perf_counter_pending(void) {}
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 402d6dcf0d26..79d849f014f0 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -380,7 +380,7 @@ extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count);
+ int q_nr, unsigned int bufnr, unsigned int count);
extern int qdio_cleanup(struct ccw_device*, int);
extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 9bb2f6241d9f..86783efa24ee 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -154,39 +154,35 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
static int __kprobes swap_instruction(void *aref)
{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long status = kcb->kprobe_status;
struct ins_replace_args *args = aref;
+ int rc;
- return probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
+ kcb->kprobe_status = KPROBE_SWAP_INST;
+ rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
+ kcb->kprobe_status = status;
+ return rc;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct ins_replace_args args;
args.ptr = p->addr;
args.old = p->opcode;
args.new = BREAKPOINT_INSTRUCTION;
-
- kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL);
- kcb->kprobe_status = status;
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- unsigned long status = kcb->kprobe_status;
struct ins_replace_args args;
args.ptr = p->addr;
args.old = BREAKPOINT_INSTRUCTION;
args.new = p->opcode;
-
- kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL);
- kcb->kprobe_status = status;
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index fd8e3111a4e8..2270730f5354 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -856,13 +856,20 @@ static ssize_t show_idle_count(struct sys_device *dev,
{
struct s390_idle_data *idle;
unsigned long long idle_count;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock(&idle->lock);
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_count = idle->idle_count;
if (idle->idle_enter)
idle_count++;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -872,15 +879,22 @@ static ssize_t show_idle_time(struct sys_device *dev,
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock(&idle->lock);
now = get_clock();
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_time = idle->idle_time;
idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now)
idle_time += now - idle_enter;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -908,11 +922,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
idle = &per_cpu(s390_idle, cpu);
- spin_lock_irq(&idle->lock);
- idle->idle_enter = 0;
- idle->idle_time = 0;
- idle->idle_count = 0;
- spin_unlock_irq(&idle->lock);
+ memset(idle, 0, sizeof(struct s390_idle_data));
if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
return NOTIFY_BAD;
break;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 215330a2c128..d4c8e9c47c81 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -36,7 +36,6 @@
#include <linux/notifier.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
-#include <linux/bootmem.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
@@ -62,9 +61,6 @@
u64 sched_clock_base_cc = -1; /* Force to data section. */
-static ext_int_info_t ext_int_info_cc;
-static ext_int_info_t ext_int_etr_cc;
-
static DEFINE_PER_CPU(struct clock_event_device, comparators);
/*
@@ -255,15 +251,11 @@ void __init time_init(void)
stp_reset();
/* request the clock comparator external interrupt */
- if (register_early_external_interrupt(0x1004,
- clock_comparator_interrupt,
- &ext_int_info_cc) != 0)
+ if (register_external_interrupt(0x1004, clock_comparator_interrupt))
panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */
- if (register_early_external_interrupt(0x1406,
- timing_alert_interrupt,
- &ext_int_etr_cc) != 0)
+ if (register_external_interrupt(0x1406, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
if (clocksource_register(&clocksource_tod) != 0)
@@ -1445,14 +1437,14 @@ static void __init stp_reset(void)
{
int rc;
- stp_page = alloc_bootmem_pages(PAGE_SIZE);
+ stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) {
pr_warning("The real or virtual hardware system does "
"not provide an STP interface\n");
- free_bootmem((unsigned long) stp_page, PAGE_SIZE);
+ free_page((unsigned long) stp_page);
stp_page = NULL;
stp_online = 0;
}
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index c8eb7255332b..c41bb0d416e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,13 +25,9 @@
#include <asm/irq_regs.h>
#include <asm/cputime.h>
-static ext_int_info_t ext_int_info_timer;
-
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
- .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
static inline __u64 get_vtimer(void)
{
@@ -153,11 +149,13 @@ void vtime_start_cpu(void)
vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
}
- spin_lock(&idle->lock);
+ idle->sequence++;
+ smp_wmb();
idle->idle_time += idle_time;
idle->idle_enter = 0ULL;
idle->idle_count++;
- spin_unlock(&idle->lock);
+ smp_wmb();
+ idle->sequence++;
}
void vtime_stop_cpu(void)
@@ -244,15 +242,23 @@ cputime64_t s390_get_idle_time(int cpu)
{
struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter;
+ unsigned int sequence;
idle = &per_cpu(s390_idle, cpu);
- spin_lock(&idle->lock);
+
now = get_clock();
+repeat:
+ sequence = idle->sequence;
+ smp_rmb();
+ if (sequence & 1)
+ goto repeat;
idle_time = 0;
idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now)
idle_time = now - idle_enter;
- spin_unlock(&idle->lock);
+ smp_rmb();
+ if (idle->sequence != sequence)
+ goto repeat;
return idle_time;
}
@@ -557,8 +563,7 @@ void init_cpu_vtimer(void)
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
- if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
- &ext_int_info_timer) != 0)
+ if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
panic("Couldn't request external interrupt 0x1005");
/* Enable cpu timer interrupts on the boot cpu. */
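The smp.c and vtime.c hunks above replace the per-cpu idle spinlock with an open-coded sequence counter: the writer increments idle->sequence before and after updating the statistics (so the value is odd while an update is in flight), and readers retry whenever they observe an odd or changed value. A minimal userspace sketch of that pattern follows; the names are illustrative and GCC's __sync_synchronize() stands in for the kernel's smp_wmb()/smp_rmb().

#include <stdint.h>

struct idle_sample {
	volatile unsigned int sequence;	/* odd while an update is in flight */
	uint64_t idle_count;
	uint64_t idle_time;
};

/* Writer side (cf. vtime_start_cpu() above). */
void idle_update(struct idle_sample *s, uint64_t delta)
{
	s->sequence++;			/* odd: readers will retry */
	__sync_synchronize();		/* stand-in for smp_wmb() */
	s->idle_time += delta;
	s->idle_count++;
	__sync_synchronize();
	s->sequence++;			/* even: data consistent again */
}

/* Reader side (cf. show_idle_count()/s390_get_idle_time() above). */
uint64_t idle_read_count(const struct idle_sample *s)
{
	unsigned int seq;
	uint64_t count;

	do {
		do {
			seq = s->sequence;
			__sync_synchronize();	/* stand-in for smp_rmb() */
		} while (seq & 1);		/* update in flight, retry */
		count = s->idle_count;
		__sync_synchronize();
	} while (s->sequence != seq);		/* changed underneath us, retry */

	return count;
}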
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
index 3c74e7d827c9..76d688da32fa 100644
--- a/arch/s390/power/swsusp_asm64.S
+++ b/arch/s390/power/swsusp_asm64.S
@@ -109,10 +109,11 @@ swsusp_arch_resume:
aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15)
+#ifdef CONFIG_SMP
/* Save boot cpu number */
brasl %r14,smp_get_phys_cpu_id
lgr %r10,%r2
-
+#endif
/* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb
@@ -177,11 +178,12 @@ swsusp_arch_resume:
/* Pointer to save area */
lghi %r13,0x1000
+#ifdef CONFIG_SMP
/* Switch CPUs */
lgr %r2,%r10 /* get cpu id */
llgf %r3,0x318(%r13)
brasl %r14,smp_switch_boot_cpu_in_resume
-
+#endif
/* Restore prefix register */
spx 0x318(%r13)
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index af326a2975b5..fd6d21bbee6c 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
extern struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
+extern int iommu_pass_through;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index b51a1e8b0baf..927958d13c19 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -130,6 +130,7 @@ extern void pci_iommu_alloc(void);
/* generic pci stuff */
#include <asm-generic/pci.h>
+#define PCIBIOS_MAX_MEM_32 0xffffffff
#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index e60fd3e14bdf..cb739cc0a080 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -25,7 +25,7 @@
#define PCI_BIOS_IRQ_SCAN 0x2000
#define PCI_ASSIGN_ALL_BUSSES 0x4000
#define PCI_CAN_SKIP_ISA_ALIGN 0x8000
-#define PCI_USE__CRS 0x10000
+#define PCI_NO_ROOT_CRS 0x10000
#define PCI_CHECK_ENABLE_AMD_MMCONF 0x20000
#define PCI_HAS_IO_ECS 0x40000
#define PCI_NOASSIGN_ROMS 0x80000
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 328592fb6044..47630479b067 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -32,6 +32,8 @@ int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
+int iommu_pass_through;
+
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
@@ -209,6 +211,10 @@ static __init int iommu_setup(char *p)
#ifdef CONFIG_SWIOTLB
if (!strncmp(p, "soft", 4))
swiotlb = 1;
+ if (!strncmp(p, "pt", 2)) {
+ iommu_pass_through = 1;
+ return 1;
+ }
#endif
gart_parse_options(p);
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index a1712f2b50f1..6af96ee44200 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -71,7 +71,8 @@ void __init pci_swiotlb_init(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
- if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
+ if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
+ iommu_pass_through)
swiotlb = 1;
#endif
if (swiotlb_force)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index c0ecf250fe51..16c3fda85bba 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -38,15 +38,26 @@ count_resource(struct acpi_resource *acpi_res, void *data)
struct acpi_resource_address64 addr;
acpi_status status;
- if (info->res_num >= PCI_BUS_NUM_RESOURCES)
- return AE_OK;
-
status = resource_to_addr(acpi_res, &addr);
if (ACPI_SUCCESS(status))
info->res_num++;
return AE_OK;
}
+static int
+bus_has_transparent_bridge(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 class = dev->class >> 8;
+
+ if (class == PCI_CLASS_BRIDGE_PCI && dev->transparent)
+ return true;
+ }
+ return false;
+}
+
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
@@ -56,9 +67,7 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
acpi_status status;
unsigned long flags;
struct resource *root;
-
- if (info->res_num >= PCI_BUS_NUM_RESOURCES)
- return AE_OK;
+ int max_root_bus_resources = PCI_BUS_NUM_RESOURCES;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -82,6 +91,18 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
res->end = res->start + addr.address_length - 1;
res->child = NULL;
+ if (bus_has_transparent_bridge(info->bus))
+ max_root_bus_resources -= 3;
+ if (info->res_num >= max_root_bus_resources) {
+ printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
+ "from %s for %s due to _CRS returning more than "
+ "%d resource descriptors\n", (unsigned long) res->start,
+ (unsigned long) res->end, root->name, info->name,
+ max_root_bus_resources);
+ info->res_num++;
+ return AE_OK;
+ }
+
if (insert_resource(root, res)) {
printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
"from %s for %s\n", (unsigned long) res->start,
@@ -217,7 +238,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
#endif
}
- if (bus && (pci_probe & PCI_USE__CRS))
+ if (bus && !(pci_probe & PCI_NO_ROOT_CRS))
get_current_resources(device, busnum, domain, bus);
return bus;
}
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index f893d6a6e803..2255f880678b 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -101,7 +101,7 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
struct pci_root_info *info;
/* don't go for it if _CRS is used */
- if (pci_probe & PCI_USE__CRS)
+ if (!(pci_probe & PCI_NO_ROOT_CRS))
return;
/* if only one root bus, don't need to do anything */
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2202b6257b82..4740119e4bb7 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -515,8 +515,8 @@ char * __devinit pcibios_setup(char *str)
} else if (!strcmp(str, "assign-busses")) {
pci_probe |= PCI_ASSIGN_ALL_BUSSES;
return NULL;
- } else if (!strcmp(str, "use_crs")) {
- pci_probe |= PCI_USE__CRS;
+ } else if (!strcmp(str, "nocrs")) {
+ pci_probe |= PCI_NO_ROOT_CRS;
return NULL;
} else if (!strcmp(str, "earlydump")) {
pci_early_dump_regs = 1;
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index 768bee006037..bb84fbc9921f 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -263,7 +263,54 @@ CONFIG_HAVE_IDE=y
# CONFIG_SCSI_NETLINK is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
-# CONFIG_NETDEVICES is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+CONFIG_SMSC_PHY=y
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_NET_ETHERNET is not set
+CONFIG_NETDEV_1000=y
+CONFIG_S6GMAC=y
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -304,8 +351,6 @@ CONFIG_UNIX98_PTYS=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
@@ -387,7 +432,59 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_ACCESSIBILITY is not set
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+# CONFIG_RTC_INTF_SYSFS is not set
+# CONFIG_RTC_INTF_PROC is not set
+# CONFIG_RTC_INTF_DEV is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+CONFIG_RTC_DRV_M41T80=y
+# CONFIG_RTC_DRV_M41T80_WDT is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 8fc1c0c8de07..b7b8fbe47c77 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -155,5 +155,100 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
#endif
+#define XTENSA_CACHEBLK_LOG2 29
+#define XTENSA_CACHEBLK_SIZE (1 << XTENSA_CACHEBLK_LOG2)
+#define XTENSA_CACHEBLK_MASK (7 << XTENSA_CACHEBLK_LOG2)
+
+#if XCHAL_HAVE_CACHEATTR
+static inline u32 xtensa_get_cacheattr(void)
+{
+ u32 r;
+ asm volatile(" rsr %0, CACHEATTR" : "=a"(r));
+ return r;
+}
+
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+ u32 r = addr & XTENSA_CACHEBLK_MASK;
+ return r | ((xtensa_get_cacheattr() >> (r >> (XTENSA_CACHEBLK_LOG2-2)))
+ & 0xF);
+}
+#else
+static inline u32 xtensa_get_dtlb1(u32 addr)
+{
+ u32 r;
+ asm volatile(" rdtlb1 %0, %1" : "=a"(r) : "a"(addr));
+ asm volatile(" dsync");
+ return r;
+}
+
+static inline u32 xtensa_get_cacheattr(void)
+{
+ u32 r = 0;
+ u32 a = 0;
+ do {
+ a -= XTENSA_CACHEBLK_SIZE;
+ r = (r << 4) | (xtensa_get_dtlb1(a) & 0xF);
+ } while (a);
+ return r;
+}
+#endif
+
+static inline int xtensa_need_flush_dma_source(u32 addr)
+{
+ return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) >= 4;
+}
+
+static inline int xtensa_need_invalidate_dma_destination(u32 addr)
+{
+ return (xtensa_get_dtlb1(addr) & ((1 << XCHAL_CA_BITS) - 1)) != 2;
+}
+
+static inline void flush_dcache_unaligned(u32 addr, u32 size)
+{
+ u32 cnt;
+ if (size) {
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt--) {
+ asm volatile(" dhwb %0, 0" : : "a"(addr));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dsync");
+ }
+}
+
+static inline void invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+ int cnt;
+ if (size) {
+ asm volatile(" dhwbi %0, 0 ;" : : "a"(addr));
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ - XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt-- > 0) {
+ asm volatile(" dhi %0, %1" : : "a"(addr),
+ "n"(XCHAL_DCACHE_LINESIZE));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dhwbi %0, %1" : : "a"(addr),
+ "n"(XCHAL_DCACHE_LINESIZE));
+ asm volatile(" dsync");
+ }
+}
+
+static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
+{
+ u32 cnt;
+ if (size) {
+ cnt = (size + ((XCHAL_DCACHE_LINESIZE - 1) & addr)
+ + XCHAL_DCACHE_LINESIZE - 1) / XCHAL_DCACHE_LINESIZE;
+ while (cnt--) {
+ asm volatile(" dhwbi %0, 0" : : "a"(addr));
+ addr += XCHAL_DCACHE_LINESIZE;
+ }
+ asm volatile(" dsync");
+ }
+}
+
#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
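The flush/invalidate helpers added above all size their loops the same way: the byte count is padded by addr's offset within its cache line and then rounded up to whole lines. A standalone sketch of that arithmetic, using an illustrative 32-byte line size in place of XCHAL_DCACHE_LINESIZE:

#include <stdio.h>

#define LINE 32u	/* illustrative cache line size */

/* Number of cache lines touched by the byte range [addr, addr + size). */
unsigned int lines_covering(unsigned int addr, unsigned int size)
{
	return (size + (addr & (LINE - 1)) + LINE - 1) / LINE;
}

int main(void)
{
	/* bytes 0x1005..0x1044 straddle three 32-byte lines: 0x1000, 0x1020, 0x1040 */
	printf("%u\n", lines_covering(0x1005, 0x40));	/* prints 3 */
	return 0;
}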
diff --git a/arch/xtensa/include/asm/gpio.h b/arch/xtensa/include/asm/gpio.h
index 0763b0763960..a8c9fc46c790 100644
--- a/arch/xtensa/include/asm/gpio.h
+++ b/arch/xtensa/include/asm/gpio.h
@@ -38,14 +38,14 @@ static inline int gpio_cansleep(unsigned int gpio)
return __gpio_cansleep(gpio);
}
-/*
- * Not implemented, yet.
- */
static inline int gpio_to_irq(unsigned int gpio)
{
- return -ENOSYS;
+ return __gpio_to_irq(gpio);
}
+/*
+ * Not implemented, yet.
+ */
static inline int irq_to_gpio(unsigned int irq)
{
return -EINVAL;
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index dfac82dc52ad..4c0ccc9c4f4c 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -11,6 +11,7 @@
#ifndef _XTENSA_IRQ_H
#define _XTENSA_IRQ_H
+#include <linux/init.h>
#include <platform/hardware.h>
#include <variant/core.h>
@@ -21,11 +22,20 @@ static inline void variant_irq_enable(unsigned int irq) { }
static inline void variant_irq_disable(unsigned int irq) { }
#endif
+#ifndef VARIANT_NR_IRQS
+# define VARIANT_NR_IRQS 0
+#endif
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+
+#if VARIANT_NR_IRQS == 0
+static inline void variant_init_irq(void) { }
+#else
+void variant_init_irq(void) __init;
+#endif
static __inline__ int irq_canonicalize(int irq)
{
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index a36c85edd045..a1badb32fcda 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -197,4 +197,6 @@ void __init init_IRQ(void)
}
cached_irq_mask = 0;
+
+ variant_init_irq();
}
diff --git a/arch/xtensa/platforms/s6105/device.c b/arch/xtensa/platforms/s6105/device.c
index 78b08be5a92d..65333ffefb07 100644
--- a/arch/xtensa/platforms/s6105/device.c
+++ b/arch/xtensa/platforms/s6105/device.c
@@ -5,14 +5,27 @@
*/
#include <linux/kernel.h>
+#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <variant/hardware.h>
+#include <variant/dmac.h>
+#include <platform/gpio.h>
+
+#define GPIO3_INTNUM 3
#define UART_INTNUM 4
+#define GMAC_INTNUM 5
+
+static const signed char gpio3_irq_mappings[] = {
+ S6_INTC_GPIO(3),
+ -1
+};
static const signed char uart_irq_mappings[] = {
S6_INTC_UART(0),
@@ -20,8 +33,18 @@ static const signed char uart_irq_mappings[] = {
-1,
};
+static const signed char gmac_irq_mappings[] = {
+ S6_INTC_GMAC_STAT,
+ S6_INTC_GMAC_ERR,
+ S6_INTC_DMA_HOSTTERMCNT(0),
+ S6_INTC_DMA_HOSTTERMCNT(1),
+ -1
+};
+
const signed char *platform_irq_mappings[NR_IRQS] = {
+ [GPIO3_INTNUM] = gpio3_irq_mappings,
[UART_INTNUM] = uart_irq_mappings,
+ [GMAC_INTNUM] = gmac_irq_mappings,
};
static struct plat_serial8250_port serial_platform_data[] = {
@@ -46,6 +69,66 @@ static struct plat_serial8250_port serial_platform_data[] = {
{ },
};
+static struct resource s6_gmac_resource[] = {
+ {
+ .name = "mem",
+ .start = (resource_size_t)S6_REG_GMAC,
+ .end = (resource_size_t)S6_REG_GMAC + 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "dma",
+ .start = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX),
+ .end = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACTX) + 0x100 - 1,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "dma",
+ .start = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX),
+ .end = (resource_size_t)
+ DMA_CHNL(S6_REG_HIFDMA, S6_HIFDMA_GMACRX) + 0x100 - 1,
+ .flags = IORESOURCE_DMA,
+ },
+ {
+ .name = "io",
+ .start = (resource_size_t)S6_MEM_GMAC,
+ .end = (resource_size_t)S6_MEM_GMAC + 0x2000000 - 1,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "irq",
+ .start = (resource_size_t)GMAC_INTNUM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "irq",
+ .start = (resource_size_t)PHY_POLL,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __init prepare_phy_irq(int pin)
+{
+ int irq;
+ if (gpio_request(pin, "s6gmac_phy") < 0)
+ goto fail;
+ if (gpio_direction_input(pin) < 0)
+ goto free;
+ irq = gpio_to_irq(pin);
+ if (irq < 0)
+ goto free;
+ if (set_irq_type(irq, IRQ_TYPE_LEVEL_LOW) < 0)
+ goto free;
+ return irq;
+free:
+ gpio_free(pin);
+fail:
+ return PHY_POLL;
+}
+
static struct platform_device platform_devices[] = {
{
.name = "serial8250",
@@ -54,12 +137,23 @@ static struct platform_device platform_devices[] = {
.platform_data = serial_platform_data,
},
},
+ {
+ .name = "s6gmac",
+ .id = 0,
+ .resource = s6_gmac_resource,
+ .num_resources = ARRAY_SIZE(s6_gmac_resource),
+ },
+ {
+ I2C_BOARD_INFO("m41t62", S6I2C_ADDR_M41T62),
+ },
};
static int __init device_init(void)
{
int i;
+ s6_gmac_resource[5].start = prepare_phy_irq(GPIO_PHY_IRQ);
+
for (i = 0; i < ARRAY_SIZE(platform_devices); i++)
platform_device_register(&platform_devices[i]);
return 0;
diff --git a/arch/xtensa/platforms/s6105/setup.c b/arch/xtensa/platforms/s6105/setup.c
index 855ddeadc43d..86ce730f7913 100644
--- a/arch/xtensa/platforms/s6105/setup.c
+++ b/arch/xtensa/platforms/s6105/setup.c
@@ -35,12 +35,21 @@ void __init platform_setup(char **cmdline)
{
unsigned long reg;
+ reg = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ reg &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC |
+ S6_GREG1_PLLSEL_GMII_MASK << S6_GREG1_PLLSEL_GMII);
+ reg |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC |
+ S6_GREG1_PLLSEL_GMII_125MHZ << S6_GREG1_PLLSEL_GMII;
+ writel(reg, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+
reg = readl(S6_REG_GREG1 + S6_GREG1_CLKGATE);
reg &= ~(1 << S6_GREG1_BLOCK_SB);
+ reg &= ~(1 << S6_GREG1_BLOCK_GMAC);
writel(reg, S6_REG_GREG1 + S6_GREG1_CLKGATE);
reg = readl(S6_REG_GREG1 + S6_GREG1_BLOCKENA);
reg |= 1 << S6_GREG1_BLOCK_SB;
+ reg |= 1 << S6_GREG1_BLOCK_GMAC;
writel(reg, S6_REG_GREG1 + S6_GREG1_BLOCKENA);
printk(KERN_NOTICE "S6105 on Stretch S6000 - "
@@ -49,7 +58,7 @@ void __init platform_setup(char **cmdline)
void __init platform_init(bp_tag_t *first)
{
- s6_gpio_init();
+ s6_gpio_init(0);
gpio_request(GPIO_LED1_NGREEN, "led1_green");
gpio_request(GPIO_LED1_RED, "led1_red");
gpio_direction_output(GPIO_LED1_NGREEN, 1);
diff --git a/arch/xtensa/variants/s6000/Makefile b/arch/xtensa/variants/s6000/Makefile
index d83f3805130c..3e7ef0a0c498 100644
--- a/arch/xtensa/variants/s6000/Makefile
+++ b/arch/xtensa/variants/s6000/Makefile
@@ -1,4 +1,4 @@
# s6000 Makefile
-obj-y += irq.o gpio.o
+obj-y += irq.o gpio.o dmac.o
obj-$(CONFIG_XTENSA_CALIBRATE_CCOUNT) += delay.o
diff --git a/arch/xtensa/variants/s6000/dmac.c b/arch/xtensa/variants/s6000/dmac.c
new file mode 100644
index 000000000000..dc7f7c573518
--- /dev/null
+++ b/arch/xtensa/variants/s6000/dmac.c
@@ -0,0 +1,173 @@
+/*
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ * (c) 2008 emlix GmbH http://www.emlix.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <asm/cacheflush.h>
+#include <variant/dmac.h>
+
+/* DMA engine lookup */
+
+struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
+
+
+/* DMA control, per engine */
+
+void s6dmac_put_fifo_cache(u32 dmac, int chan, u32 src, u32 dst, u32 size)
+{
+ if (xtensa_need_flush_dma_source(src)) {
+ u32 base = src;
+ u32 span = size;
+ u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ if (chunk && (size > chunk)) {
+ s32 skip =
+ readl(DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
+ u32 gaps = (size+chunk-1)/chunk - 1;
+ if (skip >= 0) {
+ span += gaps * skip;
+ } else if (-skip > chunk) {
+ s32 decr = gaps * (chunk + skip);
+ base += decr;
+ span = chunk - decr;
+ } else {
+ span = max(span + gaps * skip,
+ (chunk + skip) * gaps - skip);
+ }
+ }
+ flush_dcache_unaligned(base, span);
+ }
+ if (xtensa_need_invalidate_dma_destination(dst)) {
+ u32 base = dst;
+ u32 span = size;
+ u32 chunk = readl(DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ if (chunk && (size > chunk)) {
+ s32 skip =
+ readl(DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
+ u32 gaps = (size+chunk-1)/chunk - 1;
+ if (skip >= 0) {
+ span += gaps * skip;
+ } else if (-skip > chunk) {
+ s32 decr = gaps * (chunk + skip);
+ base += decr;
+ span = chunk - decr;
+ } else {
+ span = max(span + gaps * skip,
+ (chunk + skip) * gaps - skip);
+ }
+ }
+ invalidate_dcache_unaligned(base, span);
+ }
+ s6dmac_put_fifo(dmac, chan, src, dst, size);
+}
+
+void s6dmac_disable_error_irqs(u32 dmac, u32 mask)
+{
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ _s6dmac_disable_error_irqs(dmac, mask);
+ spin_unlock_irqrestore(spinl, flags);
+}
+
+u32 s6dmac_int_sources(u32 dmac, u32 channel)
+{
+ u32 mask, ret, tmp;
+ mask = 1 << channel;
+
+ tmp = readl(dmac + S6_DMA_TERMCNTIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_TERMCNTIRQCLR);
+ ret = tmp >> channel;
+
+ tmp = readl(dmac + S6_DMA_PENDCNTIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_PENDCNTIRQCLR);
+ ret |= (tmp >> channel) << 1;
+
+ tmp = readl(dmac + S6_DMA_LOWWMRKIRQSTAT);
+ tmp &= mask;
+ writel(tmp, dmac + S6_DMA_LOWWMRKIRQCLR);
+ ret |= (tmp >> channel) << 2;
+
+ tmp = readl(dmac + S6_DMA_INTRAW0);
+ tmp &= (mask << S6_DMA_INT0_OVER) | (mask << S6_DMA_INT0_UNDER);
+ writel(tmp, dmac + S6_DMA_INTCLEAR0);
+
+ if (tmp & (mask << S6_DMA_INT0_UNDER))
+ ret |= 1 << 3;
+ if (tmp & (mask << S6_DMA_INT0_OVER))
+ ret |= 1 << 4;
+
+ tmp = readl(dmac + S6_DMA_MASTERERRINFO);
+ mask <<= S6_DMA_INT1_CHANNEL;
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(0)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << S6_DMA_INT1_MASTER;
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(1)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << (S6_DMA_INT1_MASTER + 1);
+ if (((tmp >> S6_DMA_MASTERERR_CHAN(2)) & S6_DMA_MASTERERR_CHAN_MASK)
+ == channel)
+ mask |= 1 << (S6_DMA_INT1_MASTER + 2);
+
+ tmp = readl(dmac + S6_DMA_INTRAW1) & mask;
+ writel(tmp, dmac + S6_DMA_INTCLEAR1);
+ ret |= ((tmp >> channel) & 1) << 5;
+ ret |= ((tmp >> S6_DMA_INT1_MASTER) & S6_DMA_INT1_MASTER_MASK) << 6;
+
+ return ret;
+}
+
+void s6dmac_release_chan(u32 dmac, int chan)
+{
+ if (chan >= 0)
+ s6dmac_disable_chan(dmac, chan);
+}
+
+
+/* global init */
+
+static inline void __init dmac_init(u32 dmac, u8 chan_nb)
+{
+ s6dmac_ctrl[S6_DMAC_INDEX(dmac)].dmac = dmac;
+ spin_lock_init(&s6dmac_ctrl[S6_DMAC_INDEX(dmac)].lock);
+ s6dmac_ctrl[S6_DMAC_INDEX(dmac)].chan_nb = chan_nb;
+ writel(S6_DMA_INT1_MASTER_MASK << S6_DMA_INT1_MASTER,
+ dmac + S6_DMA_INTCLEAR1);
+}
+
+static inline void __init dmac_master(u32 dmac,
+ u32 m0start, u32 m0end, u32 m1start, u32 m1end)
+{
+ writel(m0start, dmac + S6_DMA_MASTER0START);
+ writel(m0end - 1, dmac + S6_DMA_MASTER0END);
+ writel(m1start, dmac + S6_DMA_MASTER1START);
+ writel(m1end - 1, dmac + S6_DMA_MASTER1END);
+}
+
+static void __init s6_dmac_init(void)
+{
+ dmac_init(S6_REG_LMSDMA, S6_LMSDMA_NB);
+ dmac_master(S6_REG_LMSDMA,
+ S6_MEM_DDR, S6_MEM_PCIE_APER, S6_MEM_EFI, S6_MEM_GMAC);
+ dmac_init(S6_REG_NIDMA, S6_NIDMA_NB);
+ dmac_init(S6_REG_DPDMA, S6_DPDMA_NB);
+ dmac_master(S6_REG_DPDMA,
+ S6_MEM_DDR, S6_MEM_PCIE_APER, S6_REG_DP, S6_REG_DPDMA);
+ dmac_init(S6_REG_HIFDMA, S6_HIFDMA_NB);
+ dmac_master(S6_REG_HIFDMA,
+ S6_MEM_GMAC, S6_MEM_PCIE_CFG, S6_MEM_PCIE_APER, S6_MEM_AUX);
+}
+
+arch_initcall(s6_dmac_init);
diff --git a/arch/xtensa/variants/s6000/gpio.c b/arch/xtensa/variants/s6000/gpio.c
index 79317fdcf14c..380a70fff756 100644
--- a/arch/xtensa/variants/s6000/gpio.c
+++ b/arch/xtensa/variants/s6000/gpio.c
@@ -4,15 +4,20 @@
* Copyright (c) 2009 emlix GmbH
* Authors: Oskar Schirmer <os@emlix.com>
* Johannes Weiner <jw@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
*/
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/gpio.h>
#include <variant/hardware.h>
+#define IRQ_BASE XTENSA_NR_IRQS
+
#define S6_GPIO_DATA 0x000
#define S6_GPIO_IS 0x404
#define S6_GPIO_IBE 0x408
@@ -52,19 +57,175 @@ static void set(struct gpio_chip *chip, unsigned int off, int val)
writeb(val ? ~0 : 0, S6_REG_GPIO + S6_GPIO_DATA + S6_GPIO_OFFSET(off));
}
+static int to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ if (offset < 8)
+ return offset + IRQ_BASE;
+ return -EINVAL;
+}
+
static struct gpio_chip gpiochip = {
.owner = THIS_MODULE,
.direction_input = direction_input,
.get = get,
.direction_output = direction_output,
.set = set,
+ .to_irq = to_irq,
.base = 0,
.ngpio = 24,
.can_sleep = 0, /* no blocking io needed */
.exported = 0, /* no exporting to userspace */
};
-int s6_gpio_init(void)
+int s6_gpio_init(u32 afsel)
{
+ writeb(afsel, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL);
+ writeb(afsel >> 8, S6_REG_GPIO + S6_GPIO_BANK(1) + S6_GPIO_AFSEL);
+ writeb(afsel >> 16, S6_REG_GPIO + S6_GPIO_BANK(2) + S6_GPIO_AFSEL);
return gpiochip_add(&gpiochip);
}
+
+static void ack(unsigned int irq)
+{
+ writeb(1 << (irq - IRQ_BASE), S6_REG_GPIO + S6_GPIO_IC);
+}
+
+static void mask(unsigned int irq)
+{
+ u8 r = readb(S6_REG_GPIO + S6_GPIO_IE);
+ r &= ~(1 << (irq - IRQ_BASE));
+ writeb(r, S6_REG_GPIO + S6_GPIO_IE);
+}
+
+static void unmask(unsigned int irq)
+{
+ u8 m = readb(S6_REG_GPIO + S6_GPIO_IE);
+ m |= 1 << (irq - IRQ_BASE);
+ writeb(m, S6_REG_GPIO + S6_GPIO_IE);
+}
+
+static int set_type(unsigned int irq, unsigned int type)
+{
+ const u8 m = 1 << (irq - IRQ_BASE);
+ irq_flow_handler_t handler;
+ struct irq_desc *desc;
+ u8 reg;
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_AFSEL) & m)
+ || (readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE) & m)
+ || readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_DIR
+ + S6_GPIO_MASK(irq - IRQ_BASE)))
+ return 0;
+ type = IRQ_TYPE_EDGE_BOTH;
+ }
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
+ reg |= m;
+ handler = handle_level_irq;
+ } else {
+ reg &= ~m;
+ handler = handle_edge_irq;
+ }
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IS);
+ desc = irq_to_desc(irq);
+ desc->handle_irq = handler;
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING))
+ reg |= m;
+ else
+ reg &= ~m;
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IEV);
+
+ reg = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+ reg |= m;
+ else
+ reg &= ~m;
+ writeb(reg, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IBE);
+ return 0;
+}
+
+static struct irq_chip gpioirqs = {
+ .name = "GPIO",
+ .ack = ack,
+ .mask = mask,
+ .unmask = unmask,
+ .set_type = set_type,
+};
+
+static u8 demux_masks[4];
+
+static void demux_irqs(unsigned int irq, struct irq_desc *desc)
+{
+ u8 *mask = get_irq_desc_data(desc);
+ u8 pending;
+ int cirq;
+
+ desc->chip->mask(irq);
+ desc->chip->ack(irq);
+ pending = readb(S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_MIS) & *mask;
+ cirq = IRQ_BASE - 1;
+ while (pending) {
+ int n = ffs(pending);
+ cirq += n;
+ pending >>= n;
+ generic_handle_irq(cirq);
+ }
+ desc->chip->unmask(irq);
+}
+
+extern const signed char *platform_irq_mappings[XTENSA_NR_IRQS];
+
+void __init variant_init_irq(void)
+{
+ int irq, n;
+ writeb(0, S6_REG_GPIO + S6_GPIO_BANK(0) + S6_GPIO_IE);
+ for (irq = n = 0; irq < XTENSA_NR_IRQS; irq++) {
+ const signed char *mapping = platform_irq_mappings[irq];
+ int alone = 1;
+ u8 mask;
+ if (!mapping)
+ continue;
+ for(mask = 0; *mapping != -1; mapping++)
+ switch (*mapping) {
+ case S6_INTC_GPIO(0):
+ mask |= 1 << 0;
+ break;
+ case S6_INTC_GPIO(1):
+ mask |= 1 << 1;
+ break;
+ case S6_INTC_GPIO(2):
+ mask |= 1 << 2;
+ break;
+ case S6_INTC_GPIO(3):
+ mask |= 0x1f << 3;
+ break;
+ default:
+ alone = 0;
+ }
+ if (mask) {
+ int cirq, i;
+ if (!alone) {
+ printk(KERN_ERR "chained irq chips can't share"
+ " parent irq %i\n", irq);
+ continue;
+ }
+ demux_masks[n] = mask;
+ cirq = IRQ_BASE - 1;
+ do {
+ i = ffs(mask);
+ cirq += i;
+ mask >>= i;
+ set_irq_chip(cirq, &gpioirqs);
+ set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+ } while (mask);
+ set_irq_data(irq, demux_masks + n);
+ set_irq_chained_handler(irq, demux_irqs);
+ if (++n == ARRAY_SIZE(demux_masks))
+ break;
+ }
+ }
+}
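demux_irqs() above walks the masked interrupt status one set bit at a time: ffs() finds the lowest pending bit, the virtual IRQ number is advanced by that distance, and the bit is shifted away before the child interrupt is dispatched. A standalone sketch of that bit-walk, with a printf stand-in for generic_handle_irq() and illustrative values:

#include <stdio.h>
#include <strings.h>	/* ffs() */

void handle_one(int irq)
{
	printf("dispatch irq %d\n", irq);
}

void demux(unsigned int pending, int base)
{
	int irq = base - 1;

	while (pending) {
		int n = ffs(pending);	/* 1-based index of lowest set bit */

		irq += n;
		pending >>= n;
		handle_one(irq);
	}
}

int main(void)
{
	demux(0x29, 64);	/* bits 0, 3, 5 -> irqs 64, 67, 69 */
	return 0;
}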
diff --git a/arch/xtensa/variants/s6000/include/variant/dmac.h b/arch/xtensa/variants/s6000/include/variant/dmac.h
new file mode 100644
index 000000000000..89ab9484fb71
--- /dev/null
+++ b/arch/xtensa/variants/s6000/include/variant/dmac.h
@@ -0,0 +1,387 @@
+/*
+ * include/asm-xtensa/variant-s6000/dmac.h
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Tensilica Inc.
+ * Copyright (C) 2008 Emlix GmbH <info@emlix.com>
+ * Authors: Fabian Godehardt <fg@emlix.com>
+ * Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ */
+
+#ifndef __ASM_XTENSA_S6000_DMAC_H
+#define __ASM_XTENSA_S6000_DMAC_H
+#include <linux/io.h>
+#include <variant/hardware.h>
+
+/* DMA global */
+
+#define S6_DMA_INTSTAT0 0x000
+#define S6_DMA_INTSTAT1 0x004
+#define S6_DMA_INTENABLE0 0x008
+#define S6_DMA_INTENABLE1 0x00C
+#define S6_DMA_INTRAW0 0x010
+#define S6_DMA_INTRAW1 0x014
+#define S6_DMA_INTCLEAR0 0x018
+#define S6_DMA_INTCLEAR1 0x01C
+#define S6_DMA_INTSET0 0x020
+#define S6_DMA_INTSET1 0x024
+#define S6_DMA_INT0_UNDER 0
+#define S6_DMA_INT0_OVER 16
+#define S6_DMA_INT1_CHANNEL 0
+#define S6_DMA_INT1_MASTER 16
+#define S6_DMA_INT1_MASTER_MASK 7
+#define S6_DMA_TERMCNTIRQSTAT 0x028
+#define S6_DMA_TERMCNTIRQCLR 0x02C
+#define S6_DMA_TERMCNTIRQSET 0x030
+#define S6_DMA_PENDCNTIRQSTAT 0x034
+#define S6_DMA_PENDCNTIRQCLR 0x038
+#define S6_DMA_PENDCNTIRQSET 0x03C
+#define S6_DMA_LOWWMRKIRQSTAT 0x040
+#define S6_DMA_LOWWMRKIRQCLR 0x044
+#define S6_DMA_LOWWMRKIRQSET 0x048
+#define S6_DMA_MASTERERRINFO 0x04C
+#define S6_DMA_MASTERERR_CHAN(n) (4*(n))
+#define S6_DMA_MASTERERR_CHAN_MASK 0xF
+#define S6_DMA_DESCRFIFO0 0x050
+#define S6_DMA_DESCRFIFO1 0x054
+#define S6_DMA_DESCRFIFO2 0x058
+#define S6_DMA_DESCRFIFO2_AUTODISABLE 24
+#define S6_DMA_DESCRFIFO3 0x05C
+#define S6_DMA_MASTER0START 0x060
+#define S6_DMA_MASTER0END 0x064
+#define S6_DMA_MASTER1START 0x068
+#define S6_DMA_MASTER1END 0x06C
+#define S6_DMA_NEXTFREE 0x070
+#define S6_DMA_NEXTFREE_CHAN 0
+#define S6_DMA_NEXTFREE_CHAN_MASK 0x1F
+#define S6_DMA_NEXTFREE_ENA 16
+#define S6_DMA_NEXTFREE_ENA_MASK ((1 << 16) - 1)
+#define S6_DMA_DPORTCTRLGRP(p) ((p) * 4 + 0x074)
+#define S6_DMA_DPORTCTRLGRP_FRAMEREP 0
+#define S6_DMA_DPORTCTRLGRP_NRCHANS 1
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_1 0
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_3 1
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_4 2
+#define S6_DMA_DPORTCTRLGRP_NRCHANS_2 3
+#define S6_DMA_DPORTCTRLGRP_ENA 31
+
+
+/* DMA per channel */
+
+#define DMA_CHNL(dmac, n) ((dmac) + 0x1000 + (n) * 0x100)
+#define DMA_INDEX_CHNL(addr) (((addr) >> 8) & 0xF)
+#define DMA_MASK_DMAC(addr) ((addr) & 0xFFFF0000)
+#define S6_DMA_CHNCTRL 0x000
+#define S6_DMA_CHNCTRL_ENABLE 0
+#define S6_DMA_CHNCTRL_PAUSE 1
+#define S6_DMA_CHNCTRL_PRIO 2
+#define S6_DMA_CHNCTRL_PRIO_MASK 3
+#define S6_DMA_CHNCTRL_PERIPHXFER 4
+#define S6_DMA_CHNCTRL_PERIPHENA 5
+#define S6_DMA_CHNCTRL_SRCINC 6
+#define S6_DMA_CHNCTRL_DSTINC 7
+#define S6_DMA_CHNCTRL_BURSTLOG 8
+#define S6_DMA_CHNCTRL_BURSTLOG_MASK 7
+#define S6_DMA_CHNCTRL_DESCFIFODEPTH 12
+#define S6_DMA_CHNCTRL_DESCFIFODEPTH_MASK 0x1F
+#define S6_DMA_CHNCTRL_DESCFIFOFULL 17
+#define S6_DMA_CHNCTRL_BWCONSEL 18
+#define S6_DMA_CHNCTRL_BWCONENA 19
+#define S6_DMA_CHNCTRL_PENDGCNTSTAT 20
+#define S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK 0x3F
+#define S6_DMA_CHNCTRL_LOWWMARK 26
+#define S6_DMA_CHNCTRL_LOWWMARK_MASK 0xF
+#define S6_DMA_CHNCTRL_TSTAMP 30
+#define S6_DMA_TERMCNTNB 0x004
+#define S6_DMA_TERMCNTNB_MASK 0xFFFF
+#define S6_DMA_TERMCNTTMO 0x008
+#define S6_DMA_TERMCNTSTAT 0x00C
+#define S6_DMA_TERMCNTSTAT_MASK 0xFF
+#define S6_DMA_CMONCHUNK 0x010
+#define S6_DMA_SRCSKIP 0x014
+#define S6_DMA_DSTSKIP 0x018
+#define S6_DMA_CUR_SRC 0x024
+#define S6_DMA_CUR_DST 0x028
+#define S6_DMA_TIMESTAMP 0x030
+
+/* DMA channel lists */
+
+#define S6_DPDMA_CHAN(stream, channel) (4 * (stream) + (channel))
+#define S6_DPDMA_NB 16
+
+#define S6_HIFDMA_GMACTX 0
+#define S6_HIFDMA_GMACRX 1
+#define S6_HIFDMA_I2S0 2
+#define S6_HIFDMA_I2S1 3
+#define S6_HIFDMA_EGIB 4
+#define S6_HIFDMA_PCITX 5
+#define S6_HIFDMA_PCIRX 6
+#define S6_HIFDMA_NB 7
+
+#define S6_NIDMA_NB 4
+
+#define S6_LMSDMA_NB 12
+
+/* controller access */
+
+#define S6_DMAC_NB 4
+#define S6_DMAC_INDEX(dmac) (((unsigned)(dmac) >> 18) % S6_DMAC_NB)
+
+struct s6dmac_ctrl {
+ u32 dmac;
+ spinlock_t lock;
+ u8 chan_nb;
+};
+
+extern struct s6dmac_ctrl s6dmac_ctrl[S6_DMAC_NB];
+
+
+/* DMA control, per channel */
+
+static inline int s6dmac_fifo_full(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ & (1 << S6_DMA_CHNCTRL_DESCFIFOFULL)) && 1;
+}
+
+static inline int s6dmac_termcnt_irq(u32 dmac, int chan)
+{
+ u32 m = 1 << chan;
+ int r = (readl(dmac + S6_DMA_TERMCNTIRQSTAT) & m) && 1;
+ if (r)
+ writel(m, dmac + S6_DMA_TERMCNTIRQCLR);
+ return r;
+}
+
+static inline int s6dmac_pendcnt_irq(u32 dmac, int chan)
+{
+ u32 m = 1 << chan;
+ int r = (readl(dmac + S6_DMA_PENDCNTIRQSTAT) & m) && 1;
+ if (r)
+ writel(m, dmac + S6_DMA_PENDCNTIRQCLR);
+ return r;
+}
+
+static inline int s6dmac_lowwmark_irq(u32 dmac, int chan)
+{
+ int r = (readl(dmac + S6_DMA_LOWWMRKIRQSTAT) & (1 << chan)) ? 1 : 0;
+ if (r)
+ writel(1 << chan, dmac + S6_DMA_LOWWMRKIRQCLR);
+ return r;
+}
+
+static inline u32 s6dmac_pending_count(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ >> S6_DMA_CHNCTRL_PENDGCNTSTAT)
+ & S6_DMA_CHNCTRL_PENDGCNTSTAT_MASK;
+}
+
+static inline void s6dmac_set_terminal_count(u32 dmac, int chan, u32 n)
+{
+ n &= S6_DMA_TERMCNTNB_MASK;
+ n |= readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB)
+ & ~S6_DMA_TERMCNTNB_MASK;
+ writel(n, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
+}
+
+static inline u32 s6dmac_get_terminal_count(u32 dmac, int chan)
+{
+ return (readl(DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB))
+ & S6_DMA_TERMCNTNB_MASK;
+}
+
+static inline u32 s6dmac_timestamp(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_TIMESTAMP);
+}
+
+static inline u32 s6dmac_cur_src(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_SRC);
+}
+
+static inline u32 s6dmac_cur_dst(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CUR_DST);
+}
+
+static inline void s6dmac_disable_chan(u32 dmac, int chan)
+{
+ u32 ctrl;
+ writel(readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL)
+ & ~(1 << S6_DMA_CHNCTRL_ENABLE),
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ do
+ ctrl = readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ while (ctrl & (1 << S6_DMA_CHNCTRL_ENABLE));
+}
+
+static inline void s6dmac_set_stride_skip(u32 dmac, int chan,
+ int comchunk, /* 0: disable scatter/gather */
+ int srcskip, int dstskip)
+{
+ writel(comchunk, DMA_CHNL(dmac, chan) + S6_DMA_CMONCHUNK);
+ writel(srcskip, DMA_CHNL(dmac, chan) + S6_DMA_SRCSKIP);
+ writel(dstskip, DMA_CHNL(dmac, chan) + S6_DMA_DSTSKIP);
+}
+
+static inline void s6dmac_enable_chan(u32 dmac, int chan,
+ int prio, /* 0 (highest) .. 3 (lowest) */
+ int periphxfer, /* <0: disable p.req.line, 0..1: mode */
+ int srcinc, int dstinc, /* 0: don't increment src/dst address */
+ int comchunk, /* 0: disable scatter/gather */
+ int srcskip, int dstskip,
+ int burstsize, /* 4 for I2S, 7 for everything else */
+ int bandwidthconserve, /* <0: disable, 0..1: select */
+ int lowwmark, /* 0..15 */
+ int timestamp, /* 0: disable timestamp */
+ int enable) /* 0: disable for now */
+{
+ writel(1, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTNB);
+ writel(0, DMA_CHNL(dmac, chan) + S6_DMA_TERMCNTTMO);
+ writel(lowwmark << S6_DMA_CHNCTRL_LOWWMARK,
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+ s6dmac_set_stride_skip(dmac, chan, comchunk, srcskip, dstskip);
+ writel(((enable ? 1 : 0) << S6_DMA_CHNCTRL_ENABLE) |
+ (prio << S6_DMA_CHNCTRL_PRIO) |
+ (((periphxfer > 0) ? 1 : 0) << S6_DMA_CHNCTRL_PERIPHXFER) |
+ (((periphxfer < 0) ? 0 : 1) << S6_DMA_CHNCTRL_PERIPHENA) |
+ ((srcinc ? 1 : 0) << S6_DMA_CHNCTRL_SRCINC) |
+ ((dstinc ? 1 : 0) << S6_DMA_CHNCTRL_DSTINC) |
+ (burstsize << S6_DMA_CHNCTRL_BURSTLOG) |
+ (((bandwidthconserve > 0) ? 1 : 0) << S6_DMA_CHNCTRL_BWCONSEL) |
+ (((bandwidthconserve < 0) ? 0 : 1) << S6_DMA_CHNCTRL_BWCONENA) |
+ (lowwmark << S6_DMA_CHNCTRL_LOWWMARK) |
+ ((timestamp ? 1 : 0) << S6_DMA_CHNCTRL_TSTAMP),
+ DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL);
+}
+
+
+/* DMA control, per engine */
+
+static inline unsigned _dmac_addr_index(u32 dmac)
+{
+ unsigned i = S6_DMAC_INDEX(dmac);
+ if (s6dmac_ctrl[i].dmac != dmac)
+ BUG();
+ return i;
+}
+
+static inline void _s6dmac_disable_error_irqs(u32 dmac, u32 mask)
+{
+ writel(mask, dmac + S6_DMA_TERMCNTIRQCLR);
+ writel(mask, dmac + S6_DMA_PENDCNTIRQCLR);
+ writel(mask, dmac + S6_DMA_LOWWMRKIRQCLR);
+ writel(readl(dmac + S6_DMA_INTENABLE0)
+ & ~((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER)),
+ dmac + S6_DMA_INTENABLE0);
+ writel(readl(dmac + S6_DMA_INTENABLE1) & ~(mask << S6_DMA_INT1_CHANNEL),
+ dmac + S6_DMA_INTENABLE1);
+ writel((mask << S6_DMA_INT0_UNDER) | (mask << S6_DMA_INT0_OVER),
+ dmac + S6_DMA_INTCLEAR0);
+ writel(mask << S6_DMA_INT1_CHANNEL, dmac + S6_DMA_INTCLEAR1);
+}
+
+/*
+ * request channel from specified engine
+ * with chan < 0, accept any channel
+ * for further parameters, see s6dmac_enable_chan
+ * returns < 0 upon error, channel number otherwise
+ */
+static inline int s6dmac_request_chan(u32 dmac, int chan,
+ int prio,
+ int periphxfer,
+ int srcinc, int dstinc,
+ int comchunk,
+ int srcskip, int dstskip,
+ int burstsize,
+ int bandwidthconserve,
+ int lowwmark,
+ int timestamp,
+ int enable)
+{
+ int r = chan;
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ if (r < 0) {
+ r = (readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_CHAN)
+ & S6_DMA_NEXTFREE_CHAN_MASK;
+ }
+ if (r >= s6dmac_ctrl[_dmac_addr_index(dmac)].chan_nb) {
+ if (chan < 0)
+ r = -EBUSY;
+ else
+ r = -ENXIO;
+ } else if (((readl(dmac + S6_DMA_NEXTFREE) >> S6_DMA_NEXTFREE_ENA)
+ >> r) & 1) {
+ r = -EBUSY;
+ } else {
+ s6dmac_enable_chan(dmac, r, prio, periphxfer,
+ srcinc, dstinc, comchunk, srcskip, dstskip, burstsize,
+ bandwidthconserve, lowwmark, timestamp, enable);
+ }
+ spin_unlock_irqrestore(spinl, flags);
+ return r;
+}
+
+static inline void s6dmac_put_fifo(u32 dmac, int chan,
+ u32 src, u32 dst, u32 size)
+{
+ unsigned long flags;
+ spinlock_t *spinl = &s6dmac_ctrl[_dmac_addr_index(dmac)].lock;
+ spin_lock_irqsave(spinl, flags);
+ writel(src, dmac + S6_DMA_DESCRFIFO0);
+ writel(dst, dmac + S6_DMA_DESCRFIFO1);
+ writel(size, dmac + S6_DMA_DESCRFIFO2);
+ writel(chan, dmac + S6_DMA_DESCRFIFO3);
+ spin_unlock_irqrestore(spinl, flags);
+}
+
+static inline u32 s6dmac_channel_enabled(u32 dmac, int chan)
+{
+ return readl(DMA_CHNL(dmac, chan) + S6_DMA_CHNCTRL) &
+ (1 << S6_DMA_CHNCTRL_ENABLE);
+}
+
+/*
+ * group 1-4 data port channels
+ * with port=0..3, nrch=1-4 channels,
+ * frrep=0/1 (dis- or enable frame repeat)
+ */
+static inline void s6dmac_dp_setup_group(u32 dmac, int port,
+ int nrch, int frrep)
+{
+ const static u8 mask[4] = {0, 3, 1, 2};
+ BUG_ON(dmac != S6_REG_DPDMA);
+ if ((port < 0) || (port > 3) || (nrch < 1) || (nrch > 4))
+ return;
+ writel((mask[nrch - 1] << S6_DMA_DPORTCTRLGRP_NRCHANS)
+ | ((frrep ? 1 : 0) << S6_DMA_DPORTCTRLGRP_FRAMEREP),
+ dmac + S6_DMA_DPORTCTRLGRP(port));
+}
+
+static inline void s6dmac_dp_switch_group(u32 dmac, int port, int enable)
+{
+ u32 tmp;
+ BUG_ON(dmac != S6_REG_DPDMA);
+ tmp = readl(dmac + S6_DMA_DPORTCTRLGRP(port));
+ if (enable)
+ tmp |= (1 << S6_DMA_DPORTCTRLGRP_ENA);
+ else
+ tmp &= ~(1 << S6_DMA_DPORTCTRLGRP_ENA);
+ writel(tmp, dmac + S6_DMA_DPORTCTRLGRP(port));
+}
+
+extern void s6dmac_put_fifo_cache(u32 dmac, int chan,
+ u32 src, u32 dst, u32 size);
+extern void s6dmac_disable_error_irqs(u32 dmac, u32 mask);
+extern u32 s6dmac_int_sources(u32 dmac, u32 channel);
+extern void s6dmac_release_chan(u32 dmac, int chan);
+
+#endif /* __ASM_XTENSA_S6000_DMAC_H */
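A hedged usage sketch for the channel API declared in this header, based only on the parameter comments above: request any free channel on an engine, queue one transfer through the cache-aware FIFO helper, and hand the channel back to the caller for completion handling. The engine choice (S6_REG_HIFDMA), addresses and parameter values are illustrative, not taken from an actual driver.

#include <linux/errno.h>
#include <variant/dmac.h>

static int example_start_copy(u32 src, u32 dst, u32 size)
{
	int chan;

	chan = s6dmac_request_chan(S6_REG_HIFDMA, -1,	/* any free channel */
			0,	/* highest priority */
			-1,	/* no peripheral request line */
			1, 1,	/* increment src and dst */
			0,	/* no scatter/gather */
			0, 0,	/* no skips */
			7,	/* burst size for non-I2S transfers */
			-1,	/* no bandwidth conservation */
			0,	/* low watermark */
			0,	/* no timestamping */
			1);	/* enable immediately */
	if (chan < 0)
		return chan;

	if (s6dmac_fifo_full(S6_REG_HIFDMA, chan)) {
		s6dmac_release_chan(S6_REG_HIFDMA, chan);
		return -EBUSY;
	}
	s6dmac_put_fifo_cache(S6_REG_HIFDMA, chan, src, dst, size);

	/* Completion would normally be observed via s6dmac_termcnt_irq()
	 * from the engine's interrupt handler; the channel is returned so
	 * the caller can do that and release it later. */
	return chan;
}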
diff --git a/arch/xtensa/variants/s6000/include/variant/gpio.h b/arch/xtensa/variants/s6000/include/variant/gpio.h
index 8327f62167eb..8484ab0df461 100644
--- a/arch/xtensa/variants/s6000/include/variant/gpio.h
+++ b/arch/xtensa/variants/s6000/include/variant/gpio.h
@@ -1,6 +1,6 @@
#ifndef _XTENSA_VARIANT_S6000_GPIO_H
#define _XTENSA_VARIANT_S6000_GPIO_H
-extern int s6_gpio_init(void);
+extern int s6_gpio_init(u32 afsel);
#endif /* _XTENSA_VARIANT_S6000_GPIO_H */
diff --git a/arch/xtensa/variants/s6000/include/variant/irq.h b/arch/xtensa/variants/s6000/include/variant/irq.h
index fa031cb0acc4..97d6fc48deff 100644
--- a/arch/xtensa/variants/s6000/include/variant/irq.h
+++ b/arch/xtensa/variants/s6000/include/variant/irq.h
@@ -1,9 +1,9 @@
-#ifndef __XTENSA_S6000_IRQ_H
-#define __XTENSA_S6000_IRQ_H
+#ifndef _XTENSA_S6000_IRQ_H
+#define _XTENSA_S6000_IRQ_H
#define NO_IRQ (-1)
+#define VARIANT_NR_IRQS 8 /* GPIO interrupts */
extern void variant_irq_enable(unsigned int irq);
-extern void variant_irq_disable(unsigned int irq);
#endif /* __XTENSA_S6000_IRQ_H */
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 431f8b439553..7ec7d88c5999 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -266,6 +266,7 @@ config ACPI_DEBUG_FUNC_TRACE
config ACPI_PCI_SLOT
tristate "PCI slot detection driver"
+ depends on SYSFS
default n
help
This driver creates entries in /sys/bus/pci/slots/ for all PCI
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index 44c113d56045..1d7c34c73b20 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -8,6 +8,10 @@
* Licensed under the GPL-2 or later.
*/
+#define DRV_NAME "bfin-jtag-comm"
+#define DEV_NAME "ttyBFJC"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
#include <linux/circ_buf.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -22,18 +26,14 @@
#include <linux/tty_flip.h>
#include <asm/atomic.h>
+#define pr_init(fmt, args...) ({ static const __initconst char __fmt[] = fmt; printk(__fmt, ## args); })
+
/* See the Debug/Emulation chapter in the HRM */
#define EMUDOF 0x00000001 /* EMUDAT_OUT full & valid */
#define EMUDIF 0x00000002 /* EMUDAT_IN full & valid */
#define EMUDOOVF 0x00000004 /* EMUDAT_OUT overflow */
#define EMUDIOVF 0x00000008 /* EMUDAT_IN overflow */
-#define DRV_NAME "bfin-jtag-comm"
-#define DEV_NAME "ttyBFJC"
-
-#define pr_init(fmt, args...) ({ static const __initdata char __fmt[] = fmt; printk(__fmt, ## args); })
-#define debug(fmt, args...) pr_debug(DRV_NAME ": " fmt, ## args)
-
static inline uint32_t bfin_write_emudat(uint32_t emudat)
{
__asm__ __volatile__("emudat = %0;" : : "d"(emudat));
@@ -74,7 +74,7 @@ bfin_jc_emudat_manager(void *arg)
while (!kthread_should_stop()) {
/* no one left to give data to, so sleep */
if (bfin_jc_tty == NULL && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for readers\n");
+ pr_debug("waiting for readers\n");
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
__set_current_state(TASK_RUNNING);
@@ -82,7 +82,7 @@ bfin_jc_emudat_manager(void *arg)
/* no data available, so just chill */
if (!(bfin_read_DBGSTAT() & EMUDIF) && circ_empty(&bfin_jc_write_buf)) {
- debug("waiting for data (in_len = %i) (circ: %i %i)\n",
+ pr_debug("waiting for data (in_len = %i) (circ: %i %i)\n",
inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head);
if (inbound_len)
schedule();
@@ -99,11 +99,11 @@ bfin_jc_emudat_manager(void *arg)
if (tty != NULL) {
uint32_t emudat = bfin_read_emudat();
if (inbound_len == 0) {
- debug("incoming length: 0x%08x\n", emudat);
+ pr_debug("incoming length: 0x%08x\n", emudat);
inbound_len = emudat;
} else {
size_t num_chars = (4 <= inbound_len ? 4 : inbound_len);
- debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
+ pr_debug(" incoming data: 0x%08x (pushing %zu)\n", emudat, num_chars);
inbound_len -= num_chars;
tty_insert_flip_string(tty, (unsigned char *)&emudat, num_chars);
tty_flip_buffer_push(tty);
@@ -117,7 +117,7 @@ bfin_jc_emudat_manager(void *arg)
if (outbound_len == 0) {
outbound_len = circ_cnt(&bfin_jc_write_buf);
bfin_write_emudat(outbound_len);
- debug("outgoing length: 0x%08x\n", outbound_len);
+ pr_debug("outgoing length: 0x%08x\n", outbound_len);
} else {
struct tty_struct *tty;
int tail = bfin_jc_write_buf.tail;
@@ -136,7 +136,7 @@ bfin_jc_emudat_manager(void *arg)
if (tty)
tty_wakeup(tty);
mutex_unlock(&bfin_jc_tty_mutex);
- debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
+ pr_debug(" outgoing data: 0x%08x (pushing %zu)\n", emudat, ate);
}
}
}
@@ -149,7 +149,7 @@ static int
bfin_jc_open(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("open %lu\n", bfin_jc_count);
+ pr_debug("open %lu\n", bfin_jc_count);
++bfin_jc_count;
bfin_jc_tty = tty;
wake_up_process(bfin_jc_kthread);
@@ -161,7 +161,7 @@ static void
bfin_jc_close(struct tty_struct *tty, struct file *filp)
{
mutex_lock(&bfin_jc_tty_mutex);
- debug("close %lu\n", bfin_jc_count);
+ pr_debug("close %lu\n", bfin_jc_count);
if (--bfin_jc_count == 0)
bfin_jc_tty = NULL;
wake_up_process(bfin_jc_kthread);
@@ -174,7 +174,7 @@ bfin_jc_circ_write(const unsigned char *buf, int count)
{
int i;
count = min(count, circ_free(&bfin_jc_write_buf));
- debug("going to write chunk of %i bytes\n", count);
+ pr_debug("going to write chunk of %i bytes\n", count);
for (i = 0; i < count; ++i)
circ_byte(&bfin_jc_write_buf, bfin_jc_write_buf.head + i) = buf[i];
bfin_jc_write_buf.head += i;
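The hunks above drop the driver-private debug() wrapper in favour of the kernel's pr_debug(), relying on pr_fmt() being defined before the first include so that every pr_*() message carries the driver prefix automatically. A userspace sketch that fakes the same mechanism with printf (the pr_debug macro here is a stand-in, not the kernel's):

#include <stdio.h>

#define DRV_NAME "bfin-jtag-comm"
#define pr_fmt(fmt) DRV_NAME ": " fmt

/* minimal stand-in for the kernel's pr_debug() */
#define pr_debug(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_debug("waiting for readers\n");	/* -> "bfin-jtag-comm: waiting for readers" */
	return 0;
}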
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 6799588b0099..65b6ff2442c6 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -1189,11 +1189,6 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
return -ENODEV;
}
- if (port % MAX_PORTS_PER_BOARD >= brd->numPorts) {
- retval = -ENODEV;
- goto out_unlock;
- }
-
ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
ch->port.count++;
tty->driver_data = ch;
@@ -1218,8 +1213,8 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
moxa_close_port(tty);
} else
ch->port.flags |= ASYNC_NORMAL_ACTIVE;
-out_unlock:
mutex_unlock(&moxa_openlock);
+
return retval;
}
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c
index 461ece591a5b..1c43c8cdee25 100644
--- a/drivers/char/n_hdlc.c
+++ b/drivers/char/n_hdlc.c
@@ -10,7 +10,6 @@
* Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
*
* Original release 01/11/99
- * $Id: n_hdlc.c,v 4.8 2003/05/06 21:18:51 paulkf Exp $
*
* This code is released under the GNU General Public License (GPL)
*
@@ -79,7 +78,6 @@
*/
#define HDLC_MAGIC 0x239e
-#define HDLC_VERSION "$Revision: 4.8 $"
#include <linux/module.h>
#include <linux/init.h>
@@ -114,7 +112,7 @@
#define MAX_HDLC_FRAME_SIZE 65535
#define DEFAULT_RX_BUF_COUNT 10
#define MAX_RX_BUF_COUNT 60
-#define DEFAULT_TX_BUF_COUNT 1
+#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
struct n_hdlc_buf *link;
@@ -199,6 +197,31 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty);
#define tty2n_hdlc(tty) ((struct n_hdlc *) ((tty)->disc_data))
#define n_hdlc2tty(n_hdlc) ((n_hdlc)->tty)
+static void flush_rx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
+}
+
+static void flush_tx_queue(struct tty_struct *tty)
+{
+ struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ struct n_hdlc_buf *buf;
+ unsigned long flags;
+
+ while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
+ spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+ if (n_hdlc->tbuf) {
+ n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+ n_hdlc->tbuf = NULL;
+ }
+ spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+}
+
static struct tty_ldisc_ops n_hdlc_ldisc = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
@@ -211,6 +234,7 @@ static struct tty_ldisc_ops n_hdlc_ldisc = {
.poll = n_hdlc_tty_poll,
.receive_buf = n_hdlc_tty_receive,
.write_wakeup = n_hdlc_tty_wakeup,
+ .flush_buffer = flush_rx_queue,
};
/**
@@ -341,10 +365,7 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
set_bit(TTY_NO_WRITE_SPLIT,&tty->flags);
#endif
- /* Flush any pending characters in the driver and discipline. */
- if (tty->ldisc->ops->flush_buffer)
- tty->ldisc->ops->flush_buffer(tty);
-
+ /* flush receive data from driver */
tty_driver_flush_buffer(tty);
if (debuglevel >= DEBUG_LEVEL_INFO)
@@ -763,6 +784,14 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
error = put_user(count, (int __user *)arg);
break;
+ case TCFLSH:
+ switch (arg) {
+ case TCIOFLUSH:
+ case TCOFLUSH:
+ flush_tx_queue(tty);
+ }
+ /* fall through to default */
+
default:
error = n_tty_ioctl_helper(tty, file, cmd, arg);
break;
@@ -919,8 +948,7 @@ static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
} /* end of n_hdlc_buf_get() */
static char hdlc_banner[] __initdata =
- KERN_INFO "HDLC line discipline: version " HDLC_VERSION
- ", maxframe=%u\n";
+ KERN_INFO "HDLC line discipline maxframe=%u\n";
static char hdlc_register_ok[] __initdata =
KERN_INFO "N_HDLC line discipline registered.\n";
static char hdlc_register_fail[] __initdata =
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c
index d2e93e343226..2e99158ebb8a 100644
--- a/drivers/char/n_r3964.c
+++ b/drivers/char/n_r3964.c
@@ -1062,7 +1062,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
struct r3964_client_info *pClient;
struct r3964_message *pMsg;
struct r3964_client_message theMsg;
- int count;
+ int ret;
TRACE_L("read()");
@@ -1074,8 +1074,8 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
if (pMsg == NULL) {
/* no messages available. */
if (file->f_flags & O_NONBLOCK) {
- unlock_kernel();
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto unlock;
}
/* block until there is a message: */
wait_event_interruptible(pInfo->read_wait,
@@ -1085,29 +1085,31 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
/* If we still haven't got a message, we must have been signalled */
if (!pMsg) {
- unlock_kernel();
- return -EINTR;
+ ret = -EINTR;
+ goto unlock;
}
/* deliver msg to client process: */
theMsg.msg_id = pMsg->msg_id;
theMsg.arg = pMsg->arg;
theMsg.error_code = pMsg->error_code;
- count = sizeof(struct r3964_client_message);
+ ret = sizeof(struct r3964_client_message);
kfree(pMsg);
TRACE_M("r3964_read - msg kfree %p", pMsg);
- if (copy_to_user(buf, &theMsg, count)) {
- unlock_kernel();
- return -EFAULT;
+ if (copy_to_user(buf, &theMsg, ret)) {
+ ret = -EFAULT;
+ goto unlock;
}
- TRACE_PS("read - return %d", count);
- return count;
+ TRACE_PS("read - return %d", ret);
+ goto unlock;
}
+ ret = -EPERM;
+unlock:
unlock_kernel();
- return -EPERM;
+ return ret;
}
static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index dbb912574569..881934c068c8 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -1575,7 +1575,8 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
clear_bit(LOCK_IO, &dev->flags);
wake_up_interruptible(&dev->ioq);
- return 0;
+ rc = 0;
+ break;
case CM_IOCSPTS:
{
struct ptsreq krnptsreq;
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index e6ce632a393e..7539bed0f7e0 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -396,7 +396,8 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
kbd = kbd_table + console;
switch (cmd) {
case TIOCLINUX:
- return tioclinux(tty, arg);
+ ret = tioclinux(tty, arg);
+ break;
case KIOCSOUND:
if (!perm)
goto eperm;
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 450902438208..13efcd362072 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,28 +1,29 @@
-comment "A new alternative FireWire stack is available with EXPERIMENTAL=y"
- depends on EXPERIMENTAL=n
-
-comment "Enable only one of the two stacks, unless you know what you are doing"
- depends on EXPERIMENTAL
+comment "You can enable one or both FireWire driver stacks."
+comment "See the help texts for more information."
config FIREWIRE
- tristate "New FireWire stack, EXPERIMENTAL"
- depends on EXPERIMENTAL
+ tristate "FireWire driver stack"
select CRC_ITU_T
help
- This is the "Juju" FireWire stack, a new alternative implementation
- designed for robustness and simplicity. You can build either this
- stack, or the old stack (the ieee1394 driver, ohci1394 etc.) or both.
- Please read http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
- before you enable the new stack.
+ This is the new-generation IEEE 1394 (FireWire) driver stack
+ a.k.a. Juju, a new implementation designed for robustness and
+ simplicity.
+ See http://ieee1394.wiki.kernel.org/index.php/Juju_Migration
+ for information about migration from the older Linux 1394 stack
+ to the new driver stack.
To compile this driver as a module, say M here: the module will be
called firewire-core.
This module functionally replaces ieee1394, raw1394, and video1394.
To access it from application programs, you generally need at least
- libraw1394 version 2. IIDC/DCAM applications also need libdc1394
- version 2. No libraries are required to access storage devices
- through the firewire-sbp2 driver.
+ libraw1394 v2. IIDC/DCAM applications need libdc1394 v2.
+ No libraries are required to access storage devices through the
+ firewire-sbp2 driver.
+
+ NOTE:
+ FireWire audio devices currently require the old drivers (ieee1394,
+ ohci1394, raw1394).
config FIREWIRE_OHCI
tristate "OHCI-1394 controllers"
@@ -37,11 +38,9 @@ config FIREWIRE_OHCI
stack.
NOTE:
-
- You should only build either firewire-ohci or the old ohci1394 driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -50,12 +49,7 @@ config FIREWIRE_OHCI
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
config FIREWIRE_OHCI_DEBUG
bool
@@ -77,3 +71,17 @@ config FIREWIRE_SBP2
You should also enable support for disks, CD-ROMs, etc. in the SCSI
configuration section.
+
+config FIREWIRE_NET
+ tristate "IP networking over 1394 (EXPERIMENTAL)"
+ depends on FIREWIRE && INET && EXPERIMENTAL
+ help
+ This enables IPv4 over IEEE 1394, providing IP connectivity with
+ other implementations of RFC 2734 as found on several operating
+ systems. Multicast support is currently limited.
+
+ NOTE, this driver is not stable yet!
+
+ To compile this driver as a module, say M here: The module will be
+ called firewire-net. It replaces eth1394 of the classic IEEE 1394
+ stack.
diff --git a/drivers/firewire/Makefile b/drivers/firewire/Makefile
index bc3b9bf822bf..a8f9bb6d9fdf 100644
--- a/drivers/firewire/Makefile
+++ b/drivers/firewire/Makefile
@@ -6,7 +6,9 @@ firewire-core-y += core-card.o core-cdev.o core-device.o \
core-iso.o core-topology.o core-transaction.o
firewire-ohci-y += ohci.o
firewire-sbp2-y += sbp2.o
+firewire-net-y += net.o
-obj-$(CONFIG_FIREWIRE) += firewire-core.o
+obj-$(CONFIG_FIREWIRE) += firewire-core.o
obj-$(CONFIG_FIREWIRE_OHCI) += firewire-ohci.o
obj-$(CONFIG_FIREWIRE_SBP2) += firewire-sbp2.o
+obj-$(CONFIG_FIREWIRE_NET) += firewire-net.o
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 4c1be64fdddd..543fccac81bb 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -176,6 +176,7 @@ int fw_core_add_descriptor(struct fw_descriptor *desc)
return 0;
}
+EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
@@ -189,6 +190,7 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
+EXPORT_SYMBOL(fw_core_remove_descriptor);
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
@@ -459,11 +461,11 @@ EXPORT_SYMBOL(fw_card_add);
/*
- * The next few functions implements a dummy driver that use once a
- * card driver shuts down an fw_card. This allows the driver to
- * cleanly unload, as all IO to the card will be handled by the dummy
- * driver instead of calling into the (possibly) unloaded module. The
- * dummy driver just fails all IO.
+ * The next few functions implement a dummy driver that is used once a card
+ * driver shuts down an fw_card. This allows the driver to cleanly unload,
+ * as all IO to the card will be handled (and failed) by the dummy driver
+ * instead of calling into the module. Only functions for iso context
+ * shutdown still need to be provided by the card driver.
*/
static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
@@ -510,7 +512,7 @@ static int dummy_enable_phys_dma(struct fw_card *card,
return -ENODEV;
}
-static struct fw_card_driver dummy_driver = {
+static const struct fw_card_driver dummy_driver_template = {
.enable = dummy_enable,
.update_phy_reg = dummy_update_phy_reg,
.set_config_rom = dummy_set_config_rom,
@@ -529,6 +531,8 @@ void fw_card_release(struct kref *kref)
void fw_core_remove_card(struct fw_card *card)
{
+ struct fw_card_driver dummy_driver = dummy_driver_template;
+
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_core_initiate_bus_reset(card, 1);
@@ -537,7 +541,9 @@ void fw_core_remove_card(struct fw_card *card)
list_del_init(&card->link);
mutex_unlock(&card_mutex);
- /* Set up the dummy driver. */
+ /* Switch off most of the card driver interface. */
+ dummy_driver.free_iso_context = card->driver->free_iso_context;
+ dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
fw_destroy_nodes(card);
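
The change above turns the shared, mutable dummy_driver into a const template that fw_core_remove_card() copies onto the stack and then patches with the outgoing driver's free_iso_context and stop_iso hooks, so iso contexts can still be torn down after the card driver is gone. A simplified, self-contained sketch of that idiom (the struct and return values here are invented for illustration):

#include <stdio.h>

struct card_ops {
	int  (*enable)(void);
	void (*stop_iso)(void);
};

static int dummy_enable(void)
{
	return -19;				/* -ENODEV: all normal IO fails */
}

static const struct card_ops dummy_template = {
	.enable = dummy_enable,			/* .stop_iso filled in per card */
};

static void real_stop_iso(void)
{
	puts("real stop_iso still reachable");
}

static const struct card_ops real_ops = {
	.stop_iso = real_stop_iso,
};

int main(void)
{
	const struct card_ops *driver = &real_ops;
	struct card_ops dummy = dummy_template;	/* per-card copy, as in the patch */

	dummy.stop_iso = driver->stop_iso;	/* keep only the iso shutdown hook */
	driver = &dummy;

	printf("enable() -> %d\n", driver->enable());
	driver->stop_iso();
	return 0;
}
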
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 28076c892d7e..166f19c6d38d 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -71,7 +71,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
for (j = 0; j < i; j++) {
address = page_private(buffer->pages[j]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, direction);
__free_page(buffer->pages[j]);
}
kfree(buffer->pages);
@@ -80,6 +80,7 @@ int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
return -ENOMEM;
}
+EXPORT_SYMBOL(fw_iso_buffer_init);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
@@ -107,13 +108,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
for (i = 0; i < buffer->page_count; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
- PAGE_SIZE, DMA_TO_DEVICE);
+ PAGE_SIZE, buffer->direction);
__free_page(buffer->pages[i]);
}
kfree(buffer->pages);
buffer->pages = NULL;
}
+EXPORT_SYMBOL(fw_iso_buffer_destroy);
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
@@ -136,6 +138,7 @@ struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
return ctx;
}
+EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
@@ -143,12 +146,14 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
card->driver->free_iso_context(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
+EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
@@ -159,11 +164,13 @@ int fw_iso_context_queue(struct fw_iso_context *ctx,
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
+EXPORT_SYMBOL(fw_iso_context_queue);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
+EXPORT_SYMBOL(fw_iso_context_stop);
/*
* Isochronous bus resource management (channels, bandwidth), client side
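
With fw_iso_buffer_init(), fw_iso_context_create() and friends now exported, other in-tree modules can drive iso reception the same way firewire-net does. A condensed kernel-style sketch of that lifecycle, modelled on fwnet_broadcast_start() in net.c below; it assumes the declarations now live in <linux/firewire.h> (as net.c's include list suggests) and trims the per-packet buffer bookkeeping:

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firewire.h>

static int example_iso_rx_start(struct fw_card *card,
				struct fw_iso_context **ctx,
				struct fw_iso_buffer *buf,
				fw_iso_callback_t callback, void *cb_data)
{
	struct fw_iso_packet packet = {
		.payload_length	= 4096,
		.interrupt	= 1,
		.tag		= 3,
		.header_length	= 8,	/* GASP header */
	};
	int err;

	*ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
				     31 /* broadcast channel */,
				     card->link_speed, 8, callback, cb_data);
	if (IS_ERR(*ctx))
		return PTR_ERR(*ctx);

	err = fw_iso_buffer_init(buf, card, 4, DMA_FROM_DEVICE);
	if (err < 0)
		goto destroy_ctx;

	err = fw_iso_context_queue(*ctx, &packet, buf, 0);
	if (err < 0)
		goto destroy_buf;

	return fw_iso_context_start(*ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);

destroy_buf:
	fw_iso_buffer_destroy(buf, card);
destroy_ctx:
	fw_iso_context_destroy(*ctx);
	return err;
}

Teardown would mirror fwnet_remove() below: fw_iso_context_stop(), fw_iso_buffer_destroy(), fw_iso_context_destroy().
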
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0a25a7b38a80..c3cfc647e5e3 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -1,7 +1,6 @@
#ifndef _FIREWIRE_CORE_H
#define _FIREWIRE_CORE_H
-#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/idr.h>
@@ -97,17 +96,6 @@ int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_compute_block_crc(u32 *block);
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
-struct fw_descriptor {
- struct list_head link;
- size_t length;
- u32 immediate;
- u32 key;
- const u32 *data;
-};
-
-int fw_core_add_descriptor(struct fw_descriptor *desc);
-void fw_core_remove_descriptor(struct fw_descriptor *desc);
-
/* -cdev */
@@ -130,77 +118,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* -iso */
-/*
- * The iso packet format allows for an immediate header/payload part
- * stored in 'header' immediately after the packet info plus an
- * indirect payload part that is pointer to by the 'payload' field.
- * Applications can use one or the other or both to implement simple
- * low-bandwidth streaming (e.g. audio) or more advanced
- * scatter-gather streaming (e.g. assembling video frame automatically).
- */
-struct fw_iso_packet {
- u16 payload_length; /* Length of indirect payload. */
- u32 interrupt:1; /* Generate interrupt on this packet */
- u32 skip:1; /* Set to not send packet at all. */
- u32 tag:2;
- u32 sy:4;
- u32 header_length:8; /* Length of immediate header. */
- u32 header[0];
-};
-
-#define FW_ISO_CONTEXT_TRANSMIT 0
-#define FW_ISO_CONTEXT_RECEIVE 1
-
-#define FW_ISO_CONTEXT_MATCH_TAG0 1
-#define FW_ISO_CONTEXT_MATCH_TAG1 2
-#define FW_ISO_CONTEXT_MATCH_TAG2 4
-#define FW_ISO_CONTEXT_MATCH_TAG3 8
-#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
-
-/*
- * An iso buffer is just a set of pages mapped for DMA in the
- * specified direction. Since the pages are to be used for DMA, they
- * are not mapped into the kernel virtual address space. We store the
- * DMA address in the page private. The helper function
- * fw_iso_buffer_map() will map the pages into a given vma.
- */
-struct fw_iso_buffer {
- enum dma_data_direction direction;
- struct page **pages;
- int page_count;
-};
-
-typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
- u32 cycle, size_t header_length,
- void *header, void *data);
-
-struct fw_iso_context {
- struct fw_card *card;
- int type;
- int channel;
- int speed;
- size_t header_size;
- fw_iso_callback_t callback;
- void *callback_data;
-};
-
-int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
- int page_count, enum dma_data_direction direction);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
-void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
-
-struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
- int type, int channel, int speed, size_t header_size,
- fw_iso_callback_t callback, void *callback_data);
-int fw_iso_context_queue(struct fw_iso_context *ctx,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload);
-int fw_iso_context_start(struct fw_iso_context *ctx,
- int cycle, int sync, int tags);
-int fw_iso_context_stop(struct fw_iso_context *ctx);
-void fw_iso_context_destroy(struct fw_iso_context *ctx);
-
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth, bool allocate);
@@ -285,9 +203,4 @@ void fw_flush_transactions(struct fw_card *card);
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
-static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
-{
- return tag << 14 | channel << 8 | sy;
-}
-
#endif /* _FIREWIRE_CORE_H */
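
fw_stream_packet_destination_id() is dropped from the private core.h here; net.c below still calls it, so it is presumably provided by the public <linux/firewire.h> now. The packing is simply tag << 14 | channel << 8 | sy, which a standalone check can confirm for the GASP broadcast case used by net.c (tag 3, channel 31, sy 0):

#include <stdio.h>

static int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
	return tag << 14 | channel << 8 | sy;
}

int main(void)
{
	/* Expected: 0xdf00 */
	printf("0x%04x\n", fw_stream_packet_destination_id(3, 31, 0));
	return 0;
}
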
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
new file mode 100644
index 000000000000..a42209a73aed
--- /dev/null
+++ b/drivers/firewire/net.c
@@ -0,0 +1,1655 @@
+/*
+ * IPv4 over IEEE 1394, per RFC 2734
+ *
+ * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
+ *
+ * based on eth1394 by Ben Collins et al
+ */
+
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/highmem.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#include <asm/unaligned.h>
+#include <net/arp.h>
+
+#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */
+#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2)
+
+#define IEEE1394_BROADCAST_CHANNEL 31
+#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
+#define IEEE1394_MAX_PAYLOAD_S100 512
+#define FWNET_NO_FIFO_ADDR (~0ULL)
+
+#define IANA_SPECIFIER_ID 0x00005eU
+#define RFC2734_SW_VERSION 0x000001U
+
+#define IEEE1394_GASP_HDR_SIZE 8
+
+#define RFC2374_UNFRAG_HDR_SIZE 4
+#define RFC2374_FRAG_HDR_SIZE 8
+#define RFC2374_FRAG_OVERHEAD 4
+
+#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
+#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
+#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
+#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
+
+#define RFC2734_HW_ADDR_LEN 16
+
+struct rfc2734_arp {
+ __be16 hw_type; /* 0x0018 */
+ __be16 proto_type; /* 0x0806 */
+ u8 hw_addr_len; /* 16 */
+ u8 ip_addr_len; /* 4 */
+ __be16 opcode; /* ARP Opcode */
+ /* Above is exactly the same format as struct arphdr */
+
+ __be64 s_uniq_id; /* Sender's 64bit EUI */
+ u8 max_rec; /* Sender's max packet size */
+ u8 sspd; /* Sender's max speed */
+ __be16 fifo_hi; /* hi 16bits of sender's FIFO addr */
+ __be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
+ __be32 sip; /* Sender's IP Address */
+ __be32 tip; /* IP Address of requested hw addr */
+} __attribute__((packed));
+
+/* This header format is specific to this driver implementation. */
+#define FWNET_ALEN 8
+#define FWNET_HLEN 10
+struct fwnet_header {
+ u8 h_dest[FWNET_ALEN]; /* destination address */
+ __be16 h_proto; /* packet type ID field */
+} __attribute__((packed));
+
+/* IPv4 and IPv6 encapsulation header */
+struct rfc2734_header {
+ u32 w0;
+ u32 w1;
+};
+
+#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
+#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
+#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
+#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
+
+#define fwnet_set_hdr_lf(lf) ((lf) << 30)
+#define fwnet_set_hdr_ether_type(et) (et)
+#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
+#define fwnet_set_hdr_fg_off(fgo) (fgo)
+
+#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
+
+static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
+ | fwnet_set_hdr_ether_type(ether_type);
+}
+
+static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
+ unsigned ether_type, unsigned dg_size, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_ether_type(ether_type);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
+
+static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
+ unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
+{
+ hdr->w0 = fwnet_set_hdr_lf(lf)
+ | fwnet_set_hdr_dg_size(dg_size)
+ | fwnet_set_hdr_fg_off(fg_off);
+ hdr->w1 = fwnet_set_hdr_dgl(dgl);
+}
+
+/* This list keeps track of what parts of the datagram have been filled in */
+struct fwnet_fragment_info {
+ struct list_head fi_link;
+ u16 offset;
+ u16 len;
+};
+
+struct fwnet_partial_datagram {
+ struct list_head pd_link;
+ struct list_head fi_list;
+ struct sk_buff *skb;
+ /* FIXME Why not use skb->data? */
+ char *pbuf;
+ u16 datagram_label;
+ u16 ether_type;
+ u16 datagram_size;
+};
+
+static DEFINE_MUTEX(fwnet_device_mutex);
+static LIST_HEAD(fwnet_device_list);
+
+struct fwnet_device {
+ struct list_head dev_link;
+ spinlock_t lock;
+ enum {
+ FWNET_BROADCAST_ERROR,
+ FWNET_BROADCAST_RUNNING,
+ FWNET_BROADCAST_STOPPED,
+ } broadcast_state;
+ struct fw_iso_context *broadcast_rcv_context;
+ struct fw_iso_buffer broadcast_rcv_buffer;
+ void **broadcast_rcv_buffer_ptrs;
+ unsigned broadcast_rcv_next_ptr;
+ unsigned num_broadcast_rcv_ptrs;
+ unsigned rcv_buffer_size;
+ /*
+ * This value is the maximum unfragmented datagram size that can be
+ * sent by the hardware. It already has the GASP overhead and the
+ * unfragmented datagram header overhead calculated into it.
+ */
+ unsigned broadcast_xmt_max_payload;
+ u16 broadcast_xmt_datagramlabel;
+
+ /*
+ * The CSR address that remote nodes must send datagrams to for us to
+ * receive them.
+ */
+ struct fw_address_handler handler;
+ u64 local_fifo;
+
+ /* List of packets to be sent */
+ struct list_head packet_list;
+ /*
+ * List of packets that were broadcasted. When we get an ISO interrupt
+ * one of them has been sent
+ */
+ struct list_head broadcasted_list;
+ /* List of packets that have been sent but not yet acked */
+ struct list_head sent_list;
+
+ struct list_head peer_list;
+ struct fw_card *card;
+ struct net_device *netdev;
+};
+
+struct fwnet_peer {
+ struct list_head peer_link;
+ struct fwnet_device *dev;
+ u64 guid;
+ u64 fifo;
+
+ /* guarded by dev->lock */
+ struct list_head pd_list; /* received partial datagrams */
+ unsigned pdg_size; /* pd_list size */
+
+ u16 datagram_label; /* outgoing datagram label */
+ unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
+ int node_id;
+ int generation;
+ unsigned speed;
+};
+
+/* This is our task struct. It's used for the packet complete callback. */
+struct fwnet_packet_task {
+ /*
+ * ptask can actually be on dev->packet_list, dev->broadcasted_list,
+ * or dev->sent_list depending on its current state.
+ */
+ struct list_head pt_link;
+ struct fw_transaction transaction;
+ struct rfc2734_header hdr;
+ struct sk_buff *skb;
+ struct fwnet_device *dev;
+
+ int outstanding_pkts;
+ unsigned max_payload;
+ u64 fifo_addr;
+ u16 dest_node;
+ u8 generation;
+ u8 speed;
+};
+
+/*
+ * saddr == NULL means use device source address.
+ * daddr == NULL means leave destination address (eg unresolved arp).
+ */
+static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned len)
+{
+ struct fwnet_header *h;
+
+ h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
+ put_unaligned_be16(type, &h->h_proto);
+
+ if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ memset(h->h_dest, 0, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ if (daddr) {
+ memcpy(h->h_dest, daddr, net->addr_len);
+
+ return net->hard_header_len;
+ }
+
+ return -net->hard_header_len;
+}
+
+static int fwnet_header_rebuild(struct sk_buff *skb)
+{
+ struct fwnet_header *h = (struct fwnet_header *)skb->data;
+
+ if (get_unaligned_be16(&h->h_proto) == ETH_P_IP)
+ return arp_find((unsigned char *)&h->h_dest, skb);
+
+ fw_notify("%s: unable to resolve type %04x addresses\n",
+ skb->dev->name, be16_to_cpu(h->h_proto));
+ return 0;
+}
+
+static int fwnet_header_cache(const struct neighbour *neigh,
+ struct hh_cache *hh)
+{
+ struct net_device *net;
+ struct fwnet_header *h;
+
+ if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
+ return -1;
+ net = neigh->dev;
+ h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
+ h->h_proto = hh->hh_type;
+ memcpy(h->h_dest, neigh->ha, net->addr_len);
+ hh->hh_len = FWNET_HLEN;
+
+ return 0;
+}
+
+/* Called by Address Resolution module to notify changes in address. */
+static void fwnet_header_cache_update(struct hh_cache *hh,
+ const struct net_device *net, const unsigned char *haddr)
+{
+ memcpy((u8 *)hh->hh_data + 16 - FWNET_HLEN, haddr, net->addr_len);
+}
+
+static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
+
+ return FWNET_ALEN;
+}
+
+static const struct header_ops fwnet_header_ops = {
+ .create = fwnet_header_create,
+ .rebuild = fwnet_header_rebuild,
+ .cache = fwnet_header_cache,
+ .cache_update = fwnet_header_cache_update,
+ .parse = fwnet_header_parse,
+};
+
+/* FIXME: is this correct for all cases? */
+static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
+ unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi;
+ unsigned end = offset + len;
+
+ list_for_each_entry(fi, &pd->fi_list, fi_link)
+ if (offset < fi->offset + fi->len && end > fi->offset)
+ return true;
+
+ return false;
+}
+
+/* Assumes that new fragment does not overlap any existing fragments */
+static struct fwnet_fragment_info *fwnet_frag_new(
+ struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
+{
+ struct fwnet_fragment_info *fi, *fi2, *new;
+ struct list_head *list;
+
+ list = &pd->fi_list;
+ list_for_each_entry(fi, &pd->fi_list, fi_link) {
+ if (fi->offset + fi->len == offset) {
+ /* The new fragment can be tacked on to the end */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.next,
+ struct fwnet_fragment_info, fi_link);
+ if (fi->offset + fi->len == fi2->offset) {
+ /* glue fragments together */
+ fi->len += len + fi2->len;
+ list_del(&fi2->fi_link);
+ kfree(fi2);
+ } else {
+ fi->len += len;
+ }
+
+ return fi;
+ }
+ if (offset + len == fi->offset) {
+ /* The new fragment can be tacked on to the beginning */
+ /* Did the new fragment plug a hole? */
+ fi2 = list_entry(fi->fi_link.prev,
+ struct fwnet_fragment_info, fi_link);
+ if (fi2->offset + fi2->len == fi->offset) {
+ /* glue fragments together */
+ fi2->len += fi->len + len;
+ list_del(&fi->fi_link);
+ kfree(fi);
+
+ return fi2;
+ }
+ fi->offset = offset;
+ fi->len += len;
+
+ return fi;
+ }
+ if (offset > fi->offset + fi->len) {
+ list = &fi->fi_link;
+ break;
+ }
+ if (offset + len < fi->offset) {
+ list = fi->fi_link.prev;
+ break;
+ }
+ }
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new) {
+ fw_error("out of memory\n");
+ return NULL;
+ }
+
+ new->offset = offset;
+ new->len = len;
+ list_add(&new->fi_link, list);
+
+ return new;
+}
+
+static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
+ struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
+ void *frag_buf, unsigned frag_off, unsigned frag_len)
+{
+ struct fwnet_partial_datagram *new;
+ struct fwnet_fragment_info *fi;
+
+ new = kmalloc(sizeof(*new), GFP_ATOMIC);
+ if (!new)
+ goto fail;
+
+ INIT_LIST_HEAD(&new->fi_list);
+ fi = fwnet_frag_new(new, frag_off, frag_len);
+ if (fi == NULL)
+ goto fail_w_new;
+
+ new->datagram_label = datagram_label;
+ new->datagram_size = dg_size;
+ new->skb = dev_alloc_skb(dg_size + net->hard_header_len + 15);
+ if (new->skb == NULL)
+ goto fail_w_fi;
+
+ skb_reserve(new->skb, (net->hard_header_len + 15) & ~15);
+ new->pbuf = skb_put(new->skb, dg_size);
+ memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+ list_add_tail(&new->pd_link, &peer->pd_list);
+
+ return new;
+
+fail_w_fi:
+ kfree(fi);
+fail_w_new:
+ kfree(new);
+fail:
+ fw_error("out of memory\n");
+
+ return NULL;
+}
+
+static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
+ u16 datagram_label)
+{
+ struct fwnet_partial_datagram *pd;
+
+ list_for_each_entry(pd, &peer->pd_list, pd_link)
+ if (pd->datagram_label == datagram_label)
+ return pd;
+
+ return NULL;
+}
+
+
+static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
+{
+ struct fwnet_fragment_info *fi, *n;
+
+ list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
+ kfree(fi);
+
+ list_del(&old->pd_link);
+ dev_kfree_skb_any(old->skb);
+ kfree(old);
+}
+
+static bool fwnet_pd_update(struct fwnet_peer *peer,
+ struct fwnet_partial_datagram *pd, void *frag_buf,
+ unsigned frag_off, unsigned frag_len)
+{
+ if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
+ return false;
+
+ memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+ /*
+ * Move list entry to beginning of list so that oldest partial
+ * datagrams percolate to the end of the list
+ */
+ list_move_tail(&pd->pd_link, &peer->pd_list);
+
+ return true;
+}
+
+static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
+{
+ struct fwnet_fragment_info *fi;
+
+ fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
+
+ return fi->len == pd->datagram_size;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
+ u64 guid)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->guid == guid)
+ return peer;
+
+ return NULL;
+}
+
+/* caller must hold dev->lock */
+static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
+ int node_id, int generation)
+{
+ struct fwnet_peer *peer;
+
+ list_for_each_entry(peer, &dev->peer_list, peer_link)
+ if (peer->node_id == node_id &&
+ peer->generation == generation)
+ return peer;
+
+ return NULL;
+}
+
+/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
+static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
+{
+ max_rec = min(max_rec, speed + 8);
+ max_rec = min(max_rec, 0xbU); /* <= 4096 */
+ if (max_rec < 8) {
+ fw_notify("max_rec %x out of range\n", max_rec);
+ max_rec = 8;
+ }
+
+ return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
+}
+
+
+static int fwnet_finish_incoming_packet(struct net_device *net,
+ struct sk_buff *skb, u16 source_node_id,
+ bool is_broadcast, u16 ether_type)
+{
+ struct fwnet_device *dev;
+ static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
+ int status;
+ __be64 guid;
+
+ dev = netdev_priv(net);
+ /* Write metadata, and then pass to the receive level */
+ skb->dev = net;
+ skb->ip_summed = CHECKSUM_UNNECESSARY; /* don't check it */
+
+ /*
+ * Parse the encapsulation header. This actually does the job of
+ * converting to an ethernet frame header, as well as arp
+ * conversion if needed. ARP conversion is easier in this
+ * direction, since we are using ethernet as our backend.
+ */
+ /*
+ * If this is an ARP packet, convert it. First, we want to make
+ * use of some of the fields, since they tell us a little bit
+ * about the sending machine.
+ */
+ if (ether_type == ETH_P_ARP) {
+ struct rfc2734_arp *arp1394;
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ u64 fifo_addr;
+ u64 peer_guid;
+ unsigned sspd;
+ u16 max_payload;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ arp1394 = (struct rfc2734_arp *)skb->data;
+ arp = (struct arphdr *)skb->data;
+ arp_ptr = (unsigned char *)(arp + 1);
+ peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
+ fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
+ | get_unaligned_be32(&arp1394->fifo_lo);
+
+ sspd = arp1394->sspd;
+ /* Sanity check. OS X 10.3 PPC reportedly sends 131. */
+ if (sspd > SCODE_3200) {
+ fw_notify("sspd %x out of range\n", sspd);
+ sspd = SCODE_3200;
+ }
+ max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
+
+ spin_lock_irqsave(&dev->lock, flags);
+ peer = fwnet_peer_find_by_guid(dev, peer_guid);
+ if (peer) {
+ peer->fifo = fifo_addr;
+
+ if (peer->speed > sspd)
+ peer->speed = sspd;
+ if (peer->max_payload > max_payload)
+ peer->max_payload = max_payload;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!peer) {
+ fw_notify("No peer for ARP packet from %016llx\n",
+ (unsigned long long)peer_guid);
+ goto failed_proto;
+ }
+
+ /*
+ * Now that we're done with the 1394 specific stuff, we'll
+ * need to alter some of the data. Believe it or not, all
+ * that needs to be done is sender_IP_address needs to be
+ * moved, the destination hardware address gets stuffed
+ * in and the hardware address length set to 8.
+ *
+ * IMPORTANT: The code below overwrites 1394 specific data
+ * needed above so keep the munging of the data for the
+ * higher level IP stack last.
+ */
+
+ arp->ar_hln = 8;
+ /* skip over sender unique id */
+ arp_ptr += arp->ar_hln;
+ /* move sender IP addr */
+ put_unaligned(arp1394->sip, (u32 *)arp_ptr);
+ /* skip over sender IP addr */
+ arp_ptr += arp->ar_pln;
+
+ if (arp->ar_op == htons(ARPOP_REQUEST))
+ memset(arp_ptr, 0, sizeof(u64));
+ else
+ memcpy(arp_ptr, net->dev_addr, sizeof(u64));
+ }
+
+ /* Now add the ethernet header. */
+ guid = cpu_to_be64(dev->card->guid);
+ if (dev_hard_header(skb, net, ether_type,
+ is_broadcast ? &broadcast_hw : &guid,
+ NULL, skb->len) >= 0) {
+ struct fwnet_header *eth;
+ u16 *rawp;
+ __be16 protocol;
+
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = (struct fwnet_header *)skb_mac_header(skb);
+ if (*eth->h_dest & 1) {
+ if (memcmp(eth->h_dest, net->broadcast,
+ net->addr_len) == 0)
+ skb->pkt_type = PACKET_BROADCAST;
+#if 0
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+#endif
+ } else {
+ if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ if (ntohs(eth->h_proto) >= 1536) {
+ protocol = eth->h_proto;
+ } else {
+ rawp = (u16 *)skb->data;
+ if (*rawp == 0xffff)
+ protocol = htons(ETH_P_802_3);
+ else
+ protocol = htons(ETH_P_802_2);
+ }
+ skb->protocol = protocol;
+ }
+ status = netif_rx(skb);
+ if (status == NET_RX_DROP) {
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+ } else {
+ net->stats.rx_packets++;
+ net->stats.rx_bytes += skb->len;
+ }
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+
+ failed_proto:
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+
+ dev_kfree_skb_any(skb);
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ net->last_rx = jiffies;
+
+ return 0;
+}
+
+static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ int source_node_id, int generation,
+ bool is_broadcast)
+{
+ struct sk_buff *skb;
+ struct net_device *net = dev->netdev;
+ struct rfc2734_header hdr;
+ unsigned lf;
+ unsigned long flags;
+ struct fwnet_peer *peer;
+ struct fwnet_partial_datagram *pd;
+ int fg_off;
+ int dg_size;
+ u16 datagram_label;
+ int retval;
+ u16 ether_type;
+
+ hdr.w0 = be32_to_cpu(buf[0]);
+ lf = fwnet_get_hdr_lf(&hdr);
+ if (lf == RFC2374_HDR_UNFRAG) {
+ /*
+ * An unfragmented datagram has been received by the ieee1394
+ * bus. Build an skbuff around it so we can pass it to the
+ * high level network layer.
+ */
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ buf++;
+ len -= RFC2374_UNFRAG_HDR_SIZE;
+
+ skb = dev_alloc_skb(len + net->hard_header_len + 15);
+ if (unlikely(!skb)) {
+ fw_error("out of memory\n");
+ net->stats.rx_dropped++;
+
+ return -1;
+ }
+ skb_reserve(skb, (net->hard_header_len + 15) & ~15);
+ memcpy(skb_put(skb, len), buf, len);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ is_broadcast, ether_type);
+ }
+ /* A datagram fragment has been received, now the fun begins. */
+ hdr.w1 = ntohl(buf[1]);
+ buf += 2;
+ len -= RFC2374_FRAG_HDR_SIZE;
+ if (lf == RFC2374_HDR_FIRSTFRAG) {
+ ether_type = fwnet_get_hdr_ether_type(&hdr);
+ fg_off = 0;
+ } else {
+ ether_type = 0;
+ fg_off = fwnet_get_hdr_fg_off(&hdr);
+ }
+ datagram_label = fwnet_get_hdr_dgl(&hdr);
+ dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
+ if (!peer)
+ goto bad_proto;
+
+ pd = fwnet_pd_find(peer, datagram_label);
+ if (pd == NULL) {
+ while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
+ /* remove the oldest */
+ fwnet_pd_delete(list_first_entry(&peer->pd_list,
+ struct fwnet_partial_datagram, pd_link));
+ peer->pdg_size--;
+ }
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ goto bad_proto;
+ }
+ peer->pdg_size++;
+ } else {
+ if (fwnet_frag_overlap(pd, fg_off, len) ||
+ pd->datagram_size != dg_size) {
+ /*
+ * Differing datagram sizes or overlapping fragments,
+ * discard old datagram and start a new one.
+ */
+ fwnet_pd_delete(pd);
+ pd = fwnet_pd_new(net, peer, datagram_label,
+ dg_size, buf, fg_off, len);
+ if (pd == NULL) {
+ retval = -ENOMEM;
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ } else {
+ if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
+ /*
+ * Couldn't save off fragment anyway
+ * so might as well obliterate the
+ * datagram now.
+ */
+ fwnet_pd_delete(pd);
+ peer->pdg_size--;
+ goto bad_proto;
+ }
+ }
+ } /* new datagram or add to existing one */
+
+ if (lf == RFC2374_HDR_FIRSTFRAG)
+ pd->ether_type = ether_type;
+
+ if (fwnet_pd_is_complete(pd)) {
+ ether_type = pd->ether_type;
+ peer->pdg_size--;
+ skb = skb_get(pd->skb);
+ fwnet_pd_delete(pd);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ false, ether_type);
+ }
+ /*
+ * Datagram is not complete, we're done for the
+ * moment.
+ */
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+
+ bad_proto:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (netif_queue_stopped(net))
+ netif_wake_queue(net);
+
+ return 0;
+}
+
+static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
+ int tcode, int destination, int source, int generation,
+ int speed, unsigned long long offset, void *payload,
+ size_t length, void *callback_data)
+{
+ struct fwnet_device *dev = callback_data;
+ int rcode;
+
+ if (destination == IEEE1394_ALL_NODES) {
+ kfree(r);
+
+ return;
+ }
+
+ if (offset != dev->handler.offset)
+ rcode = RCODE_ADDRESS_ERROR;
+ else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
+ rcode = RCODE_TYPE_ERROR;
+ else if (fwnet_incoming_packet(dev, payload, length,
+ source, generation, false) != 0) {
+ fw_error("Incoming packet failure\n");
+ rcode = RCODE_CONFLICT_ERROR;
+ } else
+ rcode = RCODE_COMPLETE;
+
+ fw_send_response(card, r, rcode);
+}
+
+static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ u32 cycle, size_t header_length, void *header, void *data)
+{
+ struct fwnet_device *dev;
+ struct fw_iso_packet packet;
+ struct fw_card *card;
+ __be16 *hdr_ptr;
+ __be32 *buf_ptr;
+ int retval;
+ u32 length;
+ u16 source_node_id;
+ u32 specifier_id;
+ u32 ver;
+ unsigned long offset;
+ unsigned long flags;
+
+ dev = data;
+ card = dev->card;
+ hdr_ptr = header;
+ length = be16_to_cpup(hdr_ptr);
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
+ buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
+ if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
+ dev->broadcast_rcv_next_ptr = 0;
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
+ | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
+ ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
+ source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
+
+ if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+ buf_ptr += 2;
+ length -= IEEE1394_GASP_HDR_SIZE;
+ fwnet_incoming_packet(dev, buf_ptr, length,
+ source_node_id, -1, true);
+ }
+
+ packet.payload_length = dev->rcv_buffer_size;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+
+ spin_lock_irqsave(&dev->lock, flags);
+
+ retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (retval < 0)
+ fw_error("requeue failed\n");
+}
+
+static struct kmem_cache *fwnet_packet_task_cache;
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask);
+
+static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned long flags;
+
+ dev = ptask->dev;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_del(&ptask->pt_link);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->outstanding_pkts--; /* FIXME access inside lock */
+
+ if (ptask->outstanding_pkts > 0) {
+ u16 dg_size;
+ u16 fg_off;
+ u16 datagram_label;
+ u16 lf;
+ struct sk_buff *skb;
+
+ /* Update the ptask to point to the next fragment and send it */
+ lf = fwnet_get_hdr_lf(&ptask->hdr);
+ switch (lf) {
+ case RFC2374_HDR_LASTFRAG:
+ case RFC2374_HDR_UNFRAG:
+ default:
+ fw_error("Outstanding packet %x lf %x, header %x,%x\n",
+ ptask->outstanding_pkts, lf, ptask->hdr.w0,
+ ptask->hdr.w1);
+ BUG();
+
+ case RFC2374_HDR_FIRSTFRAG:
+ /* Set frag type here for future interior fragments */
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+
+ case RFC2374_HDR_INTFRAG:
+ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
+ fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
+ + ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
+ datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
+ break;
+ }
+ skb = ptask->skb;
+ skb_pull(skb, ptask->max_payload);
+ if (ptask->outstanding_pkts > 1) {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
+ dg_size, fg_off, datagram_label);
+ } else {
+ fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
+ dg_size, fg_off, datagram_label);
+ ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
+ }
+ fwnet_send_packet(ptask);
+ } else {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+}
+
+static void fwnet_write_complete(struct fw_card *card, int rcode,
+ void *payload, size_t length, void *data)
+{
+ struct fwnet_packet_task *ptask;
+
+ ptask = data;
+
+ if (rcode == RCODE_COMPLETE)
+ fwnet_transmit_packet_done(ptask);
+ else
+ fw_error("fwnet_write_complete: failed: %x\n", rcode);
+ /* ??? error recovery */
+}
+
+static int fwnet_send_packet(struct fwnet_packet_task *ptask)
+{
+ struct fwnet_device *dev;
+ unsigned tx_len;
+ struct rfc2734_header *bufhdr;
+ unsigned long flags;
+
+ dev = ptask->dev;
+ tx_len = ptask->max_payload;
+ switch (fwnet_get_hdr_lf(&ptask->hdr)) {
+ case RFC2374_HDR_UNFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ break;
+
+ case RFC2374_HDR_FIRSTFRAG:
+ case RFC2374_HDR_INTFRAG:
+ case RFC2374_HDR_LASTFRAG:
+ bufhdr = (struct rfc2734_header *)
+ skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
+ put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
+ put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
+ break;
+
+ default:
+ BUG();
+ }
+ if (ptask->dest_node == IEEE1394_ALL_NODES) {
+ u8 *p;
+ int generation;
+ int node_id;
+
+ /* ptask->generation may not have been set yet */
+ generation = dev->card->generation;
+ smp_rmb();
+ node_id = dev->card->node_id;
+
+ p = skb_push(ptask->skb, 8);
+ put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
+ put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
+ | RFC2734_SW_VERSION, &p[4]);
+
+ /* We should not transmit if broadcast_channel.valid == 0. */
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_STREAM_DATA,
+ fw_stream_packet_destination_id(3,
+ IEEE1394_BROADCAST_CHANNEL, 0),
+ generation, SCODE_100, 0ULL, ptask->skb->data,
+ tx_len + 8, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->broadcasted_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+ }
+
+ fw_send_request(dev->card, &ptask->transaction,
+ TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
+ ptask->generation, ptask->speed, ptask->fifo_addr,
+ ptask->skb->data, tx_len, fwnet_write_complete, ptask);
+
+ /* FIXME race? */
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&ptask->pt_link, &dev->sent_list);
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ dev->netdev->trans_start = jiffies;
+
+ return 0;
+}
+
+static int fwnet_broadcast_start(struct fwnet_device *dev)
+{
+ struct fw_iso_context *context;
+ int retval;
+ unsigned num_packets;
+ unsigned max_receive;
+ struct fw_iso_packet packet;
+ unsigned long offset;
+ unsigned u;
+
+ if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
+ /* outside OHCI posted write area? */
+ static const struct fw_address_region region = {
+ .start = 0xffff00000000ULL,
+ .end = CSR_REGISTER_BASE,
+ };
+
+ dev->handler.length = 4096;
+ dev->handler.address_callback = fwnet_receive_packet;
+ dev->handler.callback_data = dev;
+
+ retval = fw_core_add_address_handler(&dev->handler, &region);
+ if (retval < 0)
+ goto failed_initial;
+
+ dev->local_fifo = dev->handler.offset;
+ }
+
+ max_receive = 1U << (dev->card->max_receive + 1);
+ num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
+
+ if (!dev->broadcast_rcv_context) {
+ void **ptrptr;
+
+ context = fw_iso_context_create(dev->card,
+ FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
+ dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
+ if (IS_ERR(context)) {
+ retval = PTR_ERR(context);
+ goto failed_context_create;
+ }
+
+ retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
+ dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+ if (retval < 0)
+ goto failed_buffer_init;
+
+ ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+ if (!ptrptr) {
+ retval = -ENOMEM;
+ goto failed_ptrs_alloc;
+ }
+
+ dev->broadcast_rcv_buffer_ptrs = ptrptr;
+ for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+ void *ptr;
+ unsigned v;
+
+ ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+ for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+ *ptrptr++ = (void *)
+ ((char *)ptr + v * max_receive);
+ }
+ dev->broadcast_rcv_context = context;
+ } else {
+ context = dev->broadcast_rcv_context;
+ }
+
+ packet.payload_length = max_receive;
+ packet.interrupt = 1;
+ packet.skip = 0;
+ packet.tag = 3;
+ packet.sy = 0;
+ packet.header_length = IEEE1394_GASP_HDR_SIZE;
+ offset = 0;
+
+ for (u = 0; u < num_packets; u++) {
+ retval = fw_iso_context_queue(context, &packet,
+ &dev->broadcast_rcv_buffer, offset);
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ offset += max_receive;
+ }
+ dev->num_broadcast_rcv_ptrs = num_packets;
+ dev->rcv_buffer_size = max_receive;
+ dev->broadcast_rcv_next_ptr = 0U;
+ retval = fw_iso_context_start(context, -1, 0,
+ FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
+ if (retval < 0)
+ goto failed_rcv_queue;
+
+ /* FIXME: adjust it according to the min. speed of all known peers? */
+ dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
+ - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
+ dev->broadcast_state = FWNET_BROADCAST_RUNNING;
+
+ return 0;
+
+ failed_rcv_queue:
+ kfree(dev->broadcast_rcv_buffer_ptrs);
+ dev->broadcast_rcv_buffer_ptrs = NULL;
+ failed_ptrs_alloc:
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+ failed_buffer_init:
+ fw_iso_context_destroy(context);
+ dev->broadcast_rcv_context = NULL;
+ failed_context_create:
+ fw_core_remove_address_handler(&dev->handler);
+ failed_initial:
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ return retval;
+}
+
+/* ifup */
+static int fwnet_open(struct net_device *net)
+{
+ struct fwnet_device *dev = netdev_priv(net);
+ int ret;
+
+ if (dev->broadcast_state == FWNET_BROADCAST_ERROR) {
+ ret = fwnet_broadcast_start(dev);
+ if (ret)
+ return ret;
+ }
+ netif_start_queue(net);
+
+ return 0;
+}
+
+/* ifdown */
+static int fwnet_stop(struct net_device *net)
+{
+ netif_stop_queue(net);
+
+ /* Deallocate iso context for use by other applications? */
+
+ return 0;
+}
+
+static int fwnet_tx(struct sk_buff *skb, struct net_device *net)
+{
+ struct fwnet_header hdr_buf;
+ struct fwnet_device *dev = netdev_priv(net);
+ __be16 proto;
+ u16 dest_node;
+ unsigned max_payload;
+ u16 dg_size;
+ u16 *datagram_label_ptr;
+ struct fwnet_packet_task *ptask;
+ struct fwnet_peer *peer;
+ unsigned long flags;
+
+ ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
+ if (ptask == NULL)
+ goto fail;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ goto fail;
+
+ /*
+ * Make a copy of the driver-specific header.
+ * We might need to rebuild the header on tx failure.
+ */
+ memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
+ skb_pull(skb, sizeof(hdr_buf));
+
+ proto = hdr_buf.h_proto;
+ dg_size = skb->len;
+
+ /* serialize access to peer, including peer->datagram_label */
+ spin_lock_irqsave(&dev->lock, flags);
+
+ /*
+ * Set the transmission type for the packet. ARP packets and IP
+ * broadcast packets are sent via GASP.
+ */
+ if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0
+ || proto == htons(ETH_P_ARP)
+ || (proto == htons(ETH_P_IP)
+ && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
+ max_payload = dev->broadcast_xmt_max_payload;
+ datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
+
+ ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
+ ptask->generation = 0;
+ ptask->dest_node = IEEE1394_ALL_NODES;
+ ptask->speed = SCODE_100;
+ } else {
+ __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest);
+ u8 generation;
+
+ peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
+ if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
+ goto fail_unlock;
+
+ generation = peer->generation;
+ dest_node = peer->node_id;
+ max_payload = peer->max_payload;
+ datagram_label_ptr = &peer->datagram_label;
+
+ ptask->fifo_addr = peer->fifo;
+ ptask->generation = generation;
+ ptask->dest_node = dest_node;
+ ptask->speed = peer->speed;
+ }
+
+ /* If this is an ARP packet, convert it */
+ if (proto == htons(ETH_P_ARP)) {
+ struct arphdr *arp = (struct arphdr *)skb->data;
+ unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+ struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
+ __be32 ipaddr;
+
+ ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
+
+ arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
+ arp1394->max_rec = dev->card->max_receive;
+ arp1394->sspd = dev->card->link_speed;
+
+ put_unaligned_be16(dev->local_fifo >> 32,
+ &arp1394->fifo_hi);
+ put_unaligned_be32(dev->local_fifo & 0xffffffff,
+ &arp1394->fifo_lo);
+ put_unaligned(ipaddr, &arp1394->sip);
+ }
+
+ ptask->hdr.w0 = 0;
+ ptask->hdr.w1 = 0;
+ ptask->skb = skb;
+ ptask->dev = dev;
+
+ /* Does it all fit in one packet? */
+ if (dg_size <= max_payload) {
+ fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
+ ptask->outstanding_pkts = 1;
+ max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
+ } else {
+ u16 datagram_label;
+
+ max_payload -= RFC2374_FRAG_OVERHEAD;
+ datagram_label = (*datagram_label_ptr)++;
+ fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
+ datagram_label);
+ ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
+ max_payload += RFC2374_FRAG_HDR_SIZE;
+ }
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ ptask->max_payload = max_payload;
+ fwnet_send_packet(ptask);
+
+ return NETDEV_TX_OK;
+
+ fail_unlock:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ fail:
+ if (ptask)
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+
+ if (skb != NULL)
+ dev_kfree_skb(skb);
+
+ net->stats.tx_dropped++;
+ net->stats.tx_errors++;
+
+ /*
+ * FIXME: According to a patch from 2003-02-26, "returning non-zero
+ * causes serious problems" here, allegedly. Before that patch,
+ * -ERRNO was returned which is not appropriate under Linux 2.6.
+ * Perhaps more needs to be done? Stop the queue in serious
+ * conditions and restart it elsewhere?
+ */
+ return NETDEV_TX_OK;
+}
+
+static int fwnet_change_mtu(struct net_device *net, int new_mtu)
+{
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ net->mtu = new_mtu;
+ return 0;
+}
+
+static void fwnet_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->bus_info, "ieee1394");
+}
+
+static struct ethtool_ops fwnet_ethtool_ops = {
+ .get_drvinfo = fwnet_get_drvinfo,
+};
+
+static const struct net_device_ops fwnet_netdev_ops = {
+ .ndo_open = fwnet_open,
+ .ndo_stop = fwnet_stop,
+ .ndo_start_xmit = fwnet_tx,
+ .ndo_change_mtu = fwnet_change_mtu,
+};
+
+static void fwnet_init_dev(struct net_device *net)
+{
+ net->header_ops = &fwnet_header_ops;
+ net->netdev_ops = &fwnet_netdev_ops;
+ net->watchdog_timeo = 2 * HZ;
+ net->flags = IFF_BROADCAST | IFF_MULTICAST;
+ net->features = NETIF_F_HIGHDMA;
+ net->addr_len = FWNET_ALEN;
+ net->hard_header_len = FWNET_HLEN;
+ net->type = ARPHRD_IEEE1394;
+ net->tx_queue_len = 10;
+ SET_ETHTOOL_OPS(net, &fwnet_ethtool_ops);
+}
+
+/* caller must hold fwnet_device_mutex */
+static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
+{
+ struct fwnet_device *dev;
+
+ list_for_each_entry(dev, &fwnet_device_list, dev_link)
+ if (dev->card == card)
+ return dev;
+
+ return NULL;
+}
+
+static int fwnet_add_peer(struct fwnet_device *dev,
+ struct fw_unit *unit, struct fw_device *device)
+{
+ struct fwnet_peer *peer;
+
+ peer = kmalloc(sizeof(*peer), GFP_KERNEL);
+ if (!peer)
+ return -ENOMEM;
+
+ dev_set_drvdata(&unit->device, peer);
+
+ peer->dev = dev;
+ peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
+ peer->fifo = FWNET_NO_FIFO_ADDR;
+ INIT_LIST_HEAD(&peer->pd_list);
+ peer->pdg_size = 0;
+ peer->datagram_label = 0;
+ peer->speed = device->max_speed;
+ peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
+
+ peer->generation = device->generation;
+ smp_rmb();
+ peer->node_id = device->node_id;
+
+ spin_lock_irq(&dev->lock);
+ list_add_tail(&peer->peer_link, &dev->peer_list);
+ spin_unlock_irq(&dev->lock);
+
+ return 0;
+}
+
+static int fwnet_probe(struct device *_dev)
+{
+ struct fw_unit *unit = fw_unit(_dev);
+ struct fw_device *device = fw_parent_device(unit);
+ struct fw_card *card = device->card;
+ struct net_device *net;
+ bool allocated_netdev = false;
+ struct fwnet_device *dev;
+ unsigned max_mtu;
+ int ret;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ dev = fwnet_dev_find(card);
+ if (dev) {
+ net = dev->netdev;
+ goto have_dev;
+ }
+
+ net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
+ if (net == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ allocated_netdev = true;
+ SET_NETDEV_DEV(net, card->device);
+ dev = netdev_priv(net);
+
+ spin_lock_init(&dev->lock);
+ dev->broadcast_state = FWNET_BROADCAST_ERROR;
+ dev->broadcast_rcv_context = NULL;
+ dev->broadcast_xmt_max_payload = 0;
+ dev->broadcast_xmt_datagramlabel = 0;
+
+ dev->local_fifo = FWNET_NO_FIFO_ADDR;
+
+ INIT_LIST_HEAD(&dev->packet_list);
+ INIT_LIST_HEAD(&dev->broadcasted_list);
+ INIT_LIST_HEAD(&dev->sent_list);
+ INIT_LIST_HEAD(&dev->peer_list);
+
+ dev->card = card;
+ dev->netdev = net;
+
+ /*
+ * Use the RFC 2734 default 1500 octets or the maximum payload
+ * as initial MTU
+ */
+ max_mtu = (1 << (card->max_receive + 1))
+ - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE;
+ net->mtu = min(1500U, max_mtu);
+
+ /* Set our hardware address while we're at it */
+ put_unaligned_be64(card->guid, net->dev_addr);
+ put_unaligned_be64(~0ULL, net->broadcast);
+ ret = register_netdev(net);
+ if (ret) {
+ fw_error("Cannot register the driver\n");
+ goto out;
+ }
+
+ list_add_tail(&dev->dev_link, &fwnet_device_list);
+ fw_notify("%s: IPv4 over FireWire on device %016llx\n",
+ net->name, (unsigned long long)card->guid);
+ have_dev:
+ ret = fwnet_add_peer(dev, unit, device);
+ if (ret && allocated_netdev) {
+ unregister_netdev(net);
+ list_del(&dev->dev_link);
+ }
+ out:
+ if (ret && allocated_netdev)
+ free_netdev(net);
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return ret;
+}
+
+static void fwnet_remove_peer(struct fwnet_peer *peer)
+{
+ struct fwnet_partial_datagram *pd, *pd_next;
+
+ spin_lock_irq(&peer->dev->lock);
+ list_del(&peer->peer_link);
+ spin_unlock_irq(&peer->dev->lock);
+
+ list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
+ fwnet_pd_delete(pd);
+
+ kfree(peer);
+}
+
+static int fwnet_remove(struct device *_dev)
+{
+ struct fwnet_peer *peer = dev_get_drvdata(_dev);
+ struct fwnet_device *dev = peer->dev;
+ struct net_device *net;
+ struct fwnet_packet_task *ptask, *pt_next;
+
+ mutex_lock(&fwnet_device_mutex);
+
+ fwnet_remove_peer(peer);
+
+ if (list_empty(&dev->peer_list)) {
+ net = dev->netdev;
+ unregister_netdev(net);
+
+ if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+ fw_core_remove_address_handler(&dev->handler);
+ if (dev->broadcast_rcv_context) {
+ fw_iso_context_stop(dev->broadcast_rcv_context);
+ fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
+ dev->card);
+ fw_iso_context_destroy(dev->broadcast_rcv_context);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->packet_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->broadcasted_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_for_each_entry_safe(ptask, pt_next,
+ &dev->sent_list, pt_link) {
+ dev_kfree_skb_any(ptask->skb);
+ kmem_cache_free(fwnet_packet_task_cache, ptask);
+ }
+ list_del(&dev->dev_link);
+
+ free_netdev(net);
+ }
+
+ mutex_unlock(&fwnet_device_mutex);
+
+ return 0;
+}
+
+/*
+ * FIXME abort partially sent fragmented datagrams,
+ * discard partially received fragmented datagrams
+ */
+static void fwnet_update(struct fw_unit *unit)
+{
+ struct fw_device *device = fw_parent_device(unit);
+ struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
+ int generation;
+
+ generation = device->generation;
+
+ spin_lock_irq(&peer->dev->lock);
+ peer->node_id = device->node_id;
+ peer->generation = generation;
+ spin_unlock_irq(&peer->dev->lock);
+}
+
+static const struct ieee1394_device_id fwnet_id_table[] = {
+ {
+ .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .specifier_id = IANA_SPECIFIER_ID,
+ .version = RFC2734_SW_VERSION,
+ },
+ { }
+};
+
+static struct fw_driver fwnet_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "net",
+ .bus = &fw_bus_type,
+ .probe = fwnet_probe,
+ .remove = fwnet_remove,
+ },
+ .update = fwnet_update,
+ .id_table = fwnet_id_table,
+};
+
+static const u32 rfc2374_unit_directory_data[] = {
+ 0x00040000, /* directory_length */
+ 0x1200005e, /* unit_specifier_id: IANA */
+ 0x81000003, /* textual descriptor offset */
+ 0x13000001, /* unit_sw_version: RFC 2734 */
+ 0x81000005, /* textual descriptor offset */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49414e41, /* I A N A */
+ 0x00030000, /* descriptor_length */
+ 0x00000000, /* text */
+ 0x00000000, /* minimal ASCII, en */
+ 0x49507634, /* I P v 4 */
+};
+
+static struct fw_descriptor rfc2374_unit_directory = {
+ .length = ARRAY_SIZE(rfc2374_unit_directory_data),
+ .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
+ .data = rfc2374_unit_directory_data
+};
+
+static int __init fwnet_init(void)
+{
+ int err;
+
+ err = fw_core_add_descriptor(&rfc2374_unit_directory);
+ if (err)
+ return err;
+
+ fwnet_packet_task_cache = kmem_cache_create("packet_task",
+ sizeof(struct fwnet_packet_task), 0, 0, NULL);
+ if (!fwnet_packet_task_cache) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = driver_register(&fwnet_driver.driver);
+ if (!err)
+ return 0;
+
+ kmem_cache_destroy(fwnet_packet_task_cache);
+out:
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+
+ return err;
+}
+module_init(fwnet_init);
+
+static void __exit fwnet_cleanup(void)
+{
+ driver_unregister(&fwnet_driver.driver);
+ kmem_cache_destroy(fwnet_packet_task_cache);
+ fw_core_remove_descriptor(&rfc2374_unit_directory);
+}
+module_exit(fwnet_cleanup);
+
+MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
+MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
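
The encapsulation header that net.c builds with the fwnet_set_hdr_* macros packs the link fragment type into bits 31:30 of w0, the datagram size into bits 27:16, the ether type (first fragment) or fragment offset (later fragments) into bits 15:0, and the datagram label into the top half of w1; fwnet_max_payload() then derives the per-fragment payload from max_rec. A standalone sketch re-implementing just those two pieces so the numbers can be checked outside the kernel:

#include <stdio.h>

#define RFC2374_HDR_FIRSTFRAG	1
#define RFC2374_FRAG_HDR_SIZE	8

/* Same packing as fwnet_make_ff_hdr() for w0 of a first fragment. */
static unsigned int first_frag_w0(unsigned int dg_size, unsigned int ether_type)
{
	return (RFC2374_HDR_FIRSTFRAG << 30) | (dg_size << 16) | ether_type;
}

/* Same clamping and formula as fwnet_max_payload(). */
static unsigned int max_payload(unsigned int max_rec, unsigned int speed)
{
	if (max_rec > speed + 8)
		max_rec = speed + 8;
	if (max_rec > 0xb)			/* <= 4096 */
		max_rec = 0xb;
	if (max_rec < 8)
		max_rec = 8;
	return (1u << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}

int main(void)
{
	/* First fragment of a 2048-byte IPv4 datagram: expect 0x48000800. */
	printf("w0 = 0x%08x\n", first_frag_w0(2048, 0x0800));
	/* Datagram label 5 sits in the top half of w1: expect 0x00050000. */
	printf("w1 = 0x%08x\n", 5u << 16);
	/* max_rec 8 at S100: (1 << 9) - 8 = 504 bytes per fragment. */
	printf("max_payload = %u\n", max_payload(8, 0));
	return 0;
}
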
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b5db8b883615..9c2e10082b79 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -140,7 +140,7 @@ static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
dev_dbg(&adap->dev, "Interrupt: %x\n", i);
- wake_up_interruptible(&cpm->i2c_wait);
+ wake_up(&cpm->i2c_wait);
return i ? IRQ_HANDLED : IRQ_NONE;
}
@@ -364,12 +364,12 @@ static int cpm_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
dev_dbg(&adap->dev, "test ready.\n");
pmsg = &msgs[tptr];
if (pmsg->flags & I2C_M_RD)
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_NAK) ||
!(in_be16(&rbdf[rptr].cbd_sc) & BD_SC_EMPTY),
1 * HZ);
else
- ret = wait_event_interruptible_timeout(cpm->i2c_wait,
+ ret = wait_event_timeout(cpm->i2c_wait,
!(in_be16(&tbdf[tptr].cbd_sc) & BD_SC_READY),
1 * HZ);
if (ret == 0) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b606db85525d..ad8d2010c921 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -339,7 +339,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* to get longer filter period for better noise suppression.
* The filter is iclk (fclk for HS) period.
*/
- if (dev->speed > 400 || cpu_is_omap_2430())
+ if (dev->speed > 400 || cpu_is_omap2430())
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
diff --git a/drivers/ieee1394/Kconfig b/drivers/ieee1394/Kconfig
index 95f45f9b8e5e..f102fcc7e52a 100644
--- a/drivers/ieee1394/Kconfig
+++ b/drivers/ieee1394/Kconfig
@@ -4,7 +4,7 @@ menu "IEEE 1394 (FireWire) support"
source "drivers/firewire/Kconfig"
config IEEE1394
- tristate "Stable FireWire stack"
+ tristate "Legacy alternative FireWire driver stack"
depends on PCI || BROKEN
help
IEEE 1394 describes a high performance serial bus, which is also
@@ -33,11 +33,9 @@ config IEEE1394_OHCI1394
module will be called ohci1394.
NOTE:
-
- You should only build either ohci1394 or the new firewire-ohci driver,
- but not both. If you nevertheless want to install both, you should
- configure them only as modules and blacklist the driver(s) which you
- don't want to have auto-loaded. Add either
+ If you want to install firewire-ohci and ohci1394 together, you
+ should configure them only as modules and blacklist the driver(s)
+ which you don't want to have auto-loaded. Add either
blacklist firewire-ohci
or
@@ -46,12 +44,7 @@ config IEEE1394_OHCI1394
blacklist dv1394
to /etc/modprobe.conf or /etc/modprobe.d/* and update modprobe.conf
- depending on your distribution. The latter two modules should be
- blacklisted together with ohci1394 because they depend on ohci1394.
-
- If you have an old modprobe which doesn't implement the blacklist
- directive, use "install modulename /bin/true" for the modules to be
- blacklisted.
+ depending on your distribution.
comment "PCILynx controller requires I2C"
depends on IEEE1394 && I2C=n
@@ -105,7 +98,7 @@ config IEEE1394_ETH1394_ROM_ENTRY
default n
config IEEE1394_ETH1394
- tristate "IP over 1394"
+ tristate "IP networking over 1394 (experimental)"
depends on IEEE1394 && EXPERIMENTAL && INET
select IEEE1394_ETH1394_ROM_ENTRY
help
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index c240454fd113..8664feebc93b 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -46,6 +46,7 @@
#define MANUFACTURER_INTEL 0x0089
#define I82802AB 0x00ad
#define I82802AC 0x00ac
+#define PF38F4476 0x881c
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -315,10 +316,20 @@ static struct cfi_fixup fixup_table[] = {
{ 0, 0, NULL, NULL }
};
+static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ struct cfi_pri_intelext *extp)
+{
+ if (cfi->mfr == MANUFACTURER_INTEL &&
+ cfi->id == PF38F4476 && extp->MinorVersion == '3')
+ extp->MinorVersion = '1';
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
+ struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *extp;
+ unsigned int extra_size = 0;
unsigned int extp_size = sizeof(*extp);
again:
@@ -326,6 +337,8 @@ read_pri_intelext(struct map_info *map, __u16 adr)
if (!extp)
return NULL;
+ cfi_fixup_major_minor(cfi, extp);
+
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
@@ -340,19 +353,24 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
- if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
- unsigned int extra_size = 0;
- int nb_parts, i;
+ if (extp->MinorVersion >= '0') {
+ extra_size = 0;
/* Protection Register info */
extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
+ }
+ if (extp->MinorVersion >= '1') {
/* Burst Read info */
extra_size += 2;
if (extp_size < sizeof(*extp) + extra_size)
goto need_more;
- extra_size += extp->extra[extra_size-1];
+ extra_size += extp->extra[extra_size - 1];
+ }
+
+ if (extp->MinorVersion >= '3') {
+ int nb_parts, i;
/* Number of hardware-partitions */
extra_size += 1;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index e824b9b9b056..ccc4cfc7e4b5 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -166,6 +166,7 @@
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
+#define SST39SF040 0x00B7
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
@@ -1393,6 +1394,18 @@ static const struct amd_flash_info jedec_table[] = {
}
}, {
.mfr_id = MANUFACTURER_SST,
+ .dev_id = SST39SF040,
+ .name = "SST 39SF040",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
+ ERASEINFO(0x01000,128),
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cc6369ea67dd..59c46126a5ce 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -500,6 +500,9 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "at26df161a", 0x1f4601, 0, 64 * 1024, 32, SECT_4K, },
{ "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, },
+ /* Macronix */
+ { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, },
+
/* Spansion -- single (large) sector size only, at least
* for the chips listed here (without boot sectors).
*/
@@ -528,6 +531,7 @@ static struct flash_info __devinitdata m25p_data [] = {
{ "m25p64", 0x202017, 0, 64 * 1024, 128, },
{ "m25p128", 0x202018, 0, 256 * 1024, 64, },
+ { "m45pe10", 0x204011, 0, 64 * 1024, 2, },
{ "m45pe80", 0x204014, 0, 64 * 1024, 16, },
{ "m45pe16", 0x204015, 0, 64 * 1024, 32, },
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 82923bd2d9c5..0b98654d8eed 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -105,15 +105,6 @@ config MSP_FLASH_MAP_LIMIT
default "0x02000000"
depends on MSP_FLASH_MAP_LIMIT_32M
-config MTD_PMC_MSP_RAMROOT
- tristate "Embedded RAM block device for root on PMC-Sierra MSP"
- depends on PMC_MSP_EMBEDDED_ROOTFS && \
- (MTD_BLOCK || MTD_BLOCK_RO) && \
- MTD_RAM
- help
- This provides support for the embedded root file system
- on PMC MSP devices. This memory is mapped as a MTD block device.
-
config MTD_SUN_UFLASH
tristate "Sun Microsystems userflash support"
depends on SPARC && MTD_CFI && PCI
@@ -270,7 +261,7 @@ config MTD_ALCHEMY
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
- depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT
+ depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
help
MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP".
For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm>
@@ -501,7 +492,7 @@ config MTD_BFIN_ASYNC
If compiled as a module, it will be called bfin-async-flash.
config MTD_UCLINUX
- tristate "Generic uClinux RAM/ROM filesystem support"
+ bool "Generic uClinux RAM/ROM filesystem support"
depends on MTD_PARTITIONS && MTD_RAM && !MMU
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 2dbc1bec8488..8bae7f9850c0 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -25,7 +25,6 @@ obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
-obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 576611f605db..365c77b1b871 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -40,6 +40,9 @@ struct async_state {
uint32_t flash_ambctl0, flash_ambctl1;
uint32_t save_ambctl0, save_ambctl1;
unsigned long irq_flags;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *parts;
+#endif
};
static void switch_to_flash(struct async_state *state)
@@ -170,6 +173,7 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev)
if (ret > 0) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
add_mtd_partitions(state->mtd, pdata->parts, ret);
+ state->parts = pdata->parts;
} else if (pdata->nr_parts) {
pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
@@ -193,6 +197,7 @@ static int __devexit bfin_flash_remove(struct platform_device *pdev)
gpio_free(state->enet_flash_pin);
#ifdef CONFIG_MTD_PARTITIONS
del_mtd_partitions(state->mtd);
+ kfree(state->parts);
#endif
map_destroy(state->mtd);
kfree(state);
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index c9681a339a59..b08a798ee254 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -36,27 +36,33 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <asm/mach/flash.h>
#include <mach/hardware.h>
#include <asm/system.h>
-#ifdef CONFIG_ARCH_P720T
-#define FLASH_BASE (0x04000000)
-#define FLASH_SIZE (64*1024*1024)
-#endif
+#define SUBDEV_NAME_SIZE (BUS_ID_SIZE + 2)
-struct armflash_info {
+struct armflash_subdev_info {
+ char name[SUBDEV_NAME_SIZE];
+ struct mtd_info *mtd;
+ struct map_info map;
struct flash_platform_data *plat;
+};
+
+struct armflash_info {
struct resource *res;
struct mtd_partition *parts;
struct mtd_info *mtd;
- struct map_info map;
+ int nr_subdev;
+ struct armflash_subdev_info subdev[0];
};
static void armflash_set_vpp(struct map_info *map, int on)
{
- struct armflash_info *info = container_of(map, struct armflash_info, map);
+ struct armflash_subdev_info *info =
+ container_of(map, struct armflash_subdev_info, map);
if (info->plat && info->plat->set_vpp)
info->plat->set_vpp(on);
@@ -64,32 +70,17 @@ static void armflash_set_vpp(struct map_info *map, int on)
static const char *probes[] = { "cmdlinepart", "RedBoot", "afs", NULL };
-static int armflash_probe(struct platform_device *dev)
+static int armflash_subdev_probe(struct armflash_subdev_info *subdev,
+ struct resource *res)
{
- struct flash_platform_data *plat = dev->dev.platform_data;
- struct resource *res = dev->resource;
- unsigned int size = res->end - res->start + 1;
- struct armflash_info *info;
- int err;
+ struct flash_platform_data *plat = subdev->plat;
+ resource_size_t size = res->end - res->start + 1;
void __iomem *base;
+ int err = 0;
- info = kzalloc(sizeof(struct armflash_info), GFP_KERNEL);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- info->plat = plat;
- if (plat && plat->init) {
- err = plat->init();
- if (err)
- goto no_resource;
- }
-
- info->res = request_mem_region(res->start, size, "armflash");
- if (!info->res) {
+ if (!request_mem_region(res->start, size, subdev->name)) {
err = -EBUSY;
- goto no_resource;
+ goto out;
}
base = ioremap(res->start, size);
@@ -101,27 +92,132 @@ static int armflash_probe(struct platform_device *dev)
/*
* look for CFI based flash parts fitted to this board
*/
- info->map.size = size;
- info->map.bankwidth = plat->width;
- info->map.phys = res->start;
- info->map.virt = base;
- info->map.name = dev_name(&dev->dev);
- info->map.set_vpp = armflash_set_vpp;
+ subdev->map.size = size;
+ subdev->map.bankwidth = plat->width;
+ subdev->map.phys = res->start;
+ subdev->map.virt = base;
+ subdev->map.name = subdev->name;
+ subdev->map.set_vpp = armflash_set_vpp;
- simple_map_init(&info->map);
+ simple_map_init(&subdev->map);
/*
* Also, the CFI layer automatically works out what size
* of chips we have, and does the necessary identification
* for us automatically.
*/
- info->mtd = do_map_probe(plat->map_name, &info->map);
- if (!info->mtd) {
+ subdev->mtd = do_map_probe(plat->map_name, &subdev->map);
+ if (!subdev->mtd) {
err = -ENXIO;
goto no_device;
}
- info->mtd->owner = THIS_MODULE;
+ subdev->mtd->owner = THIS_MODULE;
+
+ /* Successful? */
+ if (err == 0)
+ return err;
+
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ no_device:
+ iounmap(base);
+ no_mem:
+ release_mem_region(res->start, size);
+ out:
+ return err;
+}
+
+static void armflash_subdev_remove(struct armflash_subdev_info *subdev)
+{
+ if (subdev->mtd)
+ map_destroy(subdev->mtd);
+ if (subdev->map.virt)
+ iounmap(subdev->map.virt);
+ release_mem_region(subdev->map.phys, subdev->map.size);
+}
+
+static int armflash_probe(struct platform_device *dev)
+{
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ unsigned int size;
+ struct armflash_info *info;
+ int i, nr, err;
+
+ /* Count the number of devices */
+ for (nr = 0; ; nr++)
+ if (!platform_get_resource(dev, IORESOURCE_MEM, nr))
+ break;
+ if (nr == 0) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ size = sizeof(struct armflash_info) +
+ sizeof(struct armflash_subdev_info) * nr;
+ info = kzalloc(size, GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (plat && plat->init) {
+ err = plat->init();
+ if (err)
+ goto no_resource;
+ }
+
+ for (i = 0; i < nr; i++) {
+ struct armflash_subdev_info *subdev = &info->subdev[i];
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, i);
+ if (!res)
+ break;
+
+ if (nr == 1)
+ /* No MTD concatenation, just use the default name */
+ snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s",
+ dev_name(&dev->dev));
+ else
+ snprintf(subdev->name, SUBDEV_NAME_SIZE, "%s-%d",
+ dev_name(&dev->dev), i);
+ subdev->plat = plat;
+
+ err = armflash_subdev_probe(subdev, res);
+ if (err)
+ break;
+ }
+ info->nr_subdev = i;
+
+ if (err)
+ goto subdev_err;
+
+ if (info->nr_subdev == 1)
+ info->mtd = info->subdev[0].mtd;
+ else if (info->nr_subdev > 1) {
+#ifdef CONFIG_MTD_CONCAT
+ struct mtd_info *cdev[info->nr_subdev];
+
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ for (i = 0; i < info->nr_subdev; i++)
+ cdev[i] = info->subdev[i].mtd;
+
+ info->mtd = mtd_concat_create(cdev, info->nr_subdev,
+ dev_name(&dev->dev));
+ if (info->mtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "armflash: multiple devices found but "
+ "MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
+ }
+
+ if (err < 0)
+ goto cleanup;
err = parse_mtd_partitions(info->mtd, probes, &info->parts, 0);
if (err > 0) {
@@ -131,28 +227,30 @@ static int armflash_probe(struct platform_device *dev)
"mtd partition registration failed: %d\n", err);
}
- if (err == 0)
+ if (err == 0) {
platform_set_drvdata(dev, info);
+ return err;
+ }
/*
- * If we got an error, free all resources.
+ * We got an error, free all resources.
*/
- if (err < 0) {
- if (info->mtd) {
- del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
- }
- kfree(info->parts);
-
- no_device:
- iounmap(base);
- no_mem:
- release_mem_region(res->start, size);
- no_resource:
- if (plat && plat->exit)
- plat->exit();
- kfree(info);
+ cleanup:
+ if (info->mtd) {
+ del_mtd_partitions(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
+ kfree(info->parts);
+ subdev_err:
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
+ no_resource:
+ if (plat && plat->exit)
+ plat->exit();
+ kfree(info);
out:
return err;
}
@@ -160,22 +258,26 @@ static int armflash_probe(struct platform_device *dev)
static int armflash_remove(struct platform_device *dev)
{
struct armflash_info *info = platform_get_drvdata(dev);
+ struct flash_platform_data *plat = dev->dev.platform_data;
+ int i;
platform_set_drvdata(dev, NULL);
if (info) {
if (info->mtd) {
del_mtd_partitions(info->mtd);
- map_destroy(info->mtd);
+#ifdef CONFIG_MTD_CONCAT
+ if (info->mtd != info->subdev[0].mtd)
+ mtd_concat_destroy(info->mtd);
+#endif
}
kfree(info->parts);
- iounmap(info->map.virt);
- release_resource(info->res);
- kfree(info->res);
+ for (i = info->nr_subdev - 1; i >= 0; i--)
+ armflash_subdev_remove(&info->subdev[i]);
- if (info->plat && info->plat->exit)
- info->plat->exit();
+ if (plat && plat->exit)
+ plat->exit();
kfree(info);
}
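
Note: the reworked armflash probe above counts IORESOURCE_MEM resources, builds one subdevice per bank, and concatenates them when CONFIG_MTD_CONCAT is enabled. A minimal board-side sketch of such a multi-bank registration follows; the base addresses, bank width and the myboard_ names are illustrative assumptions, and it presumes the driver's platform name is "armflash" as used for the memory regions in this file.

/* Illustrative sketch only: two hypothetical NOR banks registered as a
 * single "armflash" platform device so the probe above concatenates them. */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <asm/mach/flash.h>

static struct flash_platform_data myboard_flash_data = {
	.map_name	= "cfi_probe",
	.width		= 4,		/* assumed 32-bit wide banks */
};

static struct resource myboard_flash_resources[] = {
	{ .start = 0x40000000, .end = 0x41ffffff, .flags = IORESOURCE_MEM },
	{ .start = 0x44000000, .end = 0x45ffffff, .flags = IORESOURCE_MEM },
};

static struct platform_device myboard_flash_device = {
	.name		= "armflash",
	.id		= 0,
	.dev		= { .platform_data = &myboard_flash_data },
	.num_resources	= ARRAY_SIZE(myboard_flash_resources),
	.resource	= myboard_flash_resources,
};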
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 29a901157352..380648e9051a 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -195,42 +195,6 @@ err_out:
}
#ifdef CONFIG_PM
-static int physmap_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend) {
- ret = info->mtd[i]->suspend(info->mtd[i]);
- if (ret)
- goto fail;
- }
-
- return 0;
-fail:
- for (--i; i >= 0; --i)
- if (info->mtd[i]->suspend) {
- BUG_ON(!info->mtd[i]->resume);
- info->mtd[i]->resume(info->mtd[i]);
- }
-
- return ret;
-}
-
-static int physmap_flash_resume(struct platform_device *dev)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->resume)
- info->mtd[i]->resume(info->mtd[i]);
-
- return 0;
-}
-
static void physmap_flash_shutdown(struct platform_device *dev)
{
struct physmap_flash_info *info = platform_get_drvdata(dev);
@@ -242,16 +206,12 @@ static void physmap_flash_shutdown(struct platform_device *dev)
info->mtd[i]->resume(info->mtd[i]);
}
#else
-#define physmap_flash_suspend NULL
-#define physmap_flash_resume NULL
#define physmap_flash_shutdown NULL
#endif
static struct platform_driver physmap_flash_driver = {
.probe = physmap_flash_probe,
.remove = physmap_flash_remove,
- .suspend = physmap_flash_suspend,
- .resume = physmap_flash_resume,
.shutdown = physmap_flash_shutdown,
.driver = {
.name = "physmap-flash",
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index c83a60fada53..39d357b2eb47 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -20,16 +20,23 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/concat.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+struct of_flash_list {
+ struct mtd_info *mtd;
+ struct map_info map;
+ struct resource *res;
+};
+
struct of_flash {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
+ struct mtd_info *cmtd;
#ifdef CONFIG_MTD_PARTITIONS
struct mtd_partition *parts;
#endif
+ int list_size; /* number of elements in of_flash_list */
+ struct of_flash_list list[0];
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -88,30 +95,44 @@ static int parse_obsolete_partitions(struct of_device *dev,
static int of_flash_remove(struct of_device *dev)
{
struct of_flash *info;
+ int i;
info = dev_get_drvdata(&dev->dev);
if (!info)
return 0;
dev_set_drvdata(&dev->dev, NULL);
- if (info->mtd) {
+#ifdef CONFIG_MTD_CONCAT
+ if (info->cmtd != info->list[0].mtd) {
+ del_mtd_device(info->cmtd);
+ mtd_concat_destroy(info->cmtd);
+ }
+#endif
+
+ if (info->cmtd) {
if (OF_FLASH_PARTS(info)) {
- del_mtd_partitions(info->mtd);
+ del_mtd_partitions(info->cmtd);
kfree(OF_FLASH_PARTS(info));
} else {
- del_mtd_device(info->mtd);
+ del_mtd_device(info->cmtd);
}
- map_destroy(info->mtd);
}
- if (info->map.virt)
- iounmap(info->map.virt);
+ for (i = 0; i < info->list_size; i++) {
+ if (info->list[i].mtd)
+ map_destroy(info->list[i].mtd);
- if (info->res) {
- release_resource(info->res);
- kfree(info->res);
+ if (info->list[i].map.virt)
+ iounmap(info->list[i].map.virt);
+
+ if (info->list[i].res) {
+ release_resource(info->list[i].res);
+ kfree(info->list[i].res);
+ }
}
+ kfree(info);
+
return 0;
}
@@ -164,68 +185,130 @@ static int __devinit of_flash_probe(struct of_device *dev,
const char *probe_type = match->data;
const u32 *width;
int err;
-
- err = -ENXIO;
- if (of_address_to_resource(dp, 0, &res)) {
- dev_err(&dev->dev, "Can't get IO address from device tree\n");
+ int i;
+ int count;
+ const u32 *p;
+ int reg_tuple_size;
+ struct mtd_info **mtd_list = NULL;
+
+ reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
+
+ /*
+ * Get number of "reg" tuples. Scan for MTD devices on area's
+ * described by each "reg" region. This makes it possible (including
+ * the concat support) to support the Intel P30 48F4400 chips which
+ * consists internally of 2 non-identical NOR chips on one die.
+ */
+ p = of_get_property(dp, "reg", &count);
+ if (count % reg_tuple_size != 0) {
+ dev_err(&dev->dev, "Malformed reg property on %s\n",
+ dev->node->full_name);
+ err = -EINVAL;
goto err_out;
}
-
- dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
- (unsigned long long)res.start, (unsigned long long)res.end);
+ count /= reg_tuple_size;
err = -ENOMEM;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ if (!info)
+ goto err_out;
+
+ mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
if (!info)
goto err_out;
dev_set_drvdata(&dev->dev, info);
- err = -EBUSY;
- info->res = request_mem_region(res.start, res.end - res.start + 1,
- dev_name(&dev->dev));
- if (!info->res)
- goto err_out;
+ for (i = 0; i < count; i++) {
+ err = -ENXIO;
+ if (of_address_to_resource(dp, i, &res)) {
+ dev_err(&dev->dev, "Can't get IO address from device"
+ " tree\n");
+ goto err_out;
+ }
- err = -ENXIO;
- width = of_get_property(dp, "bank-width", NULL);
- if (!width) {
- dev_err(&dev->dev, "Can't get bank width from device tree\n");
- goto err_out;
- }
+ dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
+ (unsigned long long)res.start,
+ (unsigned long long)res.end);
+
+ err = -EBUSY;
+ info->list[i].res = request_mem_region(res.start, res.end -
+ res.start + 1,
+ dev_name(&dev->dev));
+ if (!info->list[i].res)
+ goto err_out;
+
+ err = -ENXIO;
+ width = of_get_property(dp, "bank-width", NULL);
+ if (!width) {
+ dev_err(&dev->dev, "Can't get bank width from device"
+ " tree\n");
+ goto err_out;
+ }
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res.start;
- info->map.size = res.end - res.start + 1;
- info->map.bankwidth = *width;
+ info->list[i].map.name = dev_name(&dev->dev);
+ info->list[i].map.phys = res.start;
+ info->list[i].map.size = res.end - res.start + 1;
+ info->list[i].map.bankwidth = *width;
+
+ err = -ENOMEM;
+ info->list[i].map.virt = ioremap(info->list[i].map.phys,
+ info->list[i].map.size);
+ if (!info->list[i].map.virt) {
+ dev_err(&dev->dev, "Failed to ioremap() flash"
+ " region\n");
+ goto err_out;
+ }
- err = -ENOMEM;
- info->map.virt = ioremap(info->map.phys, info->map.size);
- if (!info->map.virt) {
- dev_err(&dev->dev, "Failed to ioremap() flash region\n");
- goto err_out;
- }
+ simple_map_init(&info->list[i].map);
- simple_map_init(&info->map);
+ if (probe_type) {
+ info->list[i].mtd = do_map_probe(probe_type,
+ &info->list[i].map);
+ } else {
+ info->list[i].mtd = obsolete_probe(dev,
+ &info->list[i].map);
+ }
+ mtd_list[i] = info->list[i].mtd;
- if (probe_type)
- info->mtd = do_map_probe(probe_type, &info->map);
- else
- info->mtd = obsolete_probe(dev, &info->map);
+ err = -ENXIO;
+ if (!info->list[i].mtd) {
+ dev_err(&dev->dev, "do_map_probe() failed\n");
+ goto err_out;
+ } else {
+ info->list_size++;
+ }
+ info->list[i].mtd->owner = THIS_MODULE;
+ info->list[i].mtd->dev.parent = &dev->dev;
+ }
- err = -ENXIO;
- if (!info->mtd) {
- dev_err(&dev->dev, "do_map_probe() failed\n");
- goto err_out;
+ err = 0;
+ if (info->list_size == 1) {
+ info->cmtd = info->list[0].mtd;
+ } else if (info->list_size > 1) {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+#ifdef CONFIG_MTD_CONCAT
+ info->cmtd = mtd_concat_create(mtd_list, info->list_size,
+ dev_name(&dev->dev));
+ if (info->cmtd == NULL)
+ err = -ENXIO;
+#else
+ printk(KERN_ERR "physmap_of: multiple devices "
+ "found but MTD concat support disabled.\n");
+ err = -ENXIO;
+#endif
}
- info->mtd->owner = THIS_MODULE;
- info->mtd->dev.parent = &dev->dev;
+ if (err)
+ goto err_out;
#ifdef CONFIG_MTD_PARTITIONS
/* First look for RedBoot table or partitions on the command
* line, these take precedence over device tree information */
- err = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
+ err = parse_mtd_partitions(info->cmtd, part_probe_types,
+ &info->parts, 0);
if (err < 0)
return err;
@@ -244,15 +327,19 @@ static int __devinit of_flash_probe(struct of_device *dev,
}
if (err > 0)
- add_mtd_partitions(info->mtd, info->parts, err);
+ add_mtd_partitions(info->cmtd, info->parts, err);
else
#endif
- add_mtd_device(info->mtd);
+ add_mtd_device(info->cmtd);
+
+ kfree(mtd_list);
return 0;
err_out:
+ kfree(mtd_list);
of_flash_remove(dev);
+
return err;
}
diff --git a/drivers/mtd/maps/pmcmsp-ramroot.c b/drivers/mtd/maps/pmcmsp-ramroot.c
deleted file mode 100644
index 30de5c0c09a9..000000000000
--- a/drivers/mtd/maps/pmcmsp-ramroot.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Mapping of the rootfs in a physical region of memory
- *
- * Copyright (C) 2005-2007 PMC-Sierra Inc.
- * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/root_dev.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-
-#include <asm/io.h>
-
-#include <msp_prom.h>
-
-static struct mtd_info *rr_mtd;
-
-struct map_info rr_map = {
- .name = "ramroot",
- .bankwidth = 4,
-};
-
-static int __init init_rrmap(void)
-{
- void *ramroot_start;
- unsigned long ramroot_size;
-
- /* Check for supported rootfs types */
- if (get_ramroot(&ramroot_start, &ramroot_size)) {
- rr_map.phys = CPHYSADDR(ramroot_start);
- rr_map.size = ramroot_size;
-
- printk(KERN_NOTICE
- "PMC embedded root device: 0x%08lx @ 0x%08lx\n",
- rr_map.size, (unsigned long)rr_map.phys);
- } else {
- printk(KERN_ERR
- "init_rrmap: no supported embedded rootfs detected!\n");
- return -ENXIO;
- }
-
- /* Map rootfs to I/O space for block device driver */
- rr_map.virt = ioremap(rr_map.phys, rr_map.size);
- if (!rr_map.virt) {
- printk(KERN_ERR "Failed to ioremap\n");
- return -EIO;
- }
-
- simple_map_init(&rr_map);
-
- rr_mtd = do_map_probe("map_ram", &rr_map);
- if (rr_mtd) {
- rr_mtd->owner = THIS_MODULE;
-
- add_mtd_device(rr_mtd);
-
- return 0;
- }
-
- iounmap(rr_map.virt);
- return -ENXIO;
-}
-
-static void __exit cleanup_rrmap(void)
-{
- del_mtd_device(rr_mtd);
- map_destroy(rr_mtd);
-
- iounmap(rr_map.virt);
- rr_map.virt = NULL;
-}
-
-MODULE_AUTHOR("PMC-Sierra, Inc");
-MODULE_DESCRIPTION("MTD map driver for embedded PMC-Sierra MSP filesystem");
-MODULE_LICENSE("GPL");
-
-module_init(init_rrmap);
-module_exit(cleanup_rrmap);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 572d32fdf38a..643aa06b599e 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -140,24 +140,6 @@ static int __devexit pxa2xx_flash_remove(struct platform_device *dev)
}
#ifdef CONFIG_PM
-static int pxa2xx_flash_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info->mtd && info->mtd->suspend)
- ret = info->mtd->suspend(info->mtd);
- return ret;
-}
-
-static int pxa2xx_flash_resume(struct platform_device *dev)
-{
- struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd && info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
@@ -166,8 +148,6 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define pxa2xx_flash_suspend NULL
-#define pxa2xx_flash_resume NULL
#define pxa2xx_flash_shutdown NULL
#endif
@@ -178,8 +158,6 @@ static struct platform_driver pxa2xx_flash_driver = {
},
.probe = pxa2xx_flash_probe,
.remove = __devexit_p(pxa2xx_flash_remove),
- .suspend = pxa2xx_flash_suspend,
- .resume = pxa2xx_flash_resume,
.shutdown = pxa2xx_flash_shutdown,
};
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index d39f0adac846..83ed64512c5e 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -145,25 +145,6 @@ err_out:
}
#ifdef CONFIG_PM
-static int rbtx4939_flash_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->suspend)
- return info->mtd->suspend(info->mtd);
- return 0;
-}
-
-static int rbtx4939_flash_resume(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (info->mtd->resume)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
@@ -173,16 +154,12 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define rbtx4939_flash_suspend NULL
-#define rbtx4939_flash_resume NULL
#define rbtx4939_flash_shutdown NULL
#endif
static struct platform_driver rbtx4939_flash_driver = {
.probe = rbtx4939_flash_probe,
.remove = rbtx4939_flash_remove,
- .suspend = rbtx4939_flash_suspend,
- .resume = rbtx4939_flash_resume,
.shutdown = rbtx4939_flash_shutdown,
.driver = {
.name = "rbtx4939-flash",
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 05e9362dc7f0..c6210f5118d1 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -415,25 +415,6 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int sa1100_mtd_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- int ret = 0;
-
- if (info)
- ret = info->mtd->suspend(info->mtd);
-
- return ret;
-}
-
-static int sa1100_mtd_resume(struct platform_device *dev)
-{
- struct sa_info *info = platform_get_drvdata(dev);
- if (info)
- info->mtd->resume(info->mtd);
- return 0;
-}
-
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
@@ -441,16 +422,12 @@ static void sa1100_mtd_shutdown(struct platform_device *dev)
info->mtd->resume(info->mtd);
}
#else
-#define sa1100_mtd_suspend NULL
-#define sa1100_mtd_resume NULL
#define sa1100_mtd_shutdown NULL
#endif
static struct platform_driver sa1100_mtd_driver = {
.probe = sa1100_mtd_probe,
.remove = __exit_p(sa1100_mtd_remove),
- .suspend = sa1100_mtd_suspend,
- .resume = sa1100_mtd_resume,
.shutdown = sa1100_mtd_shutdown,
.driver = {
.name = "sa1100-mtd",
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 81756e397711..d4314fb88212 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -22,15 +22,19 @@
/****************************************************************************/
+extern char _ebss;
+
struct map_info uclinux_ram_map = {
.name = "RAM",
+ .phys = (unsigned long)&_ebss,
+ .size = 0,
};
-struct mtd_info *uclinux_ram_mtdinfo;
+static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
-struct mtd_partition uclinux_romfs[] = {
+static struct mtd_partition uclinux_romfs[] = {
{ .name = "ROMfs" }
};
@@ -38,7 +42,7 @@ struct mtd_partition uclinux_romfs[] = {
/****************************************************************************/
-int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
+static int uclinux_point(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, void **virt, resource_size_t *phys)
{
struct map_info *map = mtd->priv;
@@ -55,12 +59,10 @@ static int __init uclinux_mtd_init(void)
{
struct mtd_info *mtd;
struct map_info *mapp;
- extern char _ebss;
- unsigned long addr = (unsigned long) &_ebss;
mapp = &uclinux_ram_map;
- mapp->phys = addr;
- mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(addr + 8))));
+ if (!mapp->size)
+ mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index aaac3b6800b7..c3f62654b6df 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -291,7 +291,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->private_data = new;
new->blkcore_priv = gd;
gd->queue = tr->blkcore_priv->rq;
- gd->driverfs_dev = new->mtd->dev.parent;
+ gd->driverfs_dev = &new->mtd->dev;
if (new->readonly)
set_disk_ro(gd, 1);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 763d3f0a1f42..5b081cb84351 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
+#include <linux/compat.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
@@ -355,6 +356,100 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
+static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+ uint64_t start, uint32_t length, void __user *ptr,
+ uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ uint32_t retlen;
+ int ret = 0;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return -EPERM;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->write_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
+
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(ops.oobbuf, ptr, length)) {
+ kfree(ops.oobbuf);
+ return -EFAULT;
+ }
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->write_oob(mtd, start, &ops);
+
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(retp, &retlen, sizeof(length)))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
+static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
+ uint32_t length, void __user *ptr, uint32_t __user *retp)
+{
+ struct mtd_oob_ops ops;
+ int ret = 0;
+
+ if (length > 4096)
+ return -EINVAL;
+
+ if (!mtd->read_oob)
+ ret = -EOPNOTSUPP;
+ else
+ ret = access_ok(VERIFY_WRITE, ptr,
+ length) ? 0 : -EFAULT;
+ if (ret)
+ return ret;
+
+ ops.ooblen = length;
+ ops.ooboffs = start & (mtd->oobsize - 1);
+ ops.datbuf = NULL;
+ ops.mode = MTD_OOB_PLACE;
+
+ if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
+ return -EINVAL;
+
+ ops.oobbuf = kmalloc(length, GFP_KERNEL);
+ if (!ops.oobbuf)
+ return -ENOMEM;
+
+ start &= ~((uint64_t)mtd->oobsize - 1);
+ ret = mtd->read_oob(mtd, start, &ops);
+
+ if (put_user(ops.oobretlen, retp))
+ ret = -EFAULT;
+ else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
+ ops.oobretlen))
+ ret = -EFAULT;
+
+ kfree(ops.oobbuf);
+ return ret;
+}
+
static int mtd_ioctl(struct inode *inode, struct file *file,
u_int cmd, u_long arg)
{
@@ -417,6 +512,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
break;
case MEMERASE:
+ case MEMERASE64:
{
struct erase_info *erase;
@@ -427,20 +523,32 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
if (!erase)
ret = -ENOMEM;
else {
- struct erase_info_user einfo;
-
wait_queue_head_t waitq;
DECLARE_WAITQUEUE(wait, current);
init_waitqueue_head(&waitq);
- if (copy_from_user(&einfo, argp,
- sizeof(struct erase_info_user))) {
- kfree(erase);
- return -EFAULT;
+ if (cmd == MEMERASE64) {
+ struct erase_info_user64 einfo64;
+
+ if (copy_from_user(&einfo64, argp,
+ sizeof(struct erase_info_user64))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo64.start;
+ erase->len = einfo64.length;
+ } else {
+ struct erase_info_user einfo32;
+
+ if (copy_from_user(&einfo32, argp,
+ sizeof(struct erase_info_user))) {
+ kfree(erase);
+ return -EFAULT;
+ }
+ erase->addr = einfo32.start;
+ erase->len = einfo32.length;
}
- erase->addr = einfo.start;
- erase->len = einfo.length;
erase->mtd = mtd;
erase->callback = mtdchar_erase_callback;
erase->priv = (unsigned long)&waitq;
@@ -474,100 +582,56 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
case MEMWRITEOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
- struct mtd_oob_buf __user *user_buf = argp;
- uint32_t retlen;
-
- if(!(file->f_mode & FMODE_WRITE))
- return -EPERM;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
-
- if (!mtd->write_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_READ, buf.ptr,
- buf.length) ? 0 : EFAULT;
-
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
-
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
-
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
-
- if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
- kfree(ops.oobbuf);
- return -EFAULT;
- }
+ struct mtd_oob_buf __user *buf_user = argp;
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->write_oob(mtd, buf.start, &ops);
-
- if (ops.oobretlen > 0xFFFFFFFFU)
- ret = -EOVERFLOW;
- retlen = ops.oobretlen;
- if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
+ /* NOTE: writes return length to buf_user->length */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->length);
break;
-
}
case MEMREADOOB:
{
struct mtd_oob_buf buf;
- struct mtd_oob_ops ops;
-
- if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
- return -EFAULT;
-
- if (buf.length > 4096)
- return -EINVAL;
+ struct mtd_oob_buf __user *buf_user = argp;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
else
- ret = access_ok(VERIFY_WRITE, buf.ptr,
- buf.length) ? 0 : -EFAULT;
- if (ret)
- return ret;
-
- ops.ooblen = buf.length;
- ops.ooboffs = buf.start & (mtd->oobsize - 1);
- ops.datbuf = NULL;
- ops.mode = MTD_OOB_PLACE;
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ buf.ptr, &buf_user->start);
+ break;
+ }
- if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
- return -EINVAL;
+ case MEMWRITEOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
- if (!ops.oobbuf)
- return -ENOMEM;
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
+ break;
+ }
- buf.start &= ~(mtd->oobsize - 1);
- ret = mtd->read_oob(mtd, buf.start, &ops);
+ case MEMREADOOB64:
+ {
+ struct mtd_oob_buf64 buf;
+ struct mtd_oob_buf64 __user *buf_user = argp;
- if (put_user(ops.oobretlen, (uint32_t __user *)argp))
- ret = -EFAULT;
- else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
- ops.oobretlen))
+ if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
-
- kfree(ops.oobbuf);
+ else
+ ret = mtd_do_readoob(mtd, buf.start, buf.length,
+ (void __user *)(uintptr_t)buf.usr_ptr,
+ &buf_user->length);
break;
}
@@ -758,6 +822,68 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
return ret;
} /* memory_ioctl */
+#ifdef CONFIG_COMPAT
+
+struct mtd_oob_buf32 {
+ u_int32_t start;
+ u_int32_t length;
+ compat_caddr_t ptr; /* unsigned char* */
+};
+
+#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
+#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
+
+static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct mtd_file_info *mfi = file->private_data;
+ struct mtd_info *mtd = mfi->mtd;
+ void __user *argp = compat_ptr(arg);
+ int ret = 0;
+
+ lock_kernel();
+
+ switch (cmd) {
+ case MEMWRITEOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_writeoob(file, mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->length);
+ break;
+ }
+
+ case MEMREADOOB32:
+ {
+ struct mtd_oob_buf32 buf;
+ struct mtd_oob_buf32 __user *buf_user = argp;
+
+ /* NOTE: writes return length to buf_user->start */
+ if (copy_from_user(&buf, argp, sizeof(buf)))
+ ret = -EFAULT;
+ else
+ ret = mtd_do_readoob(mtd, buf.start,
+ buf.length, compat_ptr(buf.ptr),
+ &buf_user->start);
+ break;
+ }
+ default:
+ ret = mtd_ioctl(inode, file, cmd, (unsigned long)argp);
+ }
+
+ unlock_kernel();
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
/*
* try to determine where a shared mapping can be made
* - only supported for NOMMU at the moment (MMU can't doesn't copy private
@@ -817,6 +943,9 @@ static const struct file_operations mtd_fops = {
.read = mtd_read,
.write = mtd_write,
.ioctl = mtd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtd_compat_ioctl,
+#endif
.open = mtd_open,
.release = mtd_close,
.mmap = mtd_mmap,
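
Note: the new MEMERASE64, MEMWRITEOOB64 and MEMREADOOB64 ioctls carry 64-bit offsets, so erase and OOB operations beyond 4 GiB can be requested from user space. A minimal user-space sketch of the erase path, assuming a header set that includes the new MEMERASE64 definition and /dev/mtd0 as the target:

/* Sketch: erase the first 128 KiB eraseblock of /dev/mtd0 via MEMERASE64.
 * Device path and eraseblock size are assumptions for illustration. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct erase_info_user64 ei = { .start = 0, .length = 128 * 1024 };
	int fd = open("/dev/mtd0", O_RDWR);

	if (fd < 0 || ioctl(fd, MEMERASE64, &ei) < 0) {
		perror("MEMERASE64");
		return 1;
	}
	return 0;
}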
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index bccb4b1ffc46..fac54a3fa3f1 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -23,8 +23,15 @@
#include "mtdcore.h"
-
-static struct class *mtd_class;
+static int mtd_cls_suspend(struct device *dev, pm_message_t state);
+static int mtd_cls_resume(struct device *dev);
+
+static struct class mtd_class = {
+ .name = "mtd",
+ .owner = THIS_MODULE,
+ .suspend = mtd_cls_suspend,
+ .resume = mtd_cls_resume,
+};
/* These are exported solely for the purpose of mtd_blkdevs.c. You
should not use them for _anything_ else */
@@ -52,7 +59,26 @@ static void mtd_release(struct device *dev)
/* remove /dev/mtdXro node if needed */
if (index)
- device_destroy(mtd_class, index + 1);
+ device_destroy(&mtd_class, index + 1);
+}
+
+static int mtd_cls_suspend(struct device *dev, pm_message_t state)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->suspend)
+ return mtd->suspend(mtd);
+ else
+ return 0;
+}
+
+static int mtd_cls_resume(struct device *dev)
+{
+ struct mtd_info *mtd = dev_to_mtd(dev);
+
+ if (mtd->resume)
+ mtd->resume(mtd);
+ return 0;
}
static ssize_t mtd_type_show(struct device *dev,
@@ -269,7 +295,7 @@ int add_mtd_device(struct mtd_info *mtd)
* physical device.
*/
mtd->dev.type = &mtd_devtype;
- mtd->dev.class = mtd_class;
+ mtd->dev.class = &mtd_class;
mtd->dev.devt = MTD_DEVT(i);
dev_set_name(&mtd->dev, "mtd%d", i);
if (device_register(&mtd->dev) != 0) {
@@ -278,7 +304,7 @@ int add_mtd_device(struct mtd_info *mtd)
}
if (MTD_DEVT(i))
- device_create(mtd_class, mtd->dev.parent,
+ device_create(&mtd_class, mtd->dev.parent,
MTD_DEVT(i) + 1,
NULL, "mtd%dro", i);
@@ -604,11 +630,12 @@ done:
static int __init init_mtd(void)
{
- mtd_class = class_create(THIS_MODULE, "mtd");
+ int ret;
+ ret = class_register(&mtd_class);
- if (IS_ERR(mtd_class)) {
- pr_err("Error creating mtd class.\n");
- return PTR_ERR(mtd_class);
+ if (ret) {
+ pr_err("Error registering mtd class: %d\n", ret);
+ return ret;
}
#ifdef CONFIG_PROC_FS
if ((proc_mtd = create_proc_entry( "mtd", 0, NULL )))
@@ -623,7 +650,7 @@ static void __exit cleanup_mtd(void)
if (proc_mtd)
remove_proc_entry( "mtd", NULL);
#endif /* CONFIG_PROC_FS */
- class_destroy(mtd_class);
+ class_unregister(&mtd_class);
}
module_init(init_mtd);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 29675edb44b4..349fcbe5cc0f 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -27,9 +27,7 @@ struct mtd_part {
struct mtd_info mtd;
struct mtd_info *master;
uint64_t offset;
- int index;
struct list_head list;
- int registered;
};
/*
@@ -321,8 +319,7 @@ int del_mtd_partitions(struct mtd_info *master)
list_for_each_entry_safe(slave, next, &mtd_partitions, list)
if (slave->master == master) {
list_del(&slave->list);
- if (slave->registered)
- del_mtd_device(&slave->mtd);
+ del_mtd_device(&slave->mtd);
kfree(slave);
}
@@ -395,7 +392,7 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
if (master->sync)
slave->mtd.sync = part_sync;
- if (!partno && master->suspend && master->resume) {
+ if (!partno && !master->dev.class && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
}
@@ -412,7 +409,6 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
slave->mtd.erase = part_erase;
slave->master = master;
slave->offset = part->offset;
- slave->index = partno;
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
@@ -500,15 +496,9 @@ static struct mtd_part *add_one_partition(struct mtd_info *master,
}
out_register:
- if (part->mtdp) {
- /* store the object pointer (caller may or may not register it*/
- *part->mtdp = &slave->mtd;
- slave->registered = 0;
- } else {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
- }
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+
return slave;
}
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f3276897859e..ce96c091f01b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -74,6 +74,12 @@ config MTD_NAND_AMS_DELTA
help
Support for NAND flash on Amstrad E3 (Delta).
+config MTD_NAND_OMAP2
+ tristate "NAND Flash device on OMAP2 and OMAP3"
+ depends on ARM && MTD_NAND && (ARCH_OMAP2 || ARCH_OMAP3)
+ help
+ Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+
config MTD_NAND_TS7250
tristate "NAND Flash device on TS-7250 board"
depends on MACH_TS72XX
@@ -139,27 +145,27 @@ config MTD_NAND_PPCHAMELEONEVB
This enables the NAND flash driver on the PPChameleon EVB Board.
config MTD_NAND_S3C2410
- tristate "NAND Flash support for S3C2410/S3C2440 SoC"
- depends on ARCH_S3C2410
+ tristate "NAND Flash support for Samsung S3C SoCs"
+ depends on ARCH_S3C2410 || ARCH_S3C64XX
help
- This enables the NAND flash controller on the S3C2410 and S3C2440
+ This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
No board specific support is done by this driver, each board
must advertise a platform_device for the driver to attach.
config MTD_NAND_S3C2410_DEBUG
- bool "S3C2410 NAND driver debug"
+ bool "Samsung S3C NAND driver debug"
depends on MTD_NAND_S3C2410
help
- Enable debugging of the S3C2410 NAND driver
+ Enable debugging of the S3C NAND driver
config MTD_NAND_S3C2410_HWECC
- bool "S3C2410 NAND Hardware ECC"
+ bool "Samsung S3C NAND Hardware ECC"
depends on MTD_NAND_S3C2410
help
- Enable the use of the S3C2410's internal ECC generator when
- using NAND. Early versions of the chip have had problems with
+ Enable the use of the controller's internal ECC generator when
+ using NAND. Early versions of the chips have had problems with
incorrect ECC generation, and if using these, the default of
software ECC is preferable.
@@ -171,7 +177,7 @@ config MTD_NAND_NDFC
NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
config MTD_NAND_S3C2410_CLKSTOP
- bool "S3C2410 NAND IDLE clock stop"
+ bool "Samsung S3C NAND IDLE clock stop"
depends on MTD_NAND_S3C2410
default n
help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index d33860ac42c3..f3a786b3cff3 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
obj-$(CONFIG_MTD_NAND_ATMEL) += atmel_nand.o
obj-$(CONFIG_MTD_NAND_GPIO) += gpio.o
+obj-$(CONFIG_MTD_NAND_OMAP2) += omap2.o
obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PXA3xx) += pxa3xx_nand.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 47a33cec3793..2802992b39da 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -47,6 +48,9 @@
#define no_ecc 0
#endif
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
/* Register access macros */
#define ecc_readl(add, reg) \
__raw_readl(add + ATMEL_ECC_##reg)
@@ -459,12 +463,17 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
if (host->board->det_pin) {
if (gpio_get_value(host->board->det_pin)) {
- printk("No SmartMedia card inserted.\n");
+ printk(KERN_INFO "No SmartMedia card inserted.\n");
res = ENXIO;
goto err_no_card;
}
}
+ if (on_flash_bbt) {
+ printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
+ nand_chip->options |= NAND_USE_FLASH_BBT;
+ }
+
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, 1)) {
res = -ENXIO;
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 4c2a67ca801e..8506e7e606fd 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -458,7 +458,7 @@ static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
uint8_t *buf, int is_read)
{
struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
@@ -496,11 +496,20 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
/* setup DMA register with Blackfin DMA API */
set_dma_config(CH_NFC, 0x0);
set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+/* The DMA word size differs between BF52x and BF54x */
+#ifdef CONFIG_BF52x
+ set_dma_x_count(CH_NFC, (page_size >> 1));
+ set_dma_x_modify(CH_NFC, 2);
+ val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
set_dma_x_count(CH_NFC, (page_size >> 2));
set_dma_x_modify(CH_NFC, 4);
-
- /* setup write or read operation */
val = DI_EN | WDSIZE_32;
+#endif
+ /* setup write or read operation */
if (is_read)
val |= WNR;
set_dma_config(CH_NFC, val);
@@ -512,8 +521,6 @@ static int bf5xx_nand_dma_rw(struct mtd_info *mtd,
else
bfin_write_NFC_PGCTL(0x2);
wait_for_completion(&info->dma_completion);
-
- return 0;
}
static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 02700f769b8a..0fad6487e6f4 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -44,7 +44,7 @@
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
- * The 1-bit ECC hardware is supported, but not yet the newer 4-bit ECC
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
@@ -54,11 +54,14 @@
struct davinci_nand_info {
struct mtd_info mtd;
struct nand_chip chip;
+ struct nand_ecclayout ecclayout;
struct device *dev;
struct clk *clk;
bool partitioned;
+ bool is_readmode;
+
void __iomem *base;
void __iomem *vaddr;
@@ -73,6 +76,7 @@ struct davinci_nand_info {
};
static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
@@ -218,6 +222,192 @@ static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
/*----------------------------------------------------------------------*/
/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&davinci_nand_lock, flags);
+
+ /* Start 4-bit ECC calculation for read/write */
+ val = davinci_nand_readl(info, NANDFCR_OFFSET);
+ val &= ~(0x03 << 4);
+ val |= (info->core_chipsel << 4) | BIT(12);
+ davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+ info->is_readmode = (mode == NAND_ECC_READ);
+
+ spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
+ const u32 mask = 0x03ff03ff;
+
+ code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+ code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+ code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+ code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+ const u_char *dat, u_char *ecc_code)
+{
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ u32 raw_ecc[4], *p;
+ unsigned i;
+
+ /* After a read, terminate ECC calculation by a dummy read
+ * of some 4-bit ECC register. ECC covers everything that
+ * was read; correct() just uses the hardware state, so
+ * ecc_code is not needed.
+ */
+ if (info->is_readmode) {
+ davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+ return 0;
+ }
+
+ /* Pack eight raw 10-bit ecc values into ten bytes, making
+ * two passes which each convert four values (in upper and
+ * lower halves of two 32-bit words) into five bytes. The
+ * ROM boot loader uses this same packing scheme.
+ */
+ nand_davinci_readecc_4bit(info, raw_ecc);
+ for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+ *ecc_code++ = p[0] & 0xff;
+ *ecc_code++ = ((p[0] >> 8) & 0x03) | ((p[0] >> 14) & 0xfc);
+ *ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] << 4) & 0xf0);
+ *ecc_code++ = ((p[1] >> 4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+ *ecc_code++ = (p[1] >> 18) & 0xff;
+ }
+
+ return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+ u_char *data, u_char *ecc_code, u_char *null)
+{
+ int i;
+ struct davinci_nand_info *info = to_davinci_nand(mtd);
+ unsigned short ecc10[8];
+ unsigned short *ecc16;
+ u32 syndrome[4];
+ unsigned num_errors, corrected;
+
+ /* All bytes 0xff? It's an erased page; ignore its ECC. */
+ for (i = 0; i < 10; i++) {
+ if (ecc_code[i] != 0xff)
+ goto compare;
+ }
+ return 0;
+
+compare:
+ /* Unpack ten bytes into eight 10 bit values. We know we're
+ * little-endian, and use type punning for less shifting/masking.
+ */
+ if (WARN_ON(0x01 & (unsigned) ecc_code))
+ return -EINVAL;
+ ecc16 = (unsigned short *)ecc_code;
+
+ ecc10[0] = (ecc16[0] >> 0) & 0x3ff;
+ ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+ ecc10[2] = (ecc16[1] >> 4) & 0x3ff;
+ ecc10[3] = ((ecc16[1] >> 14) & 0x3) | ((ecc16[2] << 2) & 0x3fc);
+ ecc10[4] = (ecc16[2] >> 8) | ((ecc16[3] << 8) & 0x300);
+ ecc10[5] = (ecc16[3] >> 2) & 0x3ff;
+ ecc10[6] = ((ecc16[3] >> 12) & 0xf) | ((ecc16[4] << 4) & 0x3f0);
+ ecc10[7] = (ecc16[4] >> 6) & 0x3ff;
+
+ /* Tell ECC controller about the expected ECC codes. */
+ for (i = 7; i >= 0; i--)
+ davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+ /* Allow time for syndrome calculation ... then read it.
+ * A syndrome of all zeroes means no detected errors.
+ */
+ davinci_nand_readl(info, NANDFSR_OFFSET);
+ nand_davinci_readecc_4bit(info, syndrome);
+ if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+ return 0;
+
+ /* Start address calculation, and wait for it to complete.
+ * We _could_ start reading more data while this is working,
+ * to speed up the overall page read.
+ */
+ davinci_nand_writel(info, NANDFCR_OFFSET,
+ davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+ for (;;) {
+ u32 fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+ switch ((fsr >> 8) & 0x0f) {
+ case 0: /* no error, should not happen */
+ return 0;
+ case 1: /* five or more errors detected */
+ return -EIO;
+ case 2: /* error addresses computed */
+ case 3:
+ num_errors = 1 + ((fsr >> 16) & 0x03);
+ goto correct;
+ default: /* still working on it */
+ cpu_relax();
+ continue;
+ }
+ }
+
+correct:
+ /* correct each error */
+ for (i = 0, corrected = 0; i < num_errors; i++) {
+ int error_address, error_value;
+
+ if (i > 1) {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD2_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL2_OFFSET);
+ } else {
+ error_address = davinci_nand_readl(info,
+ NAND_ERR_ADD1_OFFSET);
+ error_value = davinci_nand_readl(info,
+ NAND_ERR_ERRVAL1_OFFSET);
+ }
+
+ if (i & 1) {
+ error_address >>= 16;
+ error_value >>= 16;
+ }
+ error_address &= 0x3ff;
+ error_address = (512 + 7) - error_address;
+
+ if (error_address < 512) {
+ data[error_address] ^= error_value;
+ corrected++;
+ }
+ }
+
+ return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
@@ -294,6 +484,23 @@ static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
/*----------------------------------------------------------------------*/
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte,
+ * and not overlapping the default BBT markers.
+ */
+static struct nand_ecclayout hwecc4_small __initconst = {
+ .eccbytes = 10,
+ .eccpos = { 0, 1, 2, 3, 4,
+ /* offset 5 holds the badblock marker */
+ 6, 7,
+ 13, 14, 15, },
+ .oobfree = {
+ {.offset = 8, .length = 5, },
+ {.offset = 16, },
+ },
+};
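As a worked example (a standalone sketch; struct names are hypothetical): with this layout on a 16-byte small-page OOB, the probe code further down sets oobfree[1].length to oobsize - 16 = 0, so the MTD core is left with five free OOB bytes at offsets 8-12:

#include <stdio.h>

struct free_seg { int offset, length; };

int main(void)
{
	/* hwecc4_small applied to a 16-byte OOB: oobfree[1] length is 16 - 16 */
	struct free_seg oobfree[] = { { 8, 5 }, { 16, 0 } };
	int i, oobavail = 0;

	for (i = 0; i < 2 && oobfree[i].length; i++)
		oobavail += oobfree[i].length;
	printf("oobavail = %d\n", oobavail);	/* prints 5 */
	return 0;
}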
+
+
static int __init nand_davinci_probe(struct platform_device *pdev)
{
struct davinci_nand_pdata *pdata = pdev->dev.platform_data;
@@ -306,6 +513,10 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
uint32_t val;
nand_ecc_modes_t ecc_mode;
+ /* insist on board-specific configuration */
+ if (!pdata)
+ return -ENODEV;
+
/* which external chipselect will we be managing? */
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
@@ -351,7 +562,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.select_chip = nand_davinci_select_chip;
/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
- info->chip.options = pdata ? pdata->options : 0;
+ info->chip.options = pdata->options;
info->ioaddr = (uint32_t __force) vaddr;
@@ -360,14 +571,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->mask_chipsel = pdata->mask_chipsel;
/* use nandboot-capable ALE/CLE masks by default */
- if (pdata && pdata->mask_ale)
- info->mask_ale = pdata->mask_cle;
- else
- info->mask_ale = MASK_ALE;
- if (pdata && pdata->mask_cle)
- info->mask_cle = pdata->mask_cle;
- else
- info->mask_cle = MASK_CLE;
+ info->mask_ale = pdata->mask_ale ? : MASK_ALE;
+ info->mask_cle = pdata->mask_cle ? : MASK_CLE;
/* Set address of hardware control function */
info->chip.cmd_ctrl = nand_davinci_hwcontrol;
@@ -377,30 +582,44 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->chip.read_buf = nand_davinci_read_buf;
info->chip.write_buf = nand_davinci_write_buf;
- /* use board-specific ECC config; else, the best available */
- if (pdata)
- ecc_mode = pdata->ecc_mode;
- else
- ecc_mode = NAND_ECC_HW;
+ /* Use board-specific ECC config */
+ ecc_mode = pdata->ecc_mode;
+ ret = -EINVAL;
switch (ecc_mode) {
case NAND_ECC_NONE:
case NAND_ECC_SOFT:
+ pdata->ecc_bits = 0;
break;
case NAND_ECC_HW:
- info->chip.ecc.calculate = nand_davinci_calculate_1bit;
- info->chip.ecc.correct = nand_davinci_correct_1bit;
- info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ if (pdata->ecc_bits == 4) {
+ /* No sanity checks: CPUs must support this,
+ * and the chips may not use NAND_BUSWIDTH_16.
+ */
+
+ /* No sharing 4-bit hardware between chipselects yet */
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc4_busy)
+ ret = -EBUSY;
+ else
+ ecc4_busy = true;
+ spin_unlock_irq(&davinci_nand_lock);
+
+ if (ret == -EBUSY)
+ goto err_ecc;
+
+ info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+ info->chip.ecc.correct = nand_davinci_correct_4bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+ info->chip.ecc.bytes = 10;
+ } else {
+ info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+ info->chip.ecc.correct = nand_davinci_correct_1bit;
+ info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+ info->chip.ecc.bytes = 3;
+ }
info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 3;
break;
- case NAND_ECC_HW_SYNDROME:
- /* FIXME implement */
- info->chip.ecc.size = 512;
- info->chip.ecc.bytes = 10;
-
- dev_warn(&pdev->dev, "4-bit ECC nyet supported\n");
- /* FALL THROUGH */
default:
ret = -EINVAL;
goto err_ecc;
@@ -441,12 +660,56 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- ret = nand_scan(&info->mtd, pdata->mask_chipsel ? 2 : 1);
+ ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
goto err_scan;
}
+ /* Update ECC layout if needed ... for 1-bit HW ECC, the default
+ * is OK, but it allocates 6 bytes when only 3 are needed (for
+ * each 512 bytes). For the 4-bit HW ECC, that default is not
+ * usable: 10 bytes are needed, not 6.
+ */
+ if (pdata->ecc_bits == 4) {
+ int chunks = info->mtd.writesize / 512;
+
+ if (!chunks || info->mtd.oobsize < 16) {
+ dev_dbg(&pdev->dev, "too small\n");
+ ret = -EINVAL;
+ goto err_scan;
+ }
+
+ /* For small page chips, preserve the manufacturer's
+ * badblock marking data ... and make sure a flash BBT
+ * marker fits in the free bytes.
+ */
+ if (chunks == 1) {
+ info->ecclayout = hwecc4_small;
+ info->ecclayout.oobfree[1].length =
+ info->mtd.oobsize - 16;
+ goto syndrome_done;
+ }
+
+ /* For large page chips we'll be wanting to use a
+ * not-yet-implemented mode that reads OOB data
+ * before reading the body of the page, to avoid
+ * the "infix OOB" model of NAND_ECC_HW_SYNDROME
+ * (and preserve manufacturer badblock markings).
+ */
+ dev_warn(&pdev->dev, "no 4-bit ECC support yet "
+ "for large page NAND\n");
+ ret = -EIO;
+ goto err_scan;
+
+syndrome_done:
+ info->chip.ecc.layout = &info->ecclayout;
+ }
+
+ ret = nand_scan_tail(&info->mtd);
+ if (ret < 0)
+ goto err_scan;
+
if (mtd_has_partitions()) {
struct mtd_partition *mtd_parts = NULL;
int mtd_parts_nb = 0;
@@ -455,22 +718,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
static const char *probes[] __initconst =
{ "cmdlinepart", NULL };
- const char *master_name;
-
- /* Set info->mtd.name = 0 temporarily */
- master_name = info->mtd.name;
- info->mtd.name = (char *)0;
-
- /* info->mtd.name == 0, means: don't bother checking
- <mtd-id> */
mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
&mtd_parts, 0);
-
- /* Restore info->mtd.name */
- info->mtd.name = master_name;
}
- if (mtd_parts_nb <= 0 && pdata) {
+ if (mtd_parts_nb <= 0) {
mtd_parts = pdata->parts;
mtd_parts_nb = pdata->nr_parts;
}
@@ -483,7 +735,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
info->partitioned = true;
}
- } else if (pdata && pdata->nr_parts) {
+ } else if (pdata->nr_parts) {
dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
pdata->nr_parts, info->mtd.name);
}
@@ -509,6 +761,11 @@ err_scan:
err_clk_enable:
clk_put(info->clk);
+ spin_lock_irq(&davinci_nand_lock);
+ if (ecc_mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
err_ecc:
err_clk:
err_ioremap:
@@ -532,6 +789,11 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
else
status = del_mtd_device(&info->mtd);
+ spin_lock_irq(&davinci_nand_lock);
+ if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+ ecc4_busy = false;
+ spin_unlock_irq(&davinci_nand_lock);
+
iounmap(info->base);
iounmap(info->vaddr);
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 40c26080ecda..76beea40d2cf 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -138,7 +138,14 @@ static struct nand_ecclayout nand_hw_eccoob_8 = {
static struct nand_ecclayout nand_hw_eccoob_16 = {
.eccbytes = 5,
.eccpos = {6, 7, 8, 9, 10},
- .oobfree = {{0, 6}, {12, 4}, }
+ .oobfree = {{0, 5}, {11, 5}, }
+};
+
+static struct nand_ecclayout nand_hw_eccoob_64 = {
+ .eccbytes = 20,
+ .eccpos = {6, 7, 8, 9, 10, 22, 23, 24, 25, 26,
+ 38, 39, 40, 41, 42, 54, 55, 56, 57, 58},
+ .oobfree = {{2, 4}, {11, 10}, {27, 10}, {43, 10}, {59, 5}, }
};
#ifdef CONFIG_MTD_PARTITIONS
@@ -192,7 +199,7 @@ static void wait_op_done(struct mxc_nand_host *host, int max_retries,
}
udelay(1);
}
- if (max_retries <= 0)
+ if (max_retries < 0)
DEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n",
__func__, param);
}
@@ -795,9 +802,13 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
send_addr(host, (page_addr & 0xff), false);
if (host->pagesize_2k) {
- send_addr(host, (page_addr >> 8) & 0xFF, false);
- if (mtd->size >= 0x40000000)
+ if (mtd->size >= 0x10000000) {
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, false);
send_addr(host, (page_addr >> 16) & 0xff, true);
+ } else
+ /* paddr_8 - paddr_15 */
+ send_addr(host, (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
@@ -923,7 +934,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.mode = NAND_ECC_HW;
this->ecc.size = 512;
this->ecc.bytes = 3;
- this->ecc.layout = &nand_hw_eccoob_8;
tmp = readw(host->regs + NFC_CONFIG1);
tmp |= NFC_ECC_EN;
writew(tmp, host->regs + NFC_CONFIG1);
@@ -957,12 +967,44 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.layout = &nand_hw_eccoob_16;
}
- host->pagesize_2k = 0;
+ /* first scan to find the device and get the page size */
+ if (nand_scan_ident(mtd, 1)) {
+ err = -ENXIO;
+ goto escan;
+ }
- /* Scan to find existence of the device */
- if (nand_scan(mtd, 1)) {
- DEBUG(MTD_DEBUG_LEVEL0,
- "MXC_ND: Unable to find any NAND device.\n");
+ host->pagesize_2k = (mtd->writesize == 2048) ? 1 : 0;
+
+ if (this->ecc.mode == NAND_ECC_HW) {
+ switch (mtd->oobsize) {
+ case 8:
+ this->ecc.layout = &nand_hw_eccoob_8;
+ break;
+ case 16:
+ this->ecc.layout = &nand_hw_eccoob_16;
+ break;
+ case 64:
+ this->ecc.layout = &nand_hw_eccoob_64;
+ break;
+ default:
+ /* page size not handled by HW ECC */
+ /* switching back to soft ECC */
+ this->ecc.size = 512;
+ this->ecc.bytes = 3;
+ this->ecc.layout = &nand_hw_eccoob_8;
+ this->ecc.mode = NAND_ECC_SOFT;
+ this->ecc.calculate = NULL;
+ this->ecc.correct = NULL;
+ this->ecc.hwctl = NULL;
+ tmp = readw(host->regs + NFC_CONFIG1);
+ tmp &= ~NFC_ECC_EN;
+ writew(tmp, host->regs + NFC_CONFIG1);
+ break;
+ }
+ }
+
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
err = -ENXIO;
goto escan;
}
@@ -985,7 +1027,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
return 0;
escan:
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
eirq:
iounmap(host->regs);
eres:
@@ -1005,7 +1047,7 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
nand_release(&host->mtd);
- free_irq(host->irq, NULL);
+ free_irq(host->irq, host);
iounmap(host->regs);
kfree(host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3d7ed432fa41..8c21b89d2d0c 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2756,7 +2756,8 @@ int nand_scan_tail(struct mtd_info *mtd)
* the out of band area
*/
chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
+ for (i = 0; i < ARRAY_SIZE(chip->ecc.layout->oobfree)
+ && chip->ecc.layout->oobfree[i].length; i++)
chip->ecc.layout->oobavail +=
chip->ecc.layout->oobfree[i].length;
mtd->oobavail = chip->ecc.layout->oobavail;
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 868147acce2c..c0cb87d6d16e 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -428,8 +428,8 @@ EXPORT_SYMBOL(nand_calculate_ecc);
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- unsigned char b0, b1, b2;
- unsigned char byte_addr, bit_addr;
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
/* 256 or 512 bytes/ecc */
const uint32_t eccsize_mult =
(((struct nand_chip *)mtd->priv)->ecc.size) >> 8;
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
new file mode 100644
index 000000000000..0cd76f89f4b0
--- /dev/null
+++ b/drivers/mtd/nand/omap2.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+
+#include <asm/dma.h>
+
+#include <mach/gpmc.h>
+#include <mach/nand.h>
+
+#define GPMC_IRQ_STATUS 0x18
+#define GPMC_ECC_CONFIG 0x1F4
+#define GPMC_ECC_CONTROL 0x1F8
+#define GPMC_ECC_SIZE_CONFIG 0x1FC
+#define GPMC_ECC1_RESULT 0x200
+
+#define DRIVER_NAME "omap2-nand"
+
+/* size (4 KiB) for IO mapping */
+#define NAND_IO_SIZE SZ_4K
+
+#define NAND_WP_OFF 0
+#define NAND_WP_BIT 0x00000010
+#define WR_RD_PIN_MONITORING 0x00600000
+
+#define GPMC_BUF_FULL 0x00000001
+#define GPMC_BUF_EMPTY 0x00000000
+
+#define NAND_Ecc_P1e (1 << 0)
+#define NAND_Ecc_P2e (1 << 1)
+#define NAND_Ecc_P4e (1 << 2)
+#define NAND_Ecc_P8e (1 << 3)
+#define NAND_Ecc_P16e (1 << 4)
+#define NAND_Ecc_P32e (1 << 5)
+#define NAND_Ecc_P64e (1 << 6)
+#define NAND_Ecc_P128e (1 << 7)
+#define NAND_Ecc_P256e (1 << 8)
+#define NAND_Ecc_P512e (1 << 9)
+#define NAND_Ecc_P1024e (1 << 10)
+#define NAND_Ecc_P2048e (1 << 11)
+
+#define NAND_Ecc_P1o (1 << 16)
+#define NAND_Ecc_P2o (1 << 17)
+#define NAND_Ecc_P4o (1 << 18)
+#define NAND_Ecc_P8o (1 << 19)
+#define NAND_Ecc_P16o (1 << 20)
+#define NAND_Ecc_P32o (1 << 21)
+#define NAND_Ecc_P64o (1 << 22)
+#define NAND_Ecc_P128o (1 << 23)
+#define NAND_Ecc_P256o (1 << 24)
+#define NAND_Ecc_P512o (1 << 25)
+#define NAND_Ecc_P1024o (1 << 26)
+#define NAND_Ecc_P2048o (1 << 27)
+
+#define TF(value) (value ? 1 : 0)
+
+#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
+#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
+#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
+#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
+#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
+#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
+#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
+#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)
+
+#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
+#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
+#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
+#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)
+
+#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
+#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
+#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
+#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
+#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
+#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
+#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
+#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)
+
+#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
+#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
+#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
+#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
+#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
+#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
+#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
+#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)
+
+#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
+#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
+
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+struct omap_nand_info {
+ struct nand_hw_control controller;
+ struct omap_nand_platform_data *pdata;
+ struct mtd_info mtd;
+ struct mtd_partition *parts;
+ struct nand_chip nand;
+ struct platform_device *pdev;
+
+ int gpmc_cs;
+ unsigned long phys_base;
+ void __iomem *gpmc_cs_baseaddr;
+ void __iomem *gpmc_baseaddr;
+};
+
+/**
+ * omap_nand_wp - This function enables or disables the Write Protect feature
+ * @mtd: MTD device structure
+ * @mode: WP ON/OFF
+ */
+static void omap_nand_wp(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ unsigned long config = __raw_readl(info->gpmc_baseaddr + GPMC_CONFIG);
+
+ if (mode)
+ config &= ~(NAND_WP_BIT); /* WP is ON */
+ else
+ config |= (NAND_WP_BIT); /* WP is OFF */
+
+ __raw_writel(config, (info->gpmc_baseaddr + GPMC_CONFIG));
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ switch (ctrl) {
+ case NAND_CTRL_CHANGE | NAND_CTRL_CLE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_CTRL_ALE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_ADDRESS;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+
+ case NAND_CTRL_CHANGE | NAND_NCE:
+ info->nand.IO_ADDR_W = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ info->nand.IO_ADDR_R = info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_DATA;
+ break;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, info->nand.IO_ADDR_W);
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *nand = mtd->priv;
+
+ __raw_readsw(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+ u16 *p = (u16 *) buf;
+
+ /* FIXME try bursts of writesw() or DMA ... */
+ len >>= 1;
+
+ while (len--) {
+ writew(*p++, info->nand.IO_ADDR_W);
+
+ while (GPMC_BUF_EMPTY == (readl(info->gpmc_baseaddr +
+ GPMC_STATUS) & GPMC_BUF_FULL))
+ ;
+ }
+}
+/**
+ * omap_verify_buf - Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ */
+static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ u16 *p = (u16 *) buf;
+
+ len >>= 1;
+ while (len--) {
+ if (*p++ != cpu_to_le16(readw(info->nand.IO_ADDR_R)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+/**
+ * omap_hwecc_init - Initialize the HW ECC for NAND flash in GPMC controller
+ * @mtd: MTD device structure
+ */
+static void omap_hwecc_init(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned long val = 0x0;
+
+ /* Read from ECC Control Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* Clear all ECC | Enable Reg1 */
+ val = ((0x00000001<<8) | 0x00000001);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+
+ /* Read from ECC Size Config Register */
+ val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+ /* ECCSIZE1=512 | Select eccResultsize[0-3] */
+ val = ((((chip->ecc.size >> 1) - 1) << 22) | (0x0000000F));
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_SIZE_CONFIG);
+}
+
+/**
+ * gen_true_ecc - This function will generate true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+ u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+ ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+ ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+ P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+ ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+ P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+ ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+ P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1: ecc code from nand spare area
+ * @ecc_data2: ecc code from hardware register obtained from hardware ecc
+ * @page_data: page data
+ *
+ * This function compares two ECCs and indicates if there is an error.
+ * If the error can be corrected, it is corrected in the buffer.
+ */
+static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
+ u8 *ecc_data2, /* read from register */
+ u8 *page_data)
+{
+ uint i;
+ u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+ u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
+ u8 ecc_bit[24];
+ u8 ecc_sum = 0;
+ u8 find_bit = 0;
+ uint find_byte = 0;
+ int isEccFF;
+
+ isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+ gen_true_ecc(ecc_data1);
+ gen_true_ecc(ecc_data2);
+
+ for (i = 0; i <= 2; i++) {
+ *(ecc_data1 + i) = ~(*(ecc_data1 + i));
+ *(ecc_data2 + i) = ~(*(ecc_data2 + i));
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp0_bit[i] = *ecc_data1 % 2;
+ *ecc_data1 = *ecc_data1 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp1_bit[i] = *(ecc_data1 + 1) % 2;
+ *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ tmp2_bit[i] = *(ecc_data1 + 2) % 2;
+ *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp0_bit[i] = *ecc_data2 % 2;
+ *ecc_data2 = *ecc_data2 / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp1_bit[i] = *(ecc_data2 + 1) % 2;
+ *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+ }
+
+ for (i = 0; i < 8; i++) {
+ comp2_bit[i] = *(ecc_data2 + 2) % 2;
+ *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+ }
+
+ for (i = 0; i < 6; i++)
+ ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+ for (i = 0; i < 8; i++)
+ ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+ ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+ ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+ for (i = 0; i < 24; i++)
+ ecc_sum += ecc_bit[i];
+
+ switch (ecc_sum) {
+ case 0:
+ /* Not reached because this function is not called if
+ * ECC values are equal
+ */
+ return 0;
+
+ case 1:
+ /* Uncorrectable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+ return -1;
+
+ case 11:
+ /* UN-Correctable error */
+ DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+ return -1;
+
+ case 12:
+ /* Correctable error */
+ find_byte = (ecc_bit[23] << 8) +
+ (ecc_bit[21] << 7) +
+ (ecc_bit[19] << 6) +
+ (ecc_bit[17] << 5) +
+ (ecc_bit[15] << 4) +
+ (ecc_bit[13] << 3) +
+ (ecc_bit[11] << 2) +
+ (ecc_bit[9] << 1) +
+ ecc_bit[7];
+
+ find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+ DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
+ "offset: %d, bit: %d\n", find_byte, find_bit);
+
+ page_data[find_byte] ^= (1 << find_bit);
+
+ return 0;
+ default:
+ if (isEccFF) {
+ if (ecc_data2[0] == 0 &&
+ ecc_data2[1] == 0 &&
+ ecc_data2[2] == 0)
+ return 0;
+ }
+ DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+ return -1;
+ }
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC register
+ * values and, if they mismatch, calls omap_compare_ecc() for error
+ * detection and correction.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int blockCnt = 0, i = 0, ret = 0;
+
+ /* Ex NAND_ECC_HW12_2048 */
+ if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+ (info->nand.ecc.size == 2048))
+ blockCnt = 4;
+ else
+ blockCnt = 1;
+
+ for (i = 0; i < blockCnt; i++) {
+ if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+ ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+ if (ret < 0)
+ return ret;
+ }
+ read_ecc += 3;
+ calc_ecc += 3;
+ dat += 512;
+ }
+ return 0;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using noninverted ECC can be considered ugly since writing a blank
+ * page, i.e. padding, will clear the ECC bytes. This is no problem as
+ * long as nobody is trying to write data on the seemingly unused page. Reading
+ * an erased page will produce an ECC mismatch between generated and read
+ * ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long val = 0x0;
+ unsigned long reg;
+
+ /* Start Reading from HW ECC1_Result = 0x200 */
+ reg = (unsigned long)(info->gpmc_baseaddr + GPMC_ECC1_RESULT);
+ val = __raw_readl(reg);
+ *ecc_code++ = val; /* P128e, ..., P1e */
+ *ecc_code++ = val >> 16; /* P128o, ..., P1o */
+ /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+ *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+ reg += 4;
+
+ return 0;
+}
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+ unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ unsigned long val = __raw_readl(info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+
+ switch (mode) {
+ case NAND_ECC_READ:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_READSYN:
+ __raw_writel(0x100, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ case NAND_ECC_WRITE:
+ __raw_writel(0x101, info->gpmc_baseaddr + GPMC_ECC_CONTROL);
+ /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+ val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+ break;
+ default:
+ DEBUG(MTD_DEBUG_LEVEL0, "Error: Unrecognized Mode[%d]!\n",
+ mode);
+ break;
+ }
+
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_ECC_CONFIG);
+}
+#endif
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * The wait function is called during program and erase operations. Given
+ * the way it is called from the MTD layer, we should wait until the NAND
+ * chip is ready after the programming/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+ struct nand_chip *this = mtd->priv;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long timeo = jiffies;
+ int status, state = this->state;
+
+ if (state == FL_ERASING)
+ timeo += (HZ * 400) / 1000;
+ else
+ timeo += (HZ * 20) / 1000;
+
+ this->IO_ADDR_W = (void *) info->gpmc_cs_baseaddr +
+ GPMC_CS_NAND_COMMAND;
+ this->IO_ADDR_R = (void *) info->gpmc_cs_baseaddr + GPMC_CS_NAND_DATA;
+
+ __raw_writeb(NAND_CMD_STATUS & 0xFF, this->IO_ADDR_W);
+
+ while (time_before(jiffies, timeo)) {
+ status = __raw_readb(this->IO_ADDR_R);
+ if (!(status & 0x40))
+ break;
+ }
+ return status;
+}
+
+/**
+ * omap_dev_ready - calls the platform specific dev_ready function
+ * @mtd: MTD device structure
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned int val = __raw_readl(info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+
+ if ((val & 0x100) == 0x100) {
+ /* Clear IRQ Interrupt */
+ val |= 0x100;
+ val &= ~(0x0);
+ __raw_writel(val, info->gpmc_baseaddr + GPMC_IRQ_STATUS);
+ } else {
+ unsigned int cnt = 0;
+ while (cnt++ < 0x1FF) {
+ if ((val & 0x100) == 0x100)
+ return 0;
+ val = __raw_readl(info->gpmc_baseaddr +
+ GPMC_IRQ_STATUS);
+ }
+ }
+
+ return 1;
+}
+
+static int __devinit omap_nand_probe(struct platform_device *pdev)
+{
+ struct omap_nand_info *info;
+ struct omap_nand_platform_data *pdata;
+ int err;
+ unsigned long val;
+
+
+ pdata = pdev->dev.platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data missing\n");
+ return -ENODEV;
+ }
+
+ info = kzalloc(sizeof(struct omap_nand_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, info);
+
+ spin_lock_init(&info->controller.lock);
+ init_waitqueue_head(&info->controller.wq);
+
+ info->pdev = pdev;
+
+ info->gpmc_cs = pdata->cs;
+ info->gpmc_baseaddr = pdata->gpmc_baseaddr;
+ info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
+
+ info->mtd.priv = &info->nand;
+ info->mtd.name = dev_name(&pdev->dev);
+ info->mtd.owner = THIS_MODULE;
+
+ err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+ goto out_free_info;
+ }
+
+ /* Enable RD PIN Monitoring Reg */
+ if (pdata->dev_ready) {
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
+ val |= WR_RD_PIN_MONITORING;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
+ }
+
+ val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
+ val &= ~(0xf << 8);
+ val |= (0xc & 0xf) << 8;
+ gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
+
+ /* NAND write protect off */
+ omap_nand_wp(&info->mtd, NAND_WP_OFF);
+
+ if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+ pdev->dev.driver->name)) {
+ err = -EBUSY;
+ goto out_free_cs;
+ }
+
+ info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+ if (!info->nand.IO_ADDR_R) {
+ err = -ENOMEM;
+ goto out_release_mem_region;
+ }
+ info->nand.controller = &info->controller;
+
+ info->nand.IO_ADDR_W = info->nand.IO_ADDR_R;
+ info->nand.cmd_ctrl = omap_hwcontrol;
+
+ /* REVISIT: only supports 16-bit NAND flash */
+
+ info->nand.read_buf = omap_read_buf16;
+ info->nand.write_buf = omap_write_buf16;
+ info->nand.verify_buf = omap_verify_buf;
+
+ /*
+ * If RDY/BSY line is connected to OMAP then use the omap ready
+ * function and the generic nand_wait function, which reads the status
+ * register after monitoring the RDY/BSY line. Otherwise use a standard
+ * chip delay, which is slightly more than tR (AC Timing) of the NAND
+ * device, and read the status register until you get a failure or success.
+ */
+ if (pdata->dev_ready) {
+ info->nand.dev_ready = omap_dev_ready;
+ info->nand.chip_delay = 0;
+ } else {
+ info->nand.waitfunc = omap_wait;
+ info->nand.chip_delay = 50;
+ }
+
+ info->nand.options |= NAND_SKIP_BBTSCAN;
+ if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
+ == 0x1000)
+ info->nand.options |= NAND_BUSWIDTH_16;
+
+#ifdef CONFIG_MTD_NAND_OMAP_HWECC
+ info->nand.ecc.bytes = 3;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.calculate = omap_calculate_ecc;
+ info->nand.ecc.hwctl = omap_enable_hwecc;
+ info->nand.ecc.correct = omap_correct_data;
+ info->nand.ecc.mode = NAND_ECC_HW;
+
+ /* init HW ECC */
+ omap_hwecc_init(&info->mtd);
+#else
+ info->nand.ecc.mode = NAND_ECC_SOFT;
+#endif
+
+ /* DIP switches on some boards change between 8 and 16 bit
+ * bus widths for flash. Try the other width if the first try fails.
+ */
+ if (nand_scan(&info->mtd, 1)) {
+ info->nand.options ^= NAND_BUSWIDTH_16;
+ if (nand_scan(&info->mtd, 1)) {
+ err = -ENXIO;
+ goto out_release_mem_region;
+ }
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+ err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
+ if (err > 0)
+ add_mtd_partitions(&info->mtd, info->parts, err);
+ else if (pdata->parts)
+ add_mtd_partitions(&info->mtd, pdata->parts, pdata->nr_parts);
+ else
+#endif
+ add_mtd_device(&info->mtd);
+
+ platform_set_drvdata(pdev, &info->mtd);
+
+ return 0;
+
+out_release_mem_region:
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+out_free_cs:
+ gpmc_cs_free(info->gpmc_cs);
+out_free_info:
+ kfree(info);
+
+ return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct omap_nand_info *info = container_of(mtd,
+ struct omap_nand_info, mtd);
+
+ platform_set_drvdata(pdev, NULL);
+ /* Release NAND device, its internal structures and partitions */
+ nand_release(&info->mtd);
+ iounmap(info->nand.IO_ADDR_R);
+ kfree(info);
+ return 0;
+}
+
+static struct platform_driver omap_nand_driver = {
+ .probe = omap_nand_probe,
+ .remove = omap_nand_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init omap_nand_init(void)
+{
+ printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME);
+ return platform_driver_register(&omap_nand_driver);
+}
+
+static void __exit omap_nand_exit(void)
+{
+ platform_driver_unregister(&omap_nand_driver);
+}
+
+module_init(omap_nand_init);
+module_exit(omap_nand_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index c2dfd3ea353d..7ad972229db4 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -47,6 +47,28 @@ static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl
writeb(cmd, nc->IO_ADDR_W + offs);
}
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *io_base = chip->IO_ADDR_R;
+ uint64_t *buf64;
+ int i = 0;
+
+ while (len && (unsigned long)buf & 7) {
+ *buf++ = readb(io_base);
+ len--;
+ }
+ buf64 = (uint64_t *)buf;
+ while (i < len/8) {
+ uint64_t x;
+ asm ("ldrd\t%0, [%1]" : "=r" (x) : "r" (io_base));
+ buf64[i++] = x;
+ }
+ i *= 8;
+ while (i < len)
+ buf[i++] = readb(io_base);
+}
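The helper above copies unaligned leading bytes one at a time, then drains the (fixed-address) data register in 64-bit chunks via ldrd, and finishes with byte reads. A hedged, portable sketch of the same head/body/tail split over an ordinary memory source, with plain loads standing in for the device register access:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustration only: copy len bytes so that the bulk of the work happens
 * on an 8-byte-aligned destination, as orion_nand_read_buf() does. */
static void copy_head_body_tail(uint8_t *buf, const uint8_t *src, size_t len)
{
	size_t i = 0;

	while (len && ((uintptr_t)buf & 7)) {	/* head: align destination */
		*buf++ = *src++;
		len--;
	}
	for (; i < len / 8; i++)		/* body: 8 bytes at a time */
		memcpy(buf + i * 8, src + i * 8, 8);
	for (i *= 8; i < len; i++)		/* tail: remaining bytes */
		buf[i] = src[i];
}

int main(void)
{
	uint8_t src[37], dst[37];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (uint8_t)i;
	copy_head_body_tail(dst, src, sizeof(src));
	printf("%s\n", memcmp(src, dst, sizeof(dst)) ? "mismatch" : "copies match");
	return 0;
}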
+
static int __init orion_nand_probe(struct platform_device *pdev)
{
struct mtd_info *mtd;
@@ -83,6 +105,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
nc->priv = board;
nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->read_buf = orion_nand_read_buf;
nc->ecc.mode = NAND_ECC_SOFT;
if (board->chip_delay)
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 86e1d08eee00..4e16c6f5bdd5 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -61,6 +61,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.dev_ready = pdata->ctrl.dev_ready;
data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.write_buf = pdata->ctrl.write_buf;
+ data->chip.read_buf = pdata->ctrl.read_buf;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
@@ -70,6 +72,13 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
+ /* Handle any platform specific setup */
+ if (pdata->ctrl.probe) {
+ res = pdata->ctrl.probe(pdev);
+ if (res)
+ goto out;
+ }
+
/* Scan to find existence of the device */
if (nand_scan(&data->mtd, 1)) {
res = -ENXIO;
@@ -86,6 +95,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
return 0;
}
}
+ if (pdata->chip.set_parts)
+ pdata->chip.set_parts(data->mtd.size, &pdata->chip);
if (pdata->chip.partitions) {
data->parts = pdata->chip.partitions;
res = add_mtd_partitions(&data->mtd, data->parts,
@@ -99,6 +110,8 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
nand_release(&data->mtd);
out:
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
platform_set_drvdata(pdev, NULL);
iounmap(data->io_base);
kfree(data);
@@ -111,15 +124,15 @@ out:
static int __devexit plat_nand_remove(struct platform_device *pdev)
{
struct plat_nand_data *data = platform_get_drvdata(pdev);
-#ifdef CONFIG_MTD_PARTITIONS
struct platform_nand_data *pdata = pdev->dev.platform_data;
-#endif
nand_release(&data->mtd);
#ifdef CONFIG_MTD_PARTITIONS
if (data->parts && data->parts != pdata->chip.partitions)
kfree(data->parts);
#endif
+ if (pdata->ctrl.remove)
+ pdata->ctrl.remove(pdev);
iounmap(data->io_base);
kfree(data);
@@ -128,7 +141,7 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
static struct platform_driver plat_nand_driver = {
.probe = plat_nand_probe,
- .remove = plat_nand_remove,
+ .remove = __devexit_p(plat_nand_remove),
.driver = {
.name = "gen_nand",
.owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 8e375d5fe231..11dc7e69c4fb 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -74,6 +74,14 @@ static struct nand_ecclayout nand_hw_eccoob = {
struct s3c2410_nand_info;
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @mtd: The MTD instance to pass to the MTD layer.
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
struct s3c2410_nand_mtd {
struct mtd_info mtd;
struct nand_chip chip;
@@ -90,6 +98,21 @@ enum s3c_cpu_type {
/* overview of the s3c2410 nand state */
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @area: The IO area resource that came from request_mem_region().
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers described by @area.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @cpu_type: The exact type of this controller.
+ */
struct s3c2410_nand_info {
/* mtd info */
struct nand_hw_control controller;
@@ -145,12 +168,19 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info)
#define NS_IN_KHZ 1000000
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
- result = (wanted * clk) / NS_IN_KHZ;
- result++;
+ result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
pr_debug("result %d from %ld, %d\n", result, clk, wanted);
@@ -169,13 +199,21 @@ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
/* controller setup */
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * required timing cycles for the device.
+ */
static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
{
struct s3c2410_platform_nand *plat = info->platform;
int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
unsigned long clkrate = clk_get_rate(info->clk);
- unsigned long set, cfg, mask;
+ unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
unsigned long flags;
/* calculate the timing information for the controller */
@@ -215,9 +253,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
case TYPE_S3C2440:
case TYPE_S3C2412:
- mask = (S3C2410_NFCONF_TACLS(tacls_max - 1) |
- S3C2410_NFCONF_TWRPH0(7) |
- S3C2410_NFCONF_TWRPH1(7));
+ mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+ S3C2440_NFCONF_TWRPH0(7) |
+ S3C2440_NFCONF_TWRPH1(7));
set = S3C2440_NFCONF_TACLS(tacls - 1);
set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
@@ -225,14 +263,9 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
break;
default:
- /* keep compiler happy */
- mask = 0;
- set = 0;
BUG();
}
- dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
local_irq_save(flags);
cfg = readl(info->regs + S3C2410_NFCONF);
@@ -242,9 +275,18 @@ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
local_irq_restore(flags);
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
return 0;
}
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to set up the hardware access speeds, and enable the controller.
+*/
static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
{
int ret;
@@ -268,8 +310,19 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
return 0;
}
-/* select chip */
-
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly set up, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
{
struct s3c2410_nand_info *info;
@@ -530,7 +583,16 @@ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- readsl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup if we've got less than a word to do */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--)
+ *buf++ = readb(info->regs + S3C2440_NFDATA);
+ }
}
static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
@@ -542,7 +604,16 @@ static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int
static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
- writesl(info->regs + S3C2440_NFDATA, buf, len / 4);
+
+ writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+ /* cleanup any fractional write */
+ if (len & 3) {
+ buf += len & ~3;
+
+ for (; len & 3; len--, buf++)
+ writeb(*buf, info->regs + S3C2440_NFDATA);
+ }
}
/* cpufreq driver support */
@@ -593,7 +664,7 @@ static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *inf
/* device management functions */
-static int s3c2410_nand_remove(struct platform_device *pdev)
+static int s3c24xx_nand_remove(struct platform_device *pdev)
{
struct s3c2410_nand_info *info = to_nand_info(pdev);
@@ -645,17 +716,31 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
+const char *part_probes[] = { "cmdlinepart", NULL };
static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *mtd,
struct s3c2410_nand_set *set)
{
+ struct mtd_partition *part_info;
+ int nr_part = 0;
+
if (set == NULL)
return add_mtd_device(&mtd->mtd);
- if (set->nr_partitions > 0 && set->partitions != NULL) {
- return add_mtd_partitions(&mtd->mtd, set->partitions, set->nr_partitions);
+ if (set->nr_partitions == 0) {
+ mtd->mtd.name = set->name;
+ nr_part = parse_mtd_partitions(&mtd->mtd, part_probes,
+ &part_info, 0);
+ } else {
+ if (set->nr_partitions > 0 && set->partitions != NULL) {
+ nr_part = set->nr_partitions;
+ part_info = set->partitions;
+ }
}
+ if (nr_part > 0 && part_info)
+ return add_mtd_partitions(&mtd->mtd, part_info, nr_part);
+
return add_mtd_device(&mtd->mtd);
}
#else
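With no partitions supplied by the board, the partitions can instead be described on the kernel command line for the cmdlinepart parser used above; a hypothetical example (the mtd-id "nand.0" must match the set->name the board passes in, and the sizes are made up):

  mtdparts=nand.0:256k(u-boot)ro,128k(env),4m(kernel),-(rootfs)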
@@ -667,11 +752,16 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
}
#endif
-/* s3c2410_nand_init_chip
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
*
- * init a single instance of an chip
-*/
-
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are set up and the necessary control routines selected.
+ */
static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd,
struct s3c2410_nand_set *set)
@@ -757,14 +847,40 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
if (set->disable_ecc)
chip->ecc.mode = NAND_ECC_NONE;
+
+ switch (chip->ecc.mode) {
+ case NAND_ECC_NONE:
+ dev_info(info->device, "NAND ECC disabled\n");
+ break;
+ case NAND_ECC_SOFT:
+ dev_info(info->device, "NAND soft ECC\n");
+ break;
+ case NAND_ECC_HW:
+ dev_info(info->device, "NAND hardware ECC\n");
+ break;
+ default:
+ dev_info(info->device, "NAND ECC UNKNOWN\n");
+ break;
+ }
+
+ /* If you use u-boot BBT creation code, specifying this flag will
+ * let the kernel fish out the BBT from the NAND, and also skip the
+ * full NAND scan that can take 1/2s or so. Little things... */
+ if (set->flash_bbt)
+ chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
}
-/* s3c2410_nand_update_chip
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
*
- * post-probe chip update, to change any items, such as the
- * layout for large page nand
- */
-
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_mtd *nmtd)
{
@@ -773,33 +889,33 @@ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
dev_dbg(info->device, "chip %p => page shift %d\n",
chip, chip->page_shift);
- if (hardware_ecc) {
+ if (chip->ecc.mode != NAND_ECC_HW)
+ return;
+
/* change the behaviour depending on whether we are using
* the large or small page nand device */
- if (chip->page_shift > 10) {
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- } else {
- chip->ecc.size = 512;
- chip->ecc.bytes = 3;
- chip->ecc.layout = &nand_hw_eccoob;
- }
+ if (chip->page_shift > 10) {
+ chip->ecc.size = 256;
+ chip->ecc.bytes = 3;
+ } else {
+ chip->ecc.size = 512;
+ chip->ecc.bytes = 3;
+ chip->ecc.layout = &nand_hw_eccoob;
}
}
-/* s3c2410_nand_probe
+/* s3c24xx_nand_probe
*
* called by device layer when it finds a device matching
* one our driver can handled. This code checks to see if
* it can allocate all necessary resources then calls the
* nand layer to look for devices
*/
-
-static int s3c24xx_nand_probe(struct platform_device *pdev,
- enum s3c_cpu_type cpu_type)
+static int s3c24xx_nand_probe(struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+ enum s3c_cpu_type cpu_type;
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct s3c2410_nand_set *sets;
@@ -809,6 +925,8 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
int nr_sets;
int setno;
+ cpu_type = platform_get_device_id(pdev)->driver_data;
+
pr_debug("s3c2410_nand_probe(%p)\n", pdev);
info = kmalloc(sizeof(*info), GFP_KERNEL);
@@ -922,7 +1040,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
return 0;
exit_error:
- s3c2410_nand_remove(pdev);
+ s3c24xx_nand_remove(pdev);
if (err == 0)
err = -EINVAL;
@@ -983,50 +1101,33 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
/* driver device registration */
-static int s3c2410_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2410);
-}
-
-static int s3c2440_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2440);
-}
-
-static int s3c2412_nand_probe(struct platform_device *dev)
-{
- return s3c24xx_nand_probe(dev, TYPE_S3C2412);
-}
-
-static struct platform_driver s3c2410_nand_driver = {
- .probe = s3c2410_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2410-nand",
- .owner = THIS_MODULE,
+static struct platform_device_id s3c24xx_driver_ids[] = {
+ {
+ .name = "s3c2410-nand",
+ .driver_data = TYPE_S3C2410,
+ }, {
+ .name = "s3c2440-nand",
+ .driver_data = TYPE_S3C2440,
+ }, {
+ .name = "s3c2412-nand",
+ .driver_data = TYPE_S3C2412,
+ }, {
+ .name = "s3c6400-nand",
+ .driver_data = TYPE_S3C2412, /* compatible with 2412 */
},
+ { }
};
-static struct platform_driver s3c2440_nand_driver = {
- .probe = s3c2440_nand_probe,
- .remove = s3c2410_nand_remove,
- .suspend = s3c24xx_nand_suspend,
- .resume = s3c24xx_nand_resume,
- .driver = {
- .name = "s3c2440-nand",
- .owner = THIS_MODULE,
- },
-};
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-static struct platform_driver s3c2412_nand_driver = {
- .probe = s3c2412_nand_probe,
- .remove = s3c2410_nand_remove,
+static struct platform_driver s3c24xx_nand_driver = {
+ .probe = s3c24xx_nand_probe,
+ .remove = s3c24xx_nand_remove,
.suspend = s3c24xx_nand_suspend,
.resume = s3c24xx_nand_resume,
+ .id_table = s3c24xx_driver_ids,
.driver = {
- .name = "s3c2412-nand",
+ .name = "s3c24xx-nand",
.owner = THIS_MODULE,
},
};
@@ -1035,16 +1136,12 @@ static int __init s3c2410_nand_init(void)
{
printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
- platform_driver_register(&s3c2412_nand_driver);
- platform_driver_register(&s3c2440_nand_driver);
- return platform_driver_register(&s3c2410_nand_driver);
+ return platform_driver_register(&s3c24xx_nand_driver);
}
static void __exit s3c2410_nand_exit(void)
{
- platform_driver_unregister(&s3c2412_nand_driver);
- platform_driver_unregister(&s3c2440_nand_driver);
- platform_driver_unregister(&s3c2410_nand_driver);
+ platform_driver_unregister(&s3c24xx_nand_driver);
}
module_init(s3c2410_nand_init);
@@ -1053,6 +1150,3 @@ module_exit(s3c2410_nand_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
-MODULE_ALIAS("platform:s3c2410-nand");
-MODULE_ALIAS("platform:s3c2412-nand");
-MODULE_ALIAS("platform:s3c2440-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index 812479264896..488088eff2ca 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -64,7 +64,7 @@ struct txx9ndfmc_priv {
struct nand_chip chip;
struct mtd_info mtd;
int cs;
- char mtdname[BUS_ID_SIZE + 2];
+ const char *mtdname;
};
#define MAX_TXX9NDFMC_DEV 4
@@ -334,16 +334,23 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
if (plat->ch_mask != 1) {
txx9_priv->cs = i;
- sprintf(txx9_priv->mtdname, "%s.%u",
- dev_name(&dev->dev), i);
+ txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+ dev_name(&dev->dev), i);
} else {
txx9_priv->cs = -1;
- strcpy(txx9_priv->mtdname, dev_name(&dev->dev));
+ txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+ GFP_KERNEL);
+ }
+ if (!txx9_priv->mtdname) {
+ kfree(txx9_priv);
+ dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+ continue;
}
if (plat->wide_mask & (1 << i))
chip->options |= NAND_BUSWIDTH_16;
if (nand_scan(mtd, 1)) {
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
continue;
}
@@ -385,6 +392,7 @@ static int __exit txx9ndfmc_remove(struct platform_device *dev)
kfree(drvdata->parts[i]);
#endif
del_mtd_device(mtd);
+ kfree(txx9_priv->mtdname);
kfree(txx9_priv);
}
return 0;
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6391e3dc8002..38d656b9b2ee 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -565,7 +565,7 @@ int omap2_onenand_rephase(void)
NULL, __adjust_timing);
}
-static void __devexit omap2_onenand_shutdown(struct platform_device *pdev)
+static void omap2_onenand_shutdown(struct platform_device *pdev)
{
struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
@@ -777,7 +777,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
static struct platform_driver omap2_onenand_driver = {
.probe = omap2_onenand_probe,
- .remove = omap2_onenand_remove,
+ .remove = __devexit_p(omap2_onenand_remove),
.shutdown = omap2_onenand_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 30d6999e5f9f..6e829095ea9d 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -9,6 +9,10 @@
* auto-placement support, read-while load support, various fixes
* Copyright (C) Nokia Corporation, 2007
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -16,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/delay.h>
@@ -27,6 +32,38 @@
#include <asm/io.h>
+/* Default Flex-OneNAND boundary and lock respectively */
+static int flex_bdry[MAX_DIES * 2] = { -1, 0, -1, 0 };
+
+module_param_array(flex_bdry, int, NULL, 0400);
+MODULE_PARM_DESC(flex_bdry,	"SLC boundary information for Flex-OneNAND. "
+	"Syntax: flex_bdry=DIE_BDRY,LOCK,... "
+	"DIE_BDRY: SLC boundary of the die. "
+	"LOCK: locking information for the SLC boundary "
+	"(0: set boundary in unlocked status, "
+	"1: set boundary in locked status)");
+
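A minimal sketch (illustrative only; flex_bdry_apply() is a hypothetical helper name) of how the array is consumed — die i takes its SLC boundary from flex_bdry[2*i] and its lock flag from flex_bdry[2*i+1], mirroring the flexonenand_set_boundary() loop added to onenand_scan() further down in this patch:

static void flex_bdry_apply(struct mtd_info *mtd)
{
	int i;

	for (i = 0; i < MAX_DIES; i++)
		flexonenand_set_boundary(mtd, i,
					 flex_bdry[2 * i],	/* -1: leave boundary unchanged */
					 flex_bdry[2 * i + 1]);	/* 0: unlocked, 1: locked */
}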
+/**
+ * onenand_oob_128 - oob info for Flex-OneNAND with 4KB page
+ * For now, we expose only 64 out of 80 ecc bytes
+ */
+static struct nand_ecclayout onenand_oob_128 = {
+ .eccbytes = 64,
+ .eccpos = {
+ 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+ 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
+ 102, 103, 104, 105
+ },
+ .oobfree = {
+ {2, 4}, {18, 4}, {34, 4}, {50, 4},
+ {66, 4}, {82, 4}, {98, 4}, {114, 4}
+ }
+};
+
/**
* onenand_oob_64 - oob info for large (2KB) page
*/
@@ -65,6 +102,14 @@ static const unsigned char ffchars[] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 48 */
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 64 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 80 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 96 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 112 */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 128 */
};
/**
@@ -171,6 +216,70 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
}
/**
+ * flexonenand_block- For given address return block number
+ * @param this - OneNAND device structure
+ * @param addr - Address for which block number is needed
+ */
+static unsigned flexonenand_block(struct onenand_chip *this, loff_t addr)
+{
+ unsigned boundary, blk, die = 0;
+
+ if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) {
+ die = 1;
+ addr -= this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+
+ blk = addr >> (this->erase_shift - 1);
+ if (blk > boundary)
+ blk = (blk + boundary + 1) >> 1;
+
+ blk += die ? this->density_mask : 0;
+ return blk;
+}
+
+inline unsigned onenand_block(struct onenand_chip *this, loff_t addr)
+{
+ if (!FLEXONENAND(this))
+ return addr >> this->erase_shift;
+ return flexonenand_block(this, addr);
+}
+
+/**
+ * flexonenand_addr - Return address of the block
+ * @this: OneNAND device structure
+ * @block: Block number on Flex-OneNAND
+ *
+ * Return address of the block
+ */
+static loff_t flexonenand_addr(struct onenand_chip *this, int block)
+{
+ loff_t ofs = 0;
+ int die = 0, boundary;
+
+ if (ONENAND_IS_DDP(this) && block >= this->density_mask) {
+ block -= this->density_mask;
+ die = 1;
+ ofs = this->diesize[0];
+ }
+
+ boundary = this->boundary[die];
+ ofs += (loff_t)block << (this->erase_shift - 1);
+ if (block > (boundary + 1))
+ ofs += (loff_t)(block - boundary - 1) << (this->erase_shift - 1);
+ return ofs;
+}
+
+loff_t onenand_addr(struct onenand_chip *this, int block)
+{
+ if (!FLEXONENAND(this))
+ return (loff_t)block << this->erase_shift;
+ return flexonenand_addr(this, block);
+}
+EXPORT_SYMBOL(onenand_addr);
+
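A standalone re-derivation of the mapping above, offered as a sketch only: the boundary and erase_shift are passed in explicitly so the arithmetic can be checked in isolation (erase_shift is the MLC block shift; SLC blocks are half that size).

static loff_t flex_addr_of_block(int block, int boundary, int erase_shift)
{
	loff_t ofs = (loff_t)block << (erase_shift - 1);

	/* blocks past the SLC boundary are MLC and twice as large */
	if (block > boundary + 1)
		ofs += (loff_t)(block - boundary - 1) << (erase_shift - 1);
	return ofs;
}

static unsigned flex_block_of_addr(loff_t addr, int boundary, int erase_shift)
{
	unsigned blk = addr >> (erase_shift - 1);

	/* fold the MLC halves beyond the boundary back into one block each */
	if (blk > boundary)
		blk = (blk + boundary + 1) >> 1;
	return blk;
}

For example, with erase_shift = 18 (256 KiB MLC blocks, 128 KiB SLC blocks) and boundary = 3, block 4 starts at 0x80000, block 5 at 0xC0000, and flex_block_of_addr(0xA0000, 3, 18) returns 4 — the second half of the first MLC block.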
+/**
* onenand_get_density - [DEFAULT] Get OneNAND density
* @param dev_id OneNAND device ID
*
@@ -183,6 +292,22 @@ static inline int onenand_get_density(int dev_id)
}
/**
+ * flexonenand_region - [Flex-OneNAND] Return erase region of addr
+ * @param mtd MTD device structure
+ * @param addr address whose erase region needs to be identified
+ */
+int flexonenand_region(struct mtd_info *mtd, loff_t addr)
+{
+ int i;
+
+ for (i = 0; i < mtd->numeraseregions; i++)
+ if (addr < mtd->eraseregions[i].offset)
+ break;
+ return i - 1;
+}
+EXPORT_SYMBOL(flexonenand_region);
+
+/**
* onenand_command - [DEFAULT] Send command to OneNAND device
* @param mtd MTD device structure
* @param cmd the command to be sent
@@ -207,16 +332,28 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
page = -1;
break;
+ case FLEXONENAND_CMD_PI_ACCESS:
+ /* addr contains die index */
+ block = addr * this->density_mask;
+ page = -1;
+ break;
+
case ONENAND_CMD_ERASE:
case ONENAND_CMD_BUFFERRAM:
case ONENAND_CMD_OTP_ACCESS:
- block = (int) (addr >> this->erase_shift);
+ block = onenand_block(this, addr);
page = -1;
break;
+ case FLEXONENAND_CMD_READ_PI:
+ cmd = ONENAND_CMD_READ;
+ block = addr * this->density_mask;
+ page = 0;
+ break;
+
default:
- block = (int) (addr >> this->erase_shift);
- page = (int) (addr >> this->page_shift);
+ block = onenand_block(this, addr);
+ page = (int) (addr - onenand_addr(this, block)) >> this->page_shift;
if (ONENAND_IS_2PLANE(this)) {
/* Make the even block number */
@@ -236,7 +373,7 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
- if (ONENAND_IS_2PLANE(this))
+ if (ONENAND_IS_MLC(this) || ONENAND_IS_2PLANE(this))
/* It is always BufferRAM0 */
ONENAND_SET_BUFFERRAM0(this);
else
@@ -258,13 +395,18 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
if (page != -1) {
/* Now we use page size operation */
- int sectors = 4, count = 4;
+ int sectors = 0, count = 0;
int dataram;
switch (cmd) {
+ case FLEXONENAND_CMD_RECOVER_LSB:
case ONENAND_CMD_READ:
case ONENAND_CMD_READOOB:
- dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
+ if (ONENAND_IS_MLC(this))
+ /* It is always BufferRAM0 */
+ dataram = ONENAND_SET_BUFFERRAM0(this);
+ else
+ dataram = ONENAND_SET_NEXT_BUFFERRAM(this);
break;
default:
@@ -293,6 +435,30 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
}
/**
+ * onenand_read_ecc - return ecc status
+ * @param this onenand chip structure
+ */
+static inline int onenand_read_ecc(struct onenand_chip *this)
+{
+ int ecc, i, result = 0;
+
+ if (!FLEXONENAND(this))
+ return this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+
+ for (i = 0; i < 4; i++) {
+ ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS + i);
+ if (likely(!ecc))
+ continue;
+ if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR)
+ return ONENAND_ECC_2BIT_ALL;
+ else
+ result = ONENAND_ECC_1BIT_ALL;
+ }
+
+ return result;
+}
+
+/**
* onenand_wait - [DEFAULT] wait until the command is done
* @param mtd MTD device structure
* @param state state to select the max. timeout value
@@ -331,14 +497,14 @@ static int onenand_wait(struct mtd_info *mtd, int state)
* power off recovery (POR) test, it should read ECC status first
*/
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc) {
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.failed++;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
- printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
+ printk(KERN_DEBUG "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.corrected++;
}
}
@@ -656,7 +822,7 @@ static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr)
if (found && ONENAND_IS_DDP(this)) {
/* Select DataRAM for DDP */
- int block = (int) (addr >> this->erase_shift);
+ int block = onenand_block(this, addr);
int value = onenand_bufferram_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
}
@@ -816,6 +982,149 @@ static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, int col
}
/**
+ * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data
+ * @param mtd MTD device structure
+ * @param addr address to recover
+ * @param status return value from onenand_wait / onenand_bbt_wait
+ *
+ * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has
+ * lower page address and MSB page has higher page address in paired pages.
+ * If power off occurs during MSB page program, the paired LSB page data can
+ * become corrupt. LSB page recovery read is a way to read LSB page though page
+ * data are corrupted. When uncorrectable error occurs as a result of LSB page
+ * read after power up, issue LSB page recovery read.
+ */
+static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i;
+
+ /* Recovery is only for Flex-OneNAND */
+ if (!FLEXONENAND(this))
+ return status;
+
+ /* check if we failed due to uncorrectable error */
+ if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+ return status;
+
+ /* check if address lies in MLC region */
+ i = flexonenand_region(mtd, addr);
+ if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift))
+ return status;
+
+ /* We are attempting to reread, so decrement stats.failed
+ * which was incremented by onenand_wait due to read failure
+ */
+ printk(KERN_INFO "onenand_recover_lsb: Attempting to recover from uncorrectable read\n");
+ mtd->ecc_stats.failed--;
+
+ /* Issue the LSB page recovery command */
+ this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize);
+ return this->wait(mtd, FL_READING);
+}
+
+/**
+ * onenand_mlc_read_ops_nolock - MLC OneNAND read main and/or out-of-band
+ * @param mtd MTD device structure
+ * @param from offset to read from
+ * @param ops: oob operation description structure
+ *
+ * MLC OneNAND / Flex-OneNAND has 4KB page size and 4KB dataram.
+ * So, read-while-load is not present.
+ */
+static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
+ size_t len = ops->len;
+ size_t ooblen = ops->ooblen;
+ u_char *buf = ops->datbuf;
+ u_char *oobbuf = ops->oobbuf;
+ int read = 0, column, thislen;
+ int oobread = 0, oobcolumn, thisooblen, oobsize;
+ int ret = 0;
+ int writesize = this->writesize;
+
+ DEBUG(MTD_DEBUG_LEVEL3, "onenand_mlc_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ oobsize = this->ecclayout->oobavail;
+ else
+ oobsize = mtd->oobsize;
+
+ oobcolumn = from & (mtd->oobsize - 1);
+
+ /* Do not allow reads past end of device */
+ if (from + len > mtd->size) {
+ printk(KERN_ERR "onenand_mlc_read_ops_nolock: Attempt read beyond end of device\n");
+ ops->retlen = 0;
+ ops->oobretlen = 0;
+ return -EINVAL;
+ }
+
+ stats = mtd->ecc_stats;
+
+ while (read < len) {
+ cond_resched();
+
+ thislen = min_t(int, writesize, len - read);
+
+ column = from & (writesize - 1);
+ if (column + thislen > writesize)
+ thislen = writesize - column;
+
+ if (!onenand_check_bufferram(mtd, from)) {
+ this->command(mtd, ONENAND_CMD_READ, from, writesize);
+
+ ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+ onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
+ }
+
+ this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen);
+ if (oobbuf) {
+ thisooblen = oobsize - oobcolumn;
+ thisooblen = min_t(int, thisooblen, ooblen - oobread);
+
+ if (ops->mode == MTD_OOB_AUTO)
+ onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
+ else
+ this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
+ oobread += thisooblen;
+ oobbuf += thisooblen;
+ oobcolumn = 0;
+ }
+
+ read += thislen;
+ if (read == len)
+ break;
+
+ from += thislen;
+ buf += thislen;
+ }
+
+ /*
+ * Return success, if no ECC failures, else -EBADMSG
+ * fs driver will take care of that, because
+ * retlen == desired len and result == -EBADMSG
+ */
+ ops->retlen = read;
+ ops->oobretlen = oobread;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
* onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band
* @param mtd MTD device structure
* @param from offset to read from
@@ -962,7 +1271,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
u_char *buf = ops->oobbuf;
- int ret = 0;
+ int ret = 0, readcmd;
from += ops->ooboffs;
@@ -993,17 +1302,22 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
stats = mtd->ecc_stats;
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret && ret != -EBADMSG) {
printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
break;
@@ -1053,6 +1367,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
+ struct onenand_chip *this = mtd->priv;
struct mtd_oob_ops ops = {
.len = len,
.ooblen = 0,
@@ -1062,7 +1377,9 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
int ret;
onenand_get_device(mtd, FL_READING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
onenand_release_device(mtd);
*retlen = ops.retlen;
@@ -1080,6 +1397,7 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct onenand_chip *this = mtd->priv;
int ret;
switch (ops->mode) {
@@ -1094,7 +1412,9 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
onenand_get_device(mtd, FL_READING);
if (ops->datbuf)
- ret = onenand_read_ops_nolock(mtd, from, ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, ops) :
+ onenand_read_ops_nolock(mtd, from, ops);
else
ret = onenand_read_oob_nolock(mtd, from, ops);
onenand_release_device(mtd);
@@ -1128,11 +1448,11 @@ static int onenand_bbt_wait(struct mtd_info *mtd, int state)
ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS);
if (interrupt & ONENAND_INT_READ) {
- int ecc = this->read_word(this->base + ONENAND_REG_ECC_STATUS);
+ int ecc = onenand_read_ecc(this);
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x"
", controller error 0x%04x\n", ecc, ctrl);
- return ONENAND_BBT_READ_ERROR;
+ return ONENAND_BBT_READ_ECC_ERROR;
}
} else {
printk(KERN_ERR "onenand_bbt_wait: read timeout!"
@@ -1163,7 +1483,7 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
{
struct onenand_chip *this = mtd->priv;
int read = 0, thislen, column;
- int ret = 0;
+ int ret = 0, readcmd;
size_t len = ops->ooblen;
u_char *buf = ops->oobbuf;
@@ -1183,17 +1503,22 @@ int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
column = from & (mtd->oobsize - 1);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
while (read < len) {
cond_resched();
thislen = mtd->oobsize - column;
thislen = min_t(int, thislen, len);
- this->command(mtd, ONENAND_CMD_READOOB, from, mtd->oobsize);
+ this->command(mtd, readcmd, from, mtd->oobsize);
onenand_update_bufferram(mtd, from, 0);
- ret = onenand_bbt_wait(mtd, FL_READING);
+ ret = this->bbt_wait(mtd, FL_READING);
+ if (unlikely(ret))
+ ret = onenand_recover_lsb(mtd, from, ret);
+
if (ret)
break;
@@ -1230,9 +1555,11 @@ static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to
{
struct onenand_chip *this = mtd->priv;
u_char *oob_buf = this->oob_buf;
- int status, i;
+ int status, i, readcmd;
- this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
+ readcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_READ : ONENAND_CMD_READOOB;
+
+ this->command(mtd, readcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
status = this->wait(mtd, FL_READING);
if (status)
@@ -1633,7 +1960,7 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
{
struct onenand_chip *this = mtd->priv;
int column, ret = 0, oobsize;
- int written = 0;
+ int written = 0, oobcmd;
u_char *oobbuf;
size_t len = ops->ooblen;
const u_char *buf = ops->oobbuf;
@@ -1675,6 +2002,8 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
oobbuf = this->oob_buf;
+ oobcmd = ONENAND_IS_MLC(this) ? ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB;
+
/* Loop until all data write */
while (written < len) {
int thislen = min_t(int, oobsize, len - written);
@@ -1692,7 +2021,14 @@ static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to,
memcpy(oobbuf + column, buf, thislen);
this->write_bufferram(mtd, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize);
- this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
+ if (ONENAND_IS_MLC(this)) {
+ /* Set main area of DataRAM to 0xff*/
+ memset(this->page_buf, 0xff, mtd->writesize);
+ this->write_bufferram(mtd, ONENAND_DATARAM,
+ this->page_buf, 0, mtd->writesize);
+ }
+
+ this->command(mtd, oobcmd, to, mtd->oobsize);
onenand_update_bufferram(mtd, to, 0);
if (ONENAND_IS_2PLANE(this)) {
@@ -1815,29 +2151,48 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
struct onenand_chip *this = mtd->priv;
unsigned int block_size;
- loff_t addr;
- int len;
- int ret = 0;
+ loff_t addr = instr->addr;
+ loff_t len = instr->len;
+ int ret = 0, i;
+ struct mtd_erase_region_info *region = NULL;
+ loff_t region_end = 0;
DEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%012llx, len = %llu\n", (unsigned long long) instr->addr, (unsigned long long) instr->len);
- block_size = (1 << this->erase_shift);
-
- /* Start address must align on block boundary */
- if (unlikely(instr->addr & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ /* Do not allow erase past end of device */
+ if (unlikely((len + addr) > mtd->size)) {
+ printk(KERN_ERR "onenand_erase: Erase past end of device\n");
return -EINVAL;
}
- /* Length must align on block boundary */
- if (unlikely(instr->len & (block_size - 1))) {
- printk(KERN_ERR "onenand_erase: Length not block aligned\n");
- return -EINVAL;
+ if (FLEXONENAND(this)) {
+ /* Find the eraseregion of this address */
+ i = flexonenand_region(mtd, addr);
+ region = &mtd->eraseregions[i];
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ /* Start address within region must align on block boundary.
+ * Erase region's start offset is always block start address.
+ */
+ if (unlikely((addr - region->offset) & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
+ } else {
+ block_size = 1 << this->erase_shift;
+
+ /* Start address must align on block boundary */
+ if (unlikely(addr & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ return -EINVAL;
+ }
}
- /* Do not allow erase past end of device */
- if (unlikely((instr->len + instr->addr) > mtd->size)) {
- printk(KERN_ERR "onenand_erase: Erase past end of device\n");
+ /* Length must align on block boundary */
+ if (unlikely(len & (block_size - 1))) {
+ printk(KERN_ERR "onenand_erase: Length not block aligned\n");
return -EINVAL;
}
@@ -1847,9 +2202,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
onenand_get_device(mtd, FL_ERASING);
/* Loop through the pages */
- len = instr->len;
- addr = instr->addr;
-
instr->state = MTD_ERASING;
while (len) {
@@ -1869,7 +2221,8 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
ret = this->wait(mtd, FL_ERASING);
/* Check, if it is write protected */
if (ret) {
- printk(KERN_ERR "onenand_erase: Failed erase, block %d\n", (unsigned) (addr >> this->erase_shift));
+ printk(KERN_ERR "onenand_erase: Failed erase, block %d\n",
+ onenand_block(this, addr));
instr->state = MTD_ERASE_FAILED;
instr->fail_addr = addr;
goto erase_exit;
@@ -1877,6 +2230,22 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
len -= block_size;
addr += block_size;
+
+ if (addr == region_end) {
+ if (!len)
+ break;
+ region++;
+
+ block_size = region->erasesize;
+ region_end = region->offset + region->erasesize * region->numblocks;
+
+ if (len & (block_size - 1)) {
+ /* FIXME: This should be handled at MTD partitioning level. */
+ printk(KERN_ERR "onenand_erase: Unaligned address\n");
+ goto erase_exit;
+ }
+ }
+
}
instr->state = MTD_ERASE_DONE;
@@ -1955,13 +2324,17 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
int block;
/* Get block number */
- block = ((int) ofs) >> bbm->bbt_erase_shift;
+ block = onenand_block(this, ofs);
if (bbm->bbt)
bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
/* We write two bytes, so we dont have to mess with 16 bit access */
ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
- return onenand_write_oob_nolock(mtd, ofs, &ops);
+ /* FIXME : What to do when marking SLC block in partition
+ * with MLC erasesize? For now, it is not advisable to
+ * create partitions containing both SLC and MLC regions.
+ */
+ return onenand_write_oob_nolock(mtd, ofs, &ops);
}
/**
@@ -2005,8 +2378,8 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
int start, end, block, value, status;
int wp_status_mask;
- start = ofs >> this->erase_shift;
- end = len >> this->erase_shift;
+ start = onenand_block(this, ofs);
+ end = onenand_block(this, ofs + len) - 1;
if (cmd == ONENAND_CMD_LOCK)
wp_status_mask = ONENAND_WP_LS;
@@ -2018,7 +2391,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
/* Set start block address */
this->write_word(start, this->base + ONENAND_REG_START_BLOCK_ADDRESS);
/* Set end block address */
- this->write_word(start + end - 1, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
+ this->write_word(end, this->base + ONENAND_REG_END_BLOCK_ADDRESS);
/* Write lock command */
this->command(mtd, cmd, 0, 0);
@@ -2039,7 +2412,7 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
}
/* Block lock scheme */
- for (block = start; block < start + end; block++) {
+ for (block = start; block < end + 1; block++) {
/* Set block address */
value = onenand_block_address(this, block);
this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
@@ -2147,7 +2520,7 @@ static void onenand_unlock_all(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
loff_t ofs = 0;
- size_t len = this->chipsize;
+ loff_t len = mtd->size;
if (this->options & ONENAND_HAS_UNLOCK_ALL) {
/* Set start block address */
@@ -2163,12 +2536,16 @@ static void onenand_unlock_all(struct mtd_info *mtd)
& ONENAND_CTRL_ONGO)
continue;
+ /* Don't check lock status */
+ if (this->options & ONENAND_SKIP_UNLOCK_CHECK)
+ return;
+
/* Check lock status */
if (onenand_check_lock_status(this))
return;
/* Workaround for all block unlock in DDP */
- if (ONENAND_IS_DDP(this)) {
+ if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) {
/* All blocks on another chip */
ofs = this->chipsize >> 1;
len = this->chipsize >> 1;
@@ -2210,7 +2587,9 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_read_ops_nolock(mtd, from, &ops);
+ ret = ONENAND_IS_MLC(this) ?
+ onenand_mlc_read_ops_nolock(mtd, from, &ops) :
+ onenand_read_ops_nolock(mtd, from, &ops);
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2277,21 +2656,32 @@ static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
- struct mtd_oob_ops ops = {
- .mode = MTD_OOB_PLACE,
- .ooblen = len,
- .oobbuf = buf,
- .ooboffs = 0,
- };
+ struct mtd_oob_ops ops;
int ret;
/* Enter OTP access mode */
this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
this->wait(mtd, FL_OTPING);
- ret = onenand_write_oob_nolock(mtd, from, &ops);
-
- *retlen = ops.oobretlen;
+ if (FLEXONENAND(this)) {
+ /*
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
+ */
+ ops.len = mtd->writesize;
+ ops.ooblen = 0;
+ ops.datbuf = buf;
+ ops.oobbuf = NULL;
+ ret = onenand_write_ops_nolock(mtd, mtd->writesize * 49, &ops);
+ *retlen = ops.retlen;
+ } else {
+ ops.mode = MTD_OOB_PLACE;
+ ops.ooblen = len;
+ ops.oobbuf = buf;
+ ops.ooboffs = 0;
+ ret = onenand_write_oob_nolock(mtd, from, &ops);
+ *retlen = ops.oobretlen;
+ }
/* Exit OTP access mode */
this->command(mtd, ONENAND_CMD_RESET, 0, 0);
@@ -2475,27 +2865,34 @@ static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct onenand_chip *this = mtd->priv;
- u_char *oob_buf = this->oob_buf;
+ u_char *buf = FLEXONENAND(this) ? this->page_buf : this->oob_buf;
size_t retlen;
int ret;
- memset(oob_buf, 0xff, mtd->oobsize);
+ memset(buf, 0xff, FLEXONENAND(this) ? this->writesize
+ : mtd->oobsize);
/*
* Note: OTP lock operation
* OTP block : 0xXXFC
* 1st block : 0xXXF3 (If chip support)
* Both : 0xXXF0 (If chip support)
*/
- oob_buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ if (FLEXONENAND(this))
+ buf[FLEXONENAND_OTP_LOCK_OFFSET] = 0xFC;
+ else
+ buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
/*
* Write lock mark to 8th word of sector0 of page0 of the spare0.
* We write 16 bytes spare area instead of 2 bytes.
+ * For Flex-OneNAND, we write lock mark to 1st word of sector 4 of
+ * main area of page 49.
*/
+
from = 0;
- len = 16;
+ len = FLEXONENAND(this) ? mtd->writesize : 16;
- ret = onenand_otp_walk(mtd, from, len, &retlen, oob_buf, do_otp_lock, MTD_OTP_USER);
+ ret = onenand_otp_walk(mtd, from, len, &retlen, buf, do_otp_lock, MTD_OTP_USER);
return ret ? : retlen;
}
@@ -2542,6 +2939,14 @@ static void onenand_check_features(struct mtd_info *mtd)
break;
}
+ if (ONENAND_IS_MLC(this))
+ this->options &= ~ONENAND_HAS_2PLANE;
+
+ if (FLEXONENAND(this)) {
+ this->options &= ~ONENAND_HAS_CONT_LOCK;
+ this->options |= ONENAND_HAS_UNLOCK_ALL;
+ }
+
if (this->options & ONENAND_HAS_CONT_LOCK)
printk(KERN_DEBUG "Lock scheme is Continuous Lock\n");
if (this->options & ONENAND_HAS_UNLOCK_ALL)
@@ -2559,14 +2964,16 @@ static void onenand_check_features(struct mtd_info *mtd)
*/
static void onenand_print_device_info(int device, int version)
{
- int vcc, demuxed, ddp, density;
+ int vcc, demuxed, ddp, density, flexonenand;
vcc = device & ONENAND_DEVICE_VCC_MASK;
demuxed = device & ONENAND_DEVICE_IS_DEMUX;
ddp = device & ONENAND_DEVICE_IS_DDP;
density = onenand_get_density(device);
- printk(KERN_INFO "%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
- demuxed ? "" : "Muxed ",
+ flexonenand = device & DEVICE_IS_FLEXONENAND;
+ printk(KERN_INFO "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)\n",
+ demuxed ? "" : "Muxed ",
+ flexonenand ? "Flex-" : "",
ddp ? "(DDP)" : "",
(16 << density),
vcc ? "2.65/3.3" : "1.8",
@@ -2576,6 +2983,7 @@ static void onenand_print_device_info(int device, int version)
static const struct onenand_manufacturers onenand_manuf_ids[] = {
{ONENAND_MFR_SAMSUNG, "Samsung"},
+ {ONENAND_MFR_NUMONYX, "Numonyx"},
};
/**
@@ -2605,6 +3013,261 @@ static int onenand_check_maf(int manuf)
}
/**
+ * flexonenand_get_boundary - Reads the SLC boundary
+ * @param mtd - MTD device structure
+ */
+static int flexonenand_get_boundary(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ unsigned die, bdry;
+ int ret, syscfg, locked;
+
+ /* Disable ECC */
+ syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1);
+
+ for (die = 0; die < this->dies; die++) {
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ bdry = this->read_word(this->base + ONENAND_DATARAM);
+ if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
+ locked = 0;
+ else
+ locked = 1;
+ this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
+
+ this->command(mtd, ONENAND_CMD_RESET, 0, 0);
+ ret = this->wait(mtd, FL_RESETING);
+
+ printk(KERN_INFO "Die %d boundary: %d%s\n", die,
+ this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
+ }
+
+ /* Enable ECC */
+ this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1);
+ return 0;
+}
+
+/**
+ * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info
+ * boundary[], diesize[], mtd->size, mtd->erasesize
+ * @param mtd - MTD device structure
+ */
+static void flexonenand_get_size(struct mtd_info *mtd)
+{
+ struct onenand_chip *this = mtd->priv;
+ int die, i, eraseshift, density;
+ int blksperdie, maxbdry;
+ loff_t ofs;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift);
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+ maxbdry = blksperdie - 1;
+ eraseshift = this->erase_shift - 1;
+
+ mtd->numeraseregions = this->dies << 1;
+
+ /* This fills up the device boundary */
+ flexonenand_get_boundary(mtd);
+ die = ofs = 0;
+ i = -1;
+ for (; die < this->dies; die++) {
+ if (!die || this->boundary[die-1] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks =
+ this->boundary[die] + 1;
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift++;
+ } else {
+ mtd->numeraseregions -= 1;
+ mtd->eraseregions[i].numblocks +=
+ this->boundary[die] + 1;
+ ofs += (this->boundary[die] + 1) << (eraseshift - 1);
+ }
+ if (this->boundary[die] != maxbdry) {
+ i++;
+ mtd->eraseregions[i].offset = ofs;
+ mtd->eraseregions[i].erasesize = 1 << eraseshift;
+ mtd->eraseregions[i].numblocks = maxbdry ^
+ this->boundary[die];
+ ofs += mtd->eraseregions[i].numblocks << eraseshift;
+ eraseshift--;
+ } else
+ mtd->numeraseregions -= 1;
+ }
+
+ /* Expose MLC erase size except when all blocks are SLC */
+ mtd->erasesize = 1 << this->erase_shift;
+ if (mtd->numeraseregions == 1)
+ mtd->erasesize >>= 1;
+
+ printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions);
+ for (i = 0; i < mtd->numeraseregions; i++)
+ printk(KERN_INFO "[offset: 0x%08x, erasesize: 0x%05x,"
+ " numblocks: %04u]\n",
+ (unsigned int) mtd->eraseregions[i].offset,
+ mtd->eraseregions[i].erasesize,
+ mtd->eraseregions[i].numblocks);
+
+ for (die = 0, mtd->size = 0; die < this->dies; die++) {
+ this->diesize[die] = (loff_t)blksperdie << this->erase_shift;
+ this->diesize[die] -= (loff_t)(this->boundary[die] + 1)
+ << (this->erase_shift - 1);
+ mtd->size += this->diesize[die];
+ }
+}
+
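A worked example under assumed numbers (a hypothetical single-die 256 MB Flex-OneNAND with erase_shift = 19, i.e. 512 KiB MLC blocks and 256 KiB SLC blocks, so blksperdie = 512 and maxbdry = 511): with boundary[0] = 127 the loop produces two regions — region 0 at offset 0 with erasesize 256 KiB and 128 blocks, and region 1 at offset 32 MB with erasesize 512 KiB and 511 ^ 127 = 384 blocks. The die size works out to 512 * 512 KiB - 128 * 256 KiB = 224 MB, which becomes mtd->size, and mtd->erasesize stays at the MLC size of 512 KiB because there is more than one erase region.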
+/**
+ * flexonenand_check_blocks_erased - Check if blocks are erased
+ * @param mtd - MTD device structure
+ * @param start - first erase block to check
+ * @param end - last erase block to check
+ *
+ * Converting an unerased block from MLC to SLC
+ * causes byte values to change. Since both data and its ECC
+ * have changed, reads of the block give an uncorrectable error.
+ * This might lead to the block being detected as bad.
+ *
+ * Avoid this by ensuring that the block to be converted is
+ * erased.
+ */
+static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int end)
+{
+ struct onenand_chip *this = mtd->priv;
+ int i, ret;
+ int block;
+ struct mtd_oob_ops ops = {
+ .mode = MTD_OOB_PLACE,
+ .ooboffs = 0,
+ .ooblen = mtd->oobsize,
+ .datbuf = NULL,
+ .oobbuf = this->oob_buf,
+ };
+ loff_t addr;
+
+ printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end);
+
+ for (block = start; block <= end; block++) {
+ addr = flexonenand_addr(this, block);
+ if (onenand_block_isbad_nolock(mtd, addr, 0))
+ continue;
+
+ /*
+ * Since main area write results in ECC write to spare,
+ * it is sufficient to check only ECC bytes for change.
+ */
+ ret = onenand_read_oob_nolock(mtd, addr, &ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < mtd->oobsize; i++)
+ if (this->oob_buf[i] != 0xff)
+ break;
+
+ if (i != mtd->oobsize) {
+ printk(KERN_WARNING "Block %d not erased.\n", block);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * flexonenand_set_boundary - Writes the SLC boundary
+ * @param mtd - mtd info structure
+ */
+int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+ int boundary, int lock)
+{
+ struct onenand_chip *this = mtd->priv;
+ int ret, density, blksperdie, old, new, thisboundary;
+ loff_t addr;
+
+ /* Change only once for SDP Flex-OneNAND */
+ if (die && (!ONENAND_IS_DDP(this)))
+ return 0;
+
+ /* boundary value of -1 indicates no required change */
+ if (boundary < 0 || boundary == this->boundary[die])
+ return 0;
+
+ density = onenand_get_density(this->device_id);
+ blksperdie = ((16 << density) << 20) >> this->erase_shift;
+ blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0;
+
+ if (boundary >= blksperdie) {
+ printk(KERN_ERR "flexonenand_set_boundary: Invalid boundary value. "
+ "Boundary not changed.\n");
+ return -EINVAL;
+ }
+
+ /* Check if converting blocks are erased */
+ old = this->boundary[die] + (die * this->density_mask);
+ new = boundary + (die * this->density_mask);
+ ret = flexonenand_check_blocks_erased(mtd, min(old, new) + 1, max(old, new));
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n");
+ return ret;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0);
+ this->wait(mtd, FL_SYNCING);
+
+ /* Check if the boundary is locked */
+ this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
+ ret = this->wait(mtd, FL_READING);
+
+ thisboundary = this->read_word(this->base + ONENAND_DATARAM);
+ if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
+ printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n");
+ ret = 1;
+ goto out;
+ }
+
+ printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n",
+ die, boundary, lock ? "(Locked)" : "(Unlocked)");
+
+ addr = die ? this->diesize[0] : 0;
+
+ boundary &= FLEXONENAND_PI_MASK;
+ boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT);
+
+ this->command(mtd, ONENAND_CMD_ERASE, addr, 0);
+ ret = this->wait(mtd, FL_ERASING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI erase for Die %d\n", die);
+ goto out;
+ }
+
+ this->write_word(boundary, this->base + ONENAND_DATARAM);
+ this->command(mtd, ONENAND_CMD_PROG, addr, 0);
+ ret = this->wait(mtd, FL_WRITING);
+ if (ret) {
+ printk(KERN_ERR "flexonenand_set_boundary: Failed PI write for Die %d\n", die);
+ goto out;
+ }
+
+ this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0);
+ ret = this->wait(mtd, FL_WRITING);
+out:
+ this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND);
+ this->wait(mtd, FL_RESETING);
+ if (!ret)
+ /* Recalculate device size on boundary change */
+ flexonenand_get_size(mtd);
+
+ return ret;
+}
+
+/**
* onenand_probe - [OneNAND Interface] Probe the OneNAND device
* @param mtd MTD device structure
*
@@ -2621,7 +3284,7 @@ static int onenand_probe(struct mtd_info *mtd)
/* Save system configuration 1 */
syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
/* Clear Sync. Burst Read mode to read BootRAM */
- this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), this->base + ONENAND_REG_SYS_CFG1);
+ this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ & ~ONENAND_SYS_CFG1_SYNC_WRITE), this->base + ONENAND_REG_SYS_CFG1);
/* Send the command for reading device ID from BootRAM */
this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM);
@@ -2646,6 +3309,7 @@ static int onenand_probe(struct mtd_info *mtd)
maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
+ this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
/* Check OneNAND device */
if (maf_id != bram_maf_id || dev_id != bram_dev_id)
@@ -2657,29 +3321,55 @@ static int onenand_probe(struct mtd_info *mtd)
this->version_id = ver_id;
density = onenand_get_density(dev_id);
+ if (FLEXONENAND(this)) {
+ this->dies = ONENAND_IS_DDP(this) ? 2 : 1;
+ /* Maximum possible erase regions */
+ mtd->numeraseregions = this->dies << 1;
+ mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
+ * (this->dies << 1), GFP_KERNEL);
+ if (!mtd->eraseregions)
+ return -ENOMEM;
+ }
+
+ /*
+ * For Flex-OneNAND, chipsize represents maximum possible device size.
+ * mtd->size represents the actual device size.
+ */
this->chipsize = (16 << density) << 20;
- /* Set density mask. it is used for DDP */
- if (ONENAND_IS_DDP(this))
- this->density_mask = (1 << (density + 6));
- else
- this->density_mask = 0;
/* OneNAND page size & block size */
/* The data buffer size is equal to page size */
mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
+ /* We use the full BufferRAM */
+ if (ONENAND_IS_MLC(this))
+ mtd->writesize <<= 1;
+
mtd->oobsize = mtd->writesize >> 5;
/* Pages per a block are always 64 in OneNAND */
mtd->erasesize = mtd->writesize << 6;
+ /*
+ * Flex-OneNAND SLC area has 64 pages per block.
+ * Flex-OneNAND MLC area has 128 pages per block.
+ * Expose MLC erase size to find erase_shift and page_mask.
+ */
+ if (FLEXONENAND(this))
+ mtd->erasesize <<= 1;
this->erase_shift = ffs(mtd->erasesize) - 1;
this->page_shift = ffs(mtd->writesize) - 1;
this->page_mask = (1 << (this->erase_shift - this->page_shift)) - 1;
+ /* Set density mask. it is used for DDP */
+ if (ONENAND_IS_DDP(this))
+ this->density_mask = this->chipsize >> (this->erase_shift + 1);
/* It's real page size */
this->writesize = mtd->writesize;
/* REVIST: Multichip handling */
- mtd->size = this->chipsize;
+ if (FLEXONENAND(this))
+ flexonenand_get_size(mtd);
+ else
+ mtd->size = this->chipsize;
/* Check OneNAND features */
onenand_check_features(mtd);
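Tying the probe arithmetic to the same assumed numbers: with mtd->writesize = 4 KiB (the doubled BufferRAM of an MLC part), oobsize becomes 4096 >> 5 = 128 bytes — matching onenand_oob_128 above — the 64-page block size is 256 KiB, and the Flex-OneNAND doubling exposes a 128-page MLC erasesize of 512 KiB, i.e. erase_shift = 19 as assumed in the flexonenand_get_size() example.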
@@ -2734,7 +3424,7 @@ static void onenand_resume(struct mtd_info *mtd)
*/
int onenand_scan(struct mtd_info *mtd, int maxchips)
{
- int i;
+ int i, ret;
struct onenand_chip *this = mtd->priv;
if (!this->read_word)
@@ -2746,6 +3436,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
this->command = onenand_command;
if (!this->wait)
onenand_setup_wait(mtd);
+ if (!this->bbt_wait)
+ this->bbt_wait = onenand_bbt_wait;
+ if (!this->unlock_all)
+ this->unlock_all = onenand_unlock_all;
if (!this->read_bufferram)
this->read_bufferram = onenand_read_bufferram;
@@ -2796,6 +3490,10 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
* Allow subpage writes up to oobsize.
*/
switch (mtd->oobsize) {
+ case 128:
+ this->ecclayout = &onenand_oob_128;
+ mtd->subpage_sft = 0;
+ break;
case 64:
this->ecclayout = &onenand_oob_64;
mtd->subpage_sft = 2;
@@ -2859,9 +3557,18 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
mtd->owner = THIS_MODULE;
/* Unlock whole block */
- onenand_unlock_all(mtd);
+ this->unlock_all(mtd);
+
+ ret = this->scan_bbt(mtd);
+ if ((!FLEXONENAND(this)) || ret)
+ return ret;
- return this->scan_bbt(mtd);
+ /* Change Flex-OneNAND boundaries if required */
+ for (i = 0; i < MAX_DIES; i++)
+ flexonenand_set_boundary(mtd, i, flex_bdry[2 * i],
+ flex_bdry[(2 * i) + 1]);
+
+ return 0;
}
/**
@@ -2890,6 +3597,7 @@ void onenand_release(struct mtd_info *mtd)
kfree(this->page_buf);
if (this->options & ONENAND_OOBBUF_ALLOC)
kfree(this->oob_buf);
+ kfree(mtd->eraseregions);
}
EXPORT_SYMBOL_GPL(onenand_scan);
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 2f53b51c6805..a91fcac1af01 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -63,6 +63,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t from;
size_t readlen, ooblen;
struct mtd_oob_ops ops;
+ int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
@@ -76,7 +77,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/* Note that numblocks is 2 * (real numblocks) here;
* see i += 2 below as it makes shifting and masking less painful
*/
- numblocks = mtd->size >> (bbm->bbt_erase_shift - 1);
+ numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
startblock = 0;
from = 0;
@@ -106,7 +107,12 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
}
}
i += 2;
- from += (1 << bbm->bbt_erase_shift);
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, from);
+ from += mtd->eraseregions[rgn].erasesize;
+ } else
+ from += (1 << bbm->bbt_erase_shift);
}
return 0;
@@ -143,7 +149,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
uint8_t res;
/* Get block number * 2 */
- block = (int) (offs >> (bbm->bbt_erase_shift - 1));
+ block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
@@ -178,7 +184,7 @@ int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
struct bbm_info *bbm = this->bbm;
int len, ret = 0;
- len = mtd->size >> (this->erase_shift + 2);
+ len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt) {
diff --git a/drivers/mtd/onenand/onenand_sim.c b/drivers/mtd/onenand/onenand_sim.c
index d64200b7c94b..f6e3c8aebd3a 100644
--- a/drivers/mtd/onenand/onenand_sim.c
+++ b/drivers/mtd/onenand/onenand_sim.c
@@ -6,6 +6,10 @@
* Copyright © 2005-2007 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
+ * Vishak G <vishak.g at samsung.com>, Rohit Hagargundgi <h.rohit at samsung.com>
+ * Flex-OneNAND simulator support
+ * Copyright (C) Samsung Electronics, 2008
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -24,16 +28,38 @@
#ifndef CONFIG_ONENAND_SIM_MANUFACTURER
#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec
#endif
+
#ifndef CONFIG_ONENAND_SIM_DEVICE_ID
#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04
#endif
+
+#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1)
+
#ifndef CONFIG_ONENAND_SIM_VERSION_ID
#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e
#endif
+#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID
+#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND
+#endif
+
+/* Initial boundary values for Flex-OneNAND Simulator */
+#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01
+#endif
+
+#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY
+#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01
+#endif
+
static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER;
static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID;
static int version_id = CONFIG_ONENAND_SIM_VERSION_ID;
+static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID;
+static int boundary[] = {
+ CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY,
+ CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY,
+};
struct onenand_flash {
void __iomem *base;
@@ -57,12 +83,18 @@ struct onenand_flash {
(writew(v, this->base + ONENAND_REG_WP_STATUS))
/* It has all 0xff chars */
-#define MAX_ONENAND_PAGESIZE (2048 + 64)
+#define MAX_ONENAND_PAGESIZE (4096 + 128)
static unsigned char *ffchars;
+#if CONFIG_FLEXONENAND
+#define PARTITION_NAME "Flex-OneNAND simulator partition"
+#else
+#define PARTITION_NAME "OneNAND simulator partition"
+#endif
+
static struct mtd_partition os_partitions[] = {
{
- .name = "OneNAND simulator partition",
+ .name = PARTITION_NAME,
.offset = 0,
.size = MTDPART_SIZ_FULL,
},
@@ -104,6 +136,7 @@ static void onenand_lock_handle(struct onenand_chip *this, int cmd)
switch (cmd) {
case ONENAND_CMD_UNLOCK:
+ case ONENAND_CMD_UNLOCK_ALL:
if (block_lock_scheme)
ONENAND_SET_WP_STATUS(ONENAND_WP_US, this);
else
@@ -228,10 +261,12 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
{
struct mtd_info *mtd = &info->mtd;
struct onenand_flash *flash = this->priv;
- int main_offset, spare_offset;
+ int main_offset, spare_offset, die = 0;
void __iomem *src;
void __iomem *dest;
unsigned int i;
+ static int pi_operation;
+ int erasesize, rgn;
if (dataram) {
main_offset = mtd->writesize;
@@ -241,10 +276,27 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
spare_offset = 0;
}
+ if (pi_operation) {
+ die = readw(this->base + ONENAND_REG_START_ADDRESS2);
+ die >>= ONENAND_DDP_SHIFT;
+ }
+
switch (cmd) {
+ case FLEXONENAND_CMD_PI_ACCESS:
+ pi_operation = 1;
+ break;
+
+ case ONENAND_CMD_RESET:
+ pi_operation = 0;
+ break;
+
case ONENAND_CMD_READ:
src = ONENAND_CORE(flash) + offset;
dest = ONENAND_MAIN_AREA(this, main_offset);
+ if (pi_operation) {
+ writew(boundary[die], this->base + ONENAND_DATARAM);
+ break;
+ }
memcpy(dest, src, mtd->writesize);
/* Fall through */
@@ -257,6 +309,10 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
case ONENAND_CMD_PROG:
src = ONENAND_MAIN_AREA(this, main_offset);
dest = ONENAND_CORE(flash) + offset;
+ if (pi_operation) {
+ boundary[die] = readw(this->base + ONENAND_DATARAM);
+ break;
+ }
/* To handle partial write */
for (i = 0; i < (1 << mtd->subpage_sft); i++) {
int off = i * this->subpagesize;
@@ -284,9 +340,18 @@ static void onenand_data_handle(struct onenand_chip *this, int cmd,
break;
case ONENAND_CMD_ERASE:
- memset(ONENAND_CORE(flash) + offset, 0xff, mtd->erasesize);
+ if (pi_operation)
+ break;
+
+ if (FLEXONENAND(this)) {
+ rgn = flexonenand_region(mtd, offset);
+ erasesize = mtd->eraseregions[rgn].erasesize;
+ } else
+ erasesize = mtd->erasesize;
+
+ memset(ONENAND_CORE(flash) + offset, 0xff, erasesize);
memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff,
- (mtd->erasesize >> 5));
+ (erasesize >> 5));
break;
default:
@@ -339,7 +404,7 @@ static void onenand_command_handle(struct onenand_chip *this, int cmd)
}
if (block != -1)
- offset += block << this->erase_shift;
+ offset = onenand_addr(this, block);
if (page != -1)
offset += page << this->page_shift;
@@ -390,6 +455,7 @@ static int __init flash_init(struct onenand_flash *flash)
}
density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
+ density &= ONENAND_DEVICE_DENSITY_MASK;
size = ((16 << 20) << density);
ONENAND_CORE(flash) = vmalloc(size + (size >> 5));
@@ -405,8 +471,9 @@ static int __init flash_init(struct onenand_flash *flash)
writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID);
writew(device_id, flash->base + ONENAND_REG_DEVICE_ID);
writew(version_id, flash->base + ONENAND_REG_VERSION_ID);
+ writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY);
- if (density < 2)
+ if (density < 2 && (!CONFIG_FLEXONENAND))
buffer_size = 0x0400; /* 1KiB page */
else
buffer_size = 0x0800; /* 2KiB page */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 892a9e4e275f..1dc721517e4c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2443,6 +2443,17 @@ config JME
To compile this driver as a module, choose M here. The module
will be called jme.
+config S6GMAC
+ tristate "S6105 GMAC ethernet support"
+ depends on XTENSA_VARIANT_S6000
+ select PHYLIB
+ help
+ This driver supports the on-chip Ethernet device of the
+ S6105 Xtensa processor.
+
+ To compile this driver as a module, choose M here. The module
+ will be called s6gmac.
+
endif # NETDEV_1000
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index d366fb2b40e9..4b58a59f211b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -245,6 +245,7 @@ obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
obj-$(CONFIG_DNET) += dnet.o
obj-$(CONFIG_MACB) += macb.o
+obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_ARM) += arm/
obj-$(CONFIG_DEV_APPLETALK) += appletalk/
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index b4bb06fdf307..f703758f0a6e 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define TX_CQ_LEN 1024
#define RX_Q_LEN 1024 /* Does not support any other value */
#define RX_CQ_LEN 1024
-#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
+#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
#define BE_NAPI_WEIGHT 64
@@ -91,6 +91,61 @@ struct be_queue_info {
atomic_t used; /* Number of valid elements in the queue */
};
+static inline u32 MODULO(u16 val, u16 limit)
+{
+ BUG_ON(limit & (limit - 1));
+ return val & (limit - 1);
+}
+
+static inline void index_adv(u16 *index, u16 val, u16 limit)
+{
+ *index = MODULO((*index + val), limit);
+}
+
+static inline void index_inc(u16 *index, u16 limit)
+{
+ *index = MODULO((*index + 1), limit);
+}
+
+static inline void *queue_head_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->head * q->entry_size;
+}
+
+static inline void *queue_tail_node(struct be_queue_info *q)
+{
+ return q->dma_mem.va + q->tail * q->entry_size;
+}
+
+static inline void queue_head_inc(struct be_queue_info *q)
+{
+ index_inc(&q->head, q->len);
+}
+
+static inline void queue_tail_inc(struct be_queue_info *q)
+{
+ index_inc(&q->tail, q->len);
+}
+
+
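Because the queue length is a power of two (enforced by the BUG_ON above), MODULO() reduces to a mask and the index helpers wrap without a divide. A small illustration, assuming the 128-entry MCC queue defined earlier (be_index_example() is a hypothetical function, shown only for the arithmetic):

static void be_index_example(void)
{
	u16 head = 126;

	index_inc(&head, MCC_Q_LEN);	/* head == 127 */
	index_inc(&head, MCC_Q_LEN);	/* head == 0: wrapped around */
	index_adv(&head, 5, MCC_Q_LEN);	/* head == 5 */
}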
+struct be_eq_obj {
+ struct be_queue_info q;
+ char desc[32];
+
+ /* Adaptive interrupt coalescing (AIC) info */
+ bool enable_aic;
+ u16 min_eqd; /* in usecs */
+ u16 max_eqd; /* in usecs */
+ u16 cur_eqd; /* in usecs */
+
+ struct napi_struct napi;
+};
+
+struct be_mcc_obj {
+ struct be_queue_info q;
+ struct be_queue_info cq;
+};
+
struct be_ctrl_info {
u8 __iomem *csr;
u8 __iomem *db; /* Door Bell */
@@ -98,11 +153,20 @@ struct be_ctrl_info {
int pci_func;
/* Mbox used for cmd request/response */
- spinlock_t cmd_lock; /* For serializing cmds to BE card */
+ spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */
struct be_dma_mem mbox_mem_alloced;
+
+ /* MCC Rings */
+ struct be_mcc_obj mcc_obj;
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_cq_lock;
+
+ /* MCC Async callback */
+ void (*async_cb)(void *adapter, bool link_up);
+ void *adapter_ctxt;
};
#include "be_cmds.h"
@@ -150,19 +214,6 @@ struct be_stats_obj {
struct be_dma_mem cmd;
};
-struct be_eq_obj {
- struct be_queue_info q;
- char desc[32];
-
- /* Adaptive interrupt coalescing (AIC) info */
- bool enable_aic;
- u16 min_eqd; /* in usecs */
- u16 max_eqd; /* in usecs */
- u16 cur_eqd; /* in usecs */
-
- struct napi_struct napi;
-};
-
struct be_tx_obj {
struct be_queue_info q;
struct be_queue_info cq;
@@ -225,8 +276,9 @@ struct be_adapter {
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
- struct be_link_info link;
+ bool link_up;
u32 port_num;
+ bool promiscuous;
};
extern struct ethtool_ops be_ethtool_ops;
@@ -235,22 +287,6 @@ extern struct ethtool_ops be_ethtool_ops;
#define BE_SET_NETDEV_OPS(netdev, ops) (netdev->netdev_ops = ops)
-static inline u32 MODULO(u16 val, u16 limit)
-{
- BUG_ON(limit & (limit - 1));
- return val & (limit - 1);
-}
-
-static inline void index_adv(u16 *index, u16 val, u16 limit)
-{
- *index = MODULO((*index + val), limit);
-}
-
-static inline void index_inc(u16 *index, u16 limit)
-{
- *index = MODULO((*index + 1), limit);
-}
-
#define PAGE_SHIFT_4K 12
#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
@@ -339,4 +375,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
return val;
}
+extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
+ u16 num_popped);
#endif /* BE_H */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d444aed962bc..583517ed56f0 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -17,6 +17,133 @@
#include "be.h"
+static void be_mcc_notify(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mccq = &ctrl->mcc_obj.q;
+ u32 val = 0;
+
+ val |= mccq->id & DB_MCCQ_RING_ID_MASK;
+ val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
+ iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
+}
+
+/* To check if valid bit is set, check the entire word as we don't know
+ * the endianness of the data (old entry is host endian while a new entry is
+ * little endian) */
+static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
+{
+ if (compl->flags != 0) {
+ compl->flags = le32_to_cpu(compl->flags);
+ BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* Need to reset the entire word that houses the valid bit */
+static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
+{
+ compl->flags = 0;
+}
+
+static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
+ struct be_mcc_cq_entry *compl)
+{
+ u16 compl_status, extd_status;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
+ CQE_STATUS_COMPL_MASK;
+ if (compl_status != MCC_STATUS_SUCCESS) {
+ extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
+ CQE_STATUS_EXTD_MASK;
+ printk(KERN_WARNING DRV_NAME
+ " error in cmd completion: status(compl/extd)=%d/%d\n",
+ compl_status, extd_status);
+ return -1;
+ }
+ return 0;
+}
+
+/* Link state evt is a string of bytes; no need for endian swapping */
+static void be_async_link_state_process(struct be_ctrl_info *ctrl,
+ struct be_async_event_link_state *evt)
+{
+ ctrl->async_cb(ctrl->adapter_ctxt,
+ evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
+}
+
+static inline bool is_link_state_evt(u32 trailer)
+{
+ return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+ ASYNC_TRAILER_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE);
+}
+
+static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
+{
+ struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
+ struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);
+
+ if (be_mcc_compl_is_new(compl)) {
+ queue_tail_inc(mcc_cq);
+ return compl;
+ }
+ return NULL;
+}
+
+void be_process_mcc(struct be_ctrl_info *ctrl)
+{
+ struct be_mcc_cq_entry *compl;
+ int num = 0;
+
+ spin_lock_bh(&ctrl->mcc_cq_lock);
+ while ((compl = be_mcc_compl_get(ctrl))) {
+ if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
+ /* Interpret flags as an async trailer */
+ BUG_ON(!is_link_state_evt(compl->flags));
+
+ /* Interpret compl as an async link evt */
+ be_async_link_state_process(ctrl,
+ (struct be_async_event_link_state *) compl);
+ } else {
+ be_mcc_compl_process(ctrl, compl);
+ atomic_dec(&ctrl->mcc_obj.q.used);
+ }
+ be_mcc_compl_use(compl);
+ num++;
+ }
+ if (num)
+ be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
+ spin_unlock_bh(&ctrl->mcc_cq_lock);
+}
+
+/* Wait till no more pending mcc requests are present */
+static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
+{
+#define mcc_timeout 50000 /* 5s timeout */
+ int i;
+ for (i = 0; i < mcc_timeout; i++) {
+ be_process_mcc(ctrl);
+ if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
+ break;
+ udelay(100);
+ }
+ if (i == mcc_timeout)
+ printk(KERN_WARNING DRV_NAME " mcc poll timed out\n");
+}
+
+/* Notify MCC requests and wait for completion */
+static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
+{
+ be_mcc_notify(ctrl);
+ be_mcc_wait_compl(ctrl);
+}
+
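Putting these helpers together, a hedged sketch (not part of the patch) of how a caller would issue an MCC command: obtain a WRB from the MCC queue with wrb_from_mcc() (added further down in this file), fill the command-specific payload, then notify and wait. The payload setup is elided and be_mcc_cmd_sketch() is a hypothetical name.

static int be_mcc_cmd_sketch(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb;

	spin_lock_bh(&ctrl->mcc_lock);

	wrb = wrb_from_mcc(&ctrl->mcc_obj.q);	/* NULL when the MCC ring is full */
	if (!wrb) {
		spin_unlock_bh(&ctrl->mcc_lock);
		return -EBUSY;
	}

	/* ... fill the embedded payload for the specific command here ... */

	be_mcc_notify_wait(ctrl);	/* ring the doorbell, then poll completions */

	spin_unlock_bh(&ctrl->mcc_lock);
	return 0;
}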
static int be_mbox_db_ready_wait(void __iomem *db)
{
int cnt = 0, wait = 5;
@@ -44,11 +171,11 @@ static int be_mbox_db_ready_wait(void __iomem *db)
/*
* Insert the mailbox address into the doorbell in two steps
+ * Polls on the mbox doorbell till a command completion (or a timeout) occurs
*/
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
int status;
- u16 compl_status, extd_status;
u32 val = 0;
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
@@ -79,24 +206,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
if (status != 0)
return status;
- /* compl entry has been made now */
- be_dws_le_to_cpu(cqe, sizeof(*cqe));
- if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
- printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
+ /* A cq entry has been made now */
+ if (be_mcc_compl_is_new(cqe)) {
+ status = be_mcc_compl_process(ctrl, &mbox->cqe);
+ be_mcc_compl_use(cqe);
+ if (status)
+ return status;
+ } else {
+ printk(KERN_WARNING DRV_NAME " invalid mailbox completion\n");
return -1;
}
-
- compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
- if (compl_status != MCC_STATUS_SUCCESS) {
- extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
- printk(KERN_WARNING DRV_NAME
- ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
- compl_status, extd_status);
- }
-
- return compl_status;
+ return 0;
}
static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)
@@ -235,6 +355,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
+static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
+{
+ struct be_mcc_wrb *wrb = NULL;
+ if (atomic_read(&mccq->used) < mccq->len) {
+ wrb = queue_head_node(mccq);
+ queue_head_inc(mccq);
+ atomic_inc(&mccq->used);
+ memset(wrb, 0, sizeof(*wrb));
+ }
+ return wrb;
+}
+
int be_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *eq, int eq_delay)
{
@@ -244,7 +376,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -272,7 +404,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
eq->id = le16_to_cpu(resp->eq_id);
eq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -284,7 +416,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -304,7 +436,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
if (!status)
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -315,7 +447,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -332,7 +464,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -342,7 +474,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -354,7 +486,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
req->pmac_id = cpu_to_le32(pmac_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -370,7 +502,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
void *ctxt = &req->context;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -388,7 +520,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -399,7 +531,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
cq->id = le16_to_cpu(resp->cq_id);
cq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
+
+ return status;
+}
+
+static u32 be_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
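As a worked illustration of the encoding above (a standalone sketch, not part of the patch, with fls() open-coded so it has no kernel dependency):

/* Mirrors be_encoded_q_len(): hardware stores log2(len) + 1, 16 wraps to 0. */
static unsigned int encoded_q_len_demo(unsigned int q_len)
{
	unsigned int enc = 0;

	while (q_len) {			/* open-coded fls(q_len) */
		enc++;
		q_len >>= 1;
	}
	if (enc == 16)			/* a 32768-entry ring encodes as 0 */
		enc = 0;
	return enc;
}
/* encoded_q_len_demo(128) == 8, encoded_q_len_demo(1024) == 11 */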
+
+int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
+{
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
+ struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
+ struct be_dma_mem *q_mem = &mccq->dma_mem;
+ void *ctxt = &req->context;
+ int status;
+
+ spin_lock(&ctrl->mbox_lock);
+ memset(wrb, 0, sizeof(*wrb));
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req));
+
+ req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
+
+ AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
+ AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
+ be_encoded_q_len(mccq->len));
+ AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+
+ be_dws_cpu_to_le(ctxt, sizeof(req->context));
+
+ be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
+
+ status = be_mbox_db_ring(ctrl);
+ if (!status) {
+ struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
+ mccq->id = le16_to_cpu(resp->id);
+ mccq->created = true;
+ }
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -415,7 +596,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
int status;
u32 len_encoded;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -446,7 +627,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -460,7 +641,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -482,7 +663,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
rxq->id = le16_to_cpu(resp->id);
rxq->created = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -496,7 +677,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -518,6 +699,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
subsys = CMD_SUBSYSTEM_ETH;
opcode = OPCODE_ETH_RX_DESTROY;
break;
+ case QTYPE_MCCQ:
+ subsys = CMD_SUBSYSTEM_COMMON;
+ opcode = OPCODE_COMMON_MCC_DESTROY;
+ break;
default:
printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
status = -1;
@@ -528,7 +713,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
status = be_mbox_db_ring(ctrl);
err:
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -541,7 +726,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
struct be_cmd_req_if_create *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -562,7 +747,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
*pmac_id = le32_to_cpu(resp->pmac_id);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -572,7 +757,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -583,7 +768,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
req->interface_id = cpu_to_le32(interface_id);
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -598,7 +783,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
struct be_sge *sge = nonembedded_sgl(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
memset(req, 0, sizeof(*req));
@@ -617,18 +802,20 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link)
+ bool *link_up)
{
struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct be_cmd_req_link_status *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
+
+ *link_up = false;
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -639,14 +826,11 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
status = be_mbox_db_ring(ctrl);
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
- link->speed = resp->mac_speed;
- link->duplex = resp->mac_duplex;
- link->fault = resp->mac_fault;
- } else {
- link->speed = PHY_LINK_SPEED_ZERO;
+ if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
+ *link_up = true;
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -656,7 +840,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -670,7 +854,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -681,7 +865,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -696,7 +880,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -707,7 +891,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -726,18 +910,22 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
+/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
- int status;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_promiscuous_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -749,21 +937,29 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
else
req->port0_promiscuous = en;
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+ return 0;
}
-int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
- u32 num, bool promiscuous)
+/*
+ * Use MCC for this command as it may be called in BH context
+ * (mc_list == NULL) => multicast promiscuous
+ */
+int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count)
{
- struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
- struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
- int status;
+#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_mcast_mac_config *req;
- spin_lock(&ctrl->cmd_lock);
- memset(wrb, 0, sizeof(*wrb));
+ spin_lock_bh(&ctrl->mcc_lock);
+
+ wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
+ BUG_ON(!wrb);
+
+ req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -771,17 +967,23 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
req->interface_id = if_id;
- req->promiscuous = promiscuous;
- if (!promiscuous) {
- req->num_mac = cpu_to_le16(num);
- if (num)
- memcpy(req->mac, mac_table, ETH_ALEN * num);
+ if (mc_list && mc_count <= BE_MAX_MC) {
+ int i;
+ struct dev_mc_list *mc;
+
+ req->num_mac = cpu_to_le16(mc_count);
+
+ for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
+ memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
+ } else {
+ req->promiscuous = 1;
}
- status = be_mbox_db_ring(ctrl);
+ be_mcc_notify_wait(ctrl);
- spin_unlock(&ctrl->cmd_lock);
- return status;
+ spin_unlock_bh(&ctrl->mcc_lock);
+
+ return 0;
}
int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
@@ -790,7 +992,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -804,7 +1006,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
status = be_mbox_db_ring(ctrl);
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -814,7 +1016,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -831,7 +1033,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
*rx_fc = le16_to_cpu(resp->rx_flow_control);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
@@ -841,7 +1043,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
int status;
- spin_lock(&ctrl->cmd_lock);
+ spin_lock(&ctrl->mbox_lock);
memset(wrb, 0, sizeof(*wrb));
@@ -856,6 +1058,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
*port_num = le32_to_cpu(resp->phys_port);
}
- spin_unlock(&ctrl->cmd_lock);
+ spin_unlock(&ctrl->mbox_lock);
return status;
}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e499e2d5b8c3..747626da7b4e 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -76,6 +76,34 @@ struct be_mcc_cq_entry {
u32 flags; /* dword 3 */
};
+/* When the async bit of mcc_compl is set, the last 4 bytes of
+ * mcc_compl are interpreted as follows:
+ */
+#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_CODE_LINK_STATE 0x1
+struct be_async_event_trailer {
+ u32 code;
+};
+
+enum {
+ ASYNC_EVENT_LINK_DOWN = 0x0,
+ ASYNC_EVENT_LINK_UP = 0x1
+};
+
+/* When the event code of an async trailer is link-state, the mcc_compl
+ * must be interpreted as follows
+ */
+struct be_async_event_link_state {
+ u8 physical_port;
+ u8 port_link_status;
+ u8 port_duplex;
+ u8 port_speed;
+ u8 port_fault;
+ u8 rsvd0[7];
+ struct be_async_event_trailer trailer;
+} __packed;
+
struct be_mcc_mailbox {
struct be_mcc_wrb wrb;
struct be_mcc_cq_entry cqe;
@@ -101,6 +129,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_FIRMWARE_CONFIG 42
#define OPCODE_COMMON_NTWK_INTERFACE_CREATE 50
#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY 51
+#define OPCODE_COMMON_MCC_DESTROY 53
#define OPCODE_COMMON_CQ_DESTROY 54
#define OPCODE_COMMON_EQ_DESTROY 55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
@@ -269,6 +298,38 @@ struct be_cmd_resp_cq_create {
u16 rsvd0;
} __packed;
+/******************** Create MCCQ ***************************/
+/* Pseudo amap definition in which each bit of the actual structure is defined
+ * as a byte: used to calculate offset/shift/mask of each field */
+struct amap_mcc_context {
+ u8 con_index[14];
+ u8 rsvd0[2];
+ u8 ring_size[4];
+ u8 fetch_wrb;
+ u8 fetch_r2t;
+ u8 cq_id[10];
+ u8 prod_index[14];
+ u8 fid[8];
+ u8 pdid[9];
+ u8 valid;
+ u8 rsvd1[32];
+ u8 rsvd2[32];
+} __packed;
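The byte-per-bit convention above lets a field's bit position and width be read straight off the pseudo struct; a minimal sketch of that idea follows (helper names are illustrative, not the driver's actual AMAP_* macros):

/* Bit offset == offsetof() in the pseudo struct, bit width == sizeof() of the
 * member, because every bit of the real layout is one byte here.
 */
#define PSEUDO_BIT_OFFSET(pstruct, field) \
	((u32)offsetof(struct pstruct, field))
#define PSEUDO_BIT_SIZE(pstruct, field) \
	((u32)sizeof(((struct pstruct *)0)->field))

/* For amap_mcc_context: cq_id starts at bit 22 and is 10 bits wide, i.e.
 * PSEUDO_BIT_OFFSET(amap_mcc_context, cq_id) == 22
 * PSEUDO_BIT_SIZE(amap_mcc_context, cq_id)  == 10
 */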
+
+struct be_cmd_req_mcc_create {
+ struct be_cmd_req_hdr hdr;
+ u16 num_pages;
+ u16 rsvd0;
+ u8 context[sizeof(struct amap_mcc_context) / 8];
+ struct phys_addr pages[8];
+} __packed;
+
+struct be_cmd_resp_mcc_create {
+ struct be_cmd_resp_hdr hdr;
+ u16 id;
+ u16 rsvd0;
+} __packed;
+
/******************** Create TxQ ***************************/
#define BE_ETH_TX_RING_TYPE_STANDARD 2
#define BE_ULP1_NUM 1
@@ -341,7 +402,8 @@ enum {
QTYPE_EQ = 1,
QTYPE_CQ,
QTYPE_TXQ,
- QTYPE_RXQ
+ QTYPE_RXQ,
+ QTYPE_MCCQ
};
struct be_cmd_req_q_destroy {
@@ -546,12 +608,6 @@ struct be_cmd_req_link_status {
u32 rsvd;
};
-struct be_link_info {
- u8 duplex;
- u8 speed;
- u8 fault;
-};
-
enum {
PHY_LINK_DUPLEX_NONE = 0x0,
PHY_LINK_DUPLEX_HALF = 0x1,
@@ -657,6 +713,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *cq, struct be_queue_info *eq,
bool sol_evts, bool no_delay,
int num_cqe_dma_coalesce);
+extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq);
extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
struct be_queue_info *txq,
struct be_queue_info *cq);
@@ -667,7 +726,7 @@ extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
int type);
extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
- struct be_link_info *link);
+ bool *link_up);
extern int be_cmd_reset(struct be_ctrl_info *ctrl);
extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
struct be_dma_mem *nonemb_cmd);
@@ -679,10 +738,11 @@ extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id,
bool promiscuous);
extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
u8 port_num, bool en);
-extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id,
- u8 *mac_table, u32 num, bool promiscuous);
+extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
+ struct dev_mc_list *mc_list, u32 mc_count);
extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
+extern void be_process_mcc(struct be_ctrl_info *ctrl);
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index b132aa4893ca..b02e805c1db3 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -61,7 +61,7 @@
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
/* Must be 1 */
-#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
+#define DB_EQ_EVNT_SHIFT (10) /* bit 10 */
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
/* Rearm bit */
@@ -88,6 +88,12 @@
/* Number of rx frags posted */
#define DB_RQ_NUM_POSTED_SHIFT (24) /* bits 24 - 31 */
+/********** MCC door bell ************/
+#define DB_MCCQ_OFFSET 0x140
+#define DB_MCCQ_RING_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 66bb56874d9b..66c10c87f517 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
return 0;
}
-static inline void *queue_head_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->head * q->entry_size;
-}
-
-static inline void *queue_tail_node(struct be_queue_info *q)
-{
- return q->dma_mem.va + q->tail * q->entry_size;
-}
-
-static inline void queue_head_inc(struct be_queue_info *q)
-{
- index_inc(&q->head, q->len);
-}
-
-static inline void queue_tail_inc(struct be_queue_info *q)
-{
- index_inc(&q->tail, q->len);
-}
-
static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}
-static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
+void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
bool arm, u16 num_popped)
{
u32 val = 0;
@@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter)
dev_stats->tx_window_errors = 0;
}
-static void be_link_status_update(struct be_adapter *adapter)
+void be_link_status_update(void *ctxt, bool link_up)
{
- struct be_link_info *prev = &adapter->link;
- struct be_link_info now = { 0 };
+ struct be_adapter *adapter = ctxt;
struct net_device *netdev = adapter->netdev;
- be_cmd_link_status_query(&adapter->ctrl, &now);
-
/* If link came up or went down */
- if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
- prev->speed == PHY_LINK_SPEED_ZERO)) {
- if (now.speed == PHY_LINK_SPEED_ZERO) {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
- printk(KERN_INFO "%s: Link down\n", netdev->name);
- } else {
+ if (adapter->link_up != link_up) {
+ if (link_up) {
netif_start_queue(netdev);
netif_carrier_on(netdev);
printk(KERN_INFO "%s: Link up\n", netdev->name);
+ } else {
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ printk(KERN_INFO "%s: Link down\n", netdev->name);
}
+ adapter->link_up = link_up;
}
- *prev = now;
}
/* Update the EQ delay in BE based on the RX frags consumed / sec */
@@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
be_vid_config(netdev);
}
-static void be_set_multicast_filter(struct net_device *netdev)
+static void be_set_multicast_list(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct dev_mc_list *mc_ptr;
- u8 mac_addr[32][ETH_ALEN];
- int i = 0;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
- if (netdev->flags & IFF_ALLMULTI) {
- /* set BE in Multicast promiscuous */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, NULL, 0, true);
- return;
+ if (netdev->flags & IFF_PROMISC) {
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
+ adapter->promiscuous = true;
+ goto done;
}
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
- memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
- if (++i >= 32) {
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
- i = 0;
- }
-
+ /* BE was previously in promiscuous mode; disable it */
+ if (adapter->promiscuous) {
+ adapter->promiscuous = false;
+ be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
}
- if (i) {
- /* reset the promiscuous mode also. */
- be_cmd_mcast_mac_set(&adapter->ctrl,
- adapter->if_handle, &mac_addr[0][0], i, false);
+ if (netdev->flags & IFF_ALLMULTI) {
+ be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
+ goto done;
}
-}
-static void be_set_multicast_list(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (netdev->flags & IFF_PROMISC) {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
- } else {
- be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
- be_set_multicast_filter(netdev);
- }
+ be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
+ netdev->mc_count);
+done:
+ return;
}
static void be_rx_rate_update(struct be_adapter *adapter)
@@ -960,10 +921,8 @@ static void be_post_rx_frags(struct be_adapter *adapter)
return;
}
-static struct be_eth_tx_compl *
-be_tx_compl_get(struct be_adapter *adapter)
+static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
- struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
@@ -1051,6 +1010,59 @@ static void be_tx_q_clean(struct be_adapter *adapter)
}
}
+static void be_mcc_queues_destroy(struct be_adapter *adapter)
+{
+ struct be_queue_info *q;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ q = &ctrl->mcc_obj.q;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
+ be_queue_free(adapter, q);
+
+ q = &ctrl->mcc_obj.cq;
+ if (q->created)
+ be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
+ be_queue_free(adapter, q);
+}
+
+/* Must be called only after TX qs are created as MCC shares TX EQ */
+static int be_mcc_queues_create(struct be_adapter *adapter)
+{
+ struct be_queue_info *q, *cq;
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ /* Alloc MCC compl queue */
+ cq = &ctrl->mcc_obj.cq;
+ if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
+ sizeof(struct be_mcc_cq_entry)))
+ goto err;
+
+ /* Ask BE to create MCC compl queue; share TX's eq */
+ if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
+ goto mcc_cq_free;
+
+ /* Alloc MCC queue */
+ q = &ctrl->mcc_obj.q;
+ if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
+ goto mcc_cq_destroy;
+
+ /* Ask BE to create MCC queue */
+ if (be_cmd_mccq_create(ctrl, q, cq))
+ goto mcc_q_free;
+
+ return 0;
+
+mcc_q_free:
+ be_queue_free(adapter, q);
+mcc_cq_destroy:
+ be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
+mcc_cq_free:
+ be_queue_free(adapter, cq);
+err:
+ return -1;
+}
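In call-site form the constraint above reads roughly as below; this is only a condensed sketch of the be_setup() flow added later in this patch, with be_tx_queues_create()/be_rx_queues_create() assumed from this driver and error unwinding trimmed:

/* Sketch only: MCC queues come last because they reuse adapter->tx_eq. */
static int setup_order_sketch(struct be_adapter *adapter)
{
	int status;

	status = be_tx_queues_create(adapter);	/* creates tx_eq and TX qs */
	if (status)
		return status;
	status = be_rx_queues_create(adapter);
	if (status)
		goto tx_qs_destroy;
	status = be_mcc_queues_create(adapter);	/* shares the TX EQ */
	if (status)
		goto rx_qs_destroy;
	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
	return status;
}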
+
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
struct be_queue_info *q;
@@ -1263,7 +1275,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
return IRQ_HANDLED;
}
-static irqreturn_t be_msix_tx(int irq, void *dev)
+static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
struct be_adapter *adapter = dev;
@@ -1324,40 +1336,51 @@ int be_poll_rx(struct napi_struct *napi, int budget)
return work_done;
}
-/* For TX we don't honour budget; consume everything */
-int be_poll_tx(struct napi_struct *napi, int budget)
+void be_process_tx(struct be_adapter *adapter)
{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
- struct be_tx_obj *tx_obj = &adapter->tx_obj;
- struct be_queue_info *tx_cq = &tx_obj->cq;
- struct be_queue_info *txq = &tx_obj->q;
+ struct be_queue_info *txq = &adapter->tx_obj.q;
+ struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
u32 num_cmpl = 0;
u16 end_idx;
- while ((txcp = be_tx_compl_get(adapter))) {
+ while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp);
be_tx_compl_process(adapter, end_idx);
num_cmpl++;
}
- /* As Tx wrbs have been freed up, wake up netdev queue if
- * it was stopped due to lack of tx wrbs.
- */
- if (netif_queue_stopped(adapter->netdev) &&
+ if (num_cmpl) {
+ be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+
+ /* As Tx wrbs have been freed up, wake up netdev queue if
+ * it was stopped due to lack of tx wrbs.
+ */
+ if (netif_queue_stopped(adapter->netdev) &&
atomic_read(&txq->used) < txq->len / 2) {
- netif_wake_queue(adapter->netdev);
+ netif_wake_queue(adapter->netdev);
+ }
+
+ drvr_stats(adapter)->be_tx_events++;
+ drvr_stats(adapter)->be_tx_compl += num_cmpl;
}
+}
+
+/* As TX and MCC share the same EQ, check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
+{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
napi_complete(napi);
- be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
+ be_process_tx(adapter);
- drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += num_cmpl;
+ be_process_mcc(&adapter->ctrl);
return 1;
}
@@ -1368,9 +1391,6 @@ static void be_worker(struct work_struct *work)
container_of(work, struct be_adapter, work.work);
int status;
- /* Check link */
- be_link_status_update(adapter);
-
/* Get Stats */
status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
if (!status)
@@ -1419,7 +1439,7 @@ static int be_msix_register(struct be_adapter *adapter)
sprintf(tx_eq->desc, "%s-tx", netdev->name);
vec = be_msix_vec_get(adapter, tx_eq->q.id);
- status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
+ status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
if (status)
goto err;
@@ -1495,6 +1515,39 @@ static int be_open(struct net_device *netdev)
struct be_ctrl_info *ctrl = &adapter->ctrl;
struct be_eq_obj *rx_eq = &adapter->rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
+ bool link_up;
+ int status;
+
+ /* First time posting */
+ be_post_rx_frags(adapter);
+
+ napi_enable(&rx_eq->napi);
+ napi_enable(&tx_eq->napi);
+
+ be_irq_register(adapter);
+
+ be_intr_set(ctrl, true);
+
+ /* The evt queues are created in unarmed state; arm them */
+ be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
+ be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
+
+ /* Rx compl queue may be in unarmed state; rearm it */
+ be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
+
+ status = be_cmd_link_status_query(ctrl, &link_up);
+ if (status)
+ return status;
+ be_link_status_update(adapter, link_up);
+
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
+ return 0;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+ struct net_device *netdev = adapter->netdev;
u32 if_flags;
int status;
@@ -1521,29 +1574,14 @@ static int be_open(struct net_device *netdev)
if (status != 0)
goto tx_qs_destroy;
- /* First time posting */
- be_post_rx_frags(adapter);
-
- napi_enable(&rx_eq->napi);
- napi_enable(&tx_eq->napi);
-
- be_irq_register(adapter);
-
- be_intr_set(ctrl, true);
-
- /* The evt queues are created in the unarmed state; arm them */
- be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
- be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);
-
- /* The compl queues are created in the unarmed state; arm them */
- be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
- be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);
-
- be_link_status_update(adapter);
+ status = be_mcc_queues_create(adapter);
+ if (status != 0)
+ goto rx_qs_destroy;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
+rx_qs_destroy:
+ be_rx_queues_destroy(adapter);
tx_qs_destroy:
be_tx_queues_destroy(adapter);
if_destroy:
@@ -1552,6 +1590,19 @@ do_none:
return status;
}
+static int be_clear(struct be_adapter *adapter)
+{
+ struct be_ctrl_info *ctrl = &adapter->ctrl;
+
+ be_rx_queues_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+
+ be_cmd_if_destroy(ctrl, adapter->if_handle);
+
+ be_mcc_queues_destroy(adapter);
+ return 0;
+}
+
static int be_close(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1564,7 +1615,7 @@ static int be_close(struct net_device *netdev)
netif_stop_queue(netdev);
netif_carrier_off(netdev);
- adapter->link.speed = PHY_LINK_SPEED_ZERO;
+ adapter->link_up = false;
be_intr_set(ctrl, false);
@@ -1581,10 +1632,6 @@ static int be_close(struct net_device *netdev)
napi_disable(&rx_eq->napi);
napi_disable(&tx_eq->napi);
- be_rx_queues_destroy(adapter);
- be_tx_queues_destroy(adapter);
-
- be_cmd_if_destroy(ctrl, adapter->if_handle);
return 0;
}
@@ -1673,7 +1720,7 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
BE_NAPI_WEIGHT);
- netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
+ netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
netif_carrier_off(netdev);
@@ -1755,7 +1802,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
- spin_lock_init(&ctrl->cmd_lock);
+ spin_lock_init(&ctrl->mbox_lock);
+ spin_lock_init(&ctrl->mcc_lock);
+ spin_lock_init(&ctrl->mcc_cq_lock);
+
+ ctrl->async_cb = be_link_status_update;
+ ctrl->adapter_ctxt = adapter;
val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &
@@ -1793,6 +1845,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
unregister_netdev(adapter->netdev);
+ be_clear(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -1890,13 +1944,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_netdev_init(netdev);
SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+ status = be_setup(adapter);
+ if (status)
+ goto stats_clean;
status = register_netdev(netdev);
if (status != 0)
- goto stats_clean;
+ goto unsetup;
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
return 0;
+unsetup:
+ be_clear(adapter);
stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
@@ -1921,6 +1980,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
if (netif_running(netdev)) {
rtnl_lock();
be_close(netdev);
+ be_clear(adapter);
rtnl_unlock();
}
@@ -1947,6 +2007,7 @@ static int be_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
rtnl_lock();
+ be_setup(adapter);
be_open(netdev);
rtnl_unlock();
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 677f60490f67..679885a122b4 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1997,7 +1997,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
struct e1000_hw *hw = &adapter->hw;
struct net_device *poll_dev = adapter->netdev;
- int tx_cleaned = 0, work_done = 0;
+ int tx_cleaned = 1, work_done = 0;
adapter = netdev_priv(poll_dev);
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index e02bafdd3682..93f4abd990a9 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -668,7 +668,7 @@ int mlx4_en_start_port(struct net_device *dev)
queue_work(mdev->workqueue, &priv->mcast_task);
priv->port_up = true;
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
mac_err:
@@ -700,14 +700,14 @@ void mlx4_en_stop_port(struct net_device *dev)
en_dbg(DRV, priv, "stop port called while port already down\n");
return;
}
- netif_stop_queue(dev);
/* Synchronize with tx routine */
netif_tx_lock_bh(dev);
- priv->port_up = false;
+ netif_tx_stop_all_queues(dev);
netif_tx_unlock_bh(dev);
/* close port*/
+ priv->port_up = false;
mlx4_CLOSE_PORT(mdev->dev, priv->port);
/* Unregister Mac address for the port */
@@ -881,7 +881,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
cancel_delayed_work(&priv->stats_task);
- cancel_delayed_work(&priv->refill_task);
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
@@ -986,7 +985,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
- INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 5a14899c1e25..91bdfdfd431f 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -269,31 +269,6 @@ reduce_rings:
return 0;
}
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
- struct mlx4_en_rx_ring *ring)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- int num = 0;
- int err;
-
- while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
- err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
- ring->size_mask);
- if (err) {
- if (netif_msg_rx_err(priv))
- en_warn(priv, "Failed preparing rx descriptor\n");
- priv->port_stats.rx_alloc_failed++;
- break;
- }
- ++num;
- ++ring->prod;
- }
- if ((u32) (ring->prod - ring->cons) == ring->actual_size)
- ring->full = 1;
-
- return num;
-}
-
static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
@@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
}
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
- struct delayed_work *delay = to_delayed_work(work);
- struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
- refill_task);
- struct mlx4_en_dev *mdev = priv->mdev;
- struct net_device *dev = priv->dev;
- struct mlx4_en_rx_ring *ring;
- int need_refill = 0;
- int i;
-
- mutex_lock(&mdev->state_lock);
- if (!mdev->device_up || !priv->port_up)
- goto out;
-
- /* We only get here if there are no receive buffers, so we can't race
- * with Rx interrupts while filling buffers */
- for (i = 0; i < priv->rx_ring_num; i++) {
- ring = &priv->rx_ring[i];
- if (ring->need_refill) {
- if (mlx4_en_fill_rx_buf(dev, ring)) {
- ring->need_refill = 0;
- mlx4_en_update_rx_prod_db(ring);
- } else
- need_refill = 1;
- }
- }
- if (need_refill)
- queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
- mutex_unlock(&mdev->state_lock);
-}
-
-
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
@@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring_ind--;
goto err_allocator;
}
-
- /* Fill Rx buffers */
- ring->full = 0;
}
err = mlx4_en_fill_rx_buffers(priv);
if (err)
@@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
return skb;
}
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring,
- int from, int to, int num)
-{
- struct skb_frag_struct *skb_frags_from;
- struct skb_frag_struct *skb_frags_to;
- struct mlx4_en_rx_desc *rx_desc_from;
- struct mlx4_en_rx_desc *rx_desc_to;
- int from_index, to_index;
- int nr, i;
-
- for (i = 0; i < num; i++) {
- from_index = (from + i) & ring->size_mask;
- to_index = (to + i) & ring->size_mask;
- skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
- skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
- rx_desc_from = ring->buf + (from_index << ring->log_stride);
- rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
- for (nr = 0; nr < priv->num_frags; nr++) {
- skb_frags_to[nr].page = skb_frags_from[nr].page;
- skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
- rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
- }
- }
-}
-
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
@@ -821,11 +730,6 @@ out:
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
ring->prod += polled; /* Polled descriptors were reallocated in place */
- if (unlikely(!ring->full)) {
- mlx4_en_copy_desc(priv, ring, ring->cons - polled,
- ring->prod - polled, polled);
- mlx4_en_fill_rx_buf(dev, ring);
- }
mlx4_en_update_rx_prod_db(ring);
return polled;
}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5dc7466ad035..08c43f2ae72b 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -515,16 +515,9 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
else {
if (netif_msg_tx_err(priv))
en_warn(priv, "Non-linear headers\n");
- dev_kfree_skb_any(skb);
return 0;
}
}
- if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "LSO header size too big\n");
- dev_kfree_skb_any(skb);
- return 0;
- }
} else {
*lso_header_size = 0;
if (!is_inline(skb, NULL))
@@ -616,13 +609,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int lso_header_size;
void *fragptr;
- if (unlikely(!skb->len)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
real_size = get_real_size(skb, dev, &lso_header_size);
if (unlikely(!real_size))
- return NETDEV_TX_OK;
+ goto tx_drop;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -630,8 +619,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
+ goto tx_drop;
}
tx_ind = skb->queue_mapping;
@@ -653,14 +641,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- /* Now that we know what Tx ring to use */
- if (unlikely(!priv->port_up)) {
- if (netif_msg_tx_err(priv))
- en_warn(priv, "xmit: port down!\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* Track current inflight packets for performance analysis */
AVG_PERF_COUNTER(priv->pstats.inflight_avg,
(u32) (ring->prod - ring->cons - 1));
@@ -785,5 +765,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
mlx4_en_xmit_poll(priv, tx_ind);
return 0;
+
+tx_drop:
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index d43a9e4c2aea..c7c5e86804ff 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -99,7 +99,6 @@
#define RSS_FACTOR 2
#define TXBB_SIZE 64
#define HEADROOM (2048 / TXBB_SIZE + 1)
-#define MAX_LSO_HDR_SIZE 92
#define STAMP_STRIDE 64
#define STAMP_DWORDS (STAMP_STRIDE / 4)
#define STAMP_SHIFT 31
@@ -296,8 +295,6 @@ struct mlx4_en_rx_ring {
u32 prod;
u32 cons;
u32 buf_size;
- int need_refill;
- int full;
void *buf;
void *rx_info;
unsigned long bytes;
@@ -495,7 +492,6 @@ struct mlx4_en_priv {
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct work_struct mcast_task;
struct work_struct mac_task;
- struct delayed_work refill_task;
struct work_struct watchdog_task;
struct work_struct linkstate_task;
struct delayed_work stats_task;
@@ -565,7 +561,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_refill(struct work_struct *work);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 745ae8b4a2e8..0f32db3e92ad 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1750,12 +1750,12 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
uc_addr_set(mp, dev->dev_addr);
- port_config = rdlp(mp, PORT_CONFIG);
+ port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
nibbles = uc_addr_filter_mask(dev);
if (!nibbles) {
port_config |= UNICAST_PROMISCUOUS_MODE;
- wrlp(mp, PORT_CONFIG, port_config);
- return;
+ nibbles = 0xffff;
}
for (i = 0; i < 16; i += 4) {
@@ -1776,7 +1776,6 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
wrl(mp, off, v);
}
- port_config &= ~UNICAST_PROMISCUOUS_MODE;
wrlp(mp, PORT_CONFIG, port_config);
}
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399d6dd9..17c116bb332c 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -356,7 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
ap_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db13586..aa3d39f38e22 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -397,7 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
if (!skb_queue_empty(&ap->rqueue))
tasklet_schedule(&ap->tsk);
sp_put(ap);
- tty_unthrottle(tty);
}
static void
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 8a823ecc99a9..bbc6d4d3cc94 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -3837,7 +3837,9 @@ static void ql_reset_work(struct work_struct *work)
16) | ISP_CONTROL_RI));
}
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
} while (--max_wait_time);
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 4e22462684c9..4b53b58d75fc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -51,9 +51,6 @@
#define TX_BUFFS_AVAIL(tp) \
(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static const int max_interrupt_work = 20;
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
new file mode 100644
index 000000000000..5345e47b35ac
--- /dev/null
+++ b/drivers/net/s6gmac.c
@@ -0,0 +1,1073 @@
+/*
+ * Ethernet driver for S6105 on chip network device
+ * (c)2008 emlix GmbH http://www.emlix.com
+ * Authors: Oskar Schirmer <os@emlix.com>
+ * Daniel Gloeckner <dg@emlix.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/stddef.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <variant/hardware.h>
+#include <variant/dmac.h>
+
+#define DRV_NAME "s6gmac"
+#define DRV_PRMT DRV_NAME ": "
+
+
+/* register declarations */
+
+#define S6_GMAC_MACCONF1 0x000
+#define S6_GMAC_MACCONF1_TXENA 0
+#define S6_GMAC_MACCONF1_SYNCTX 1
+#define S6_GMAC_MACCONF1_RXENA 2
+#define S6_GMAC_MACCONF1_SYNCRX 3
+#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
+#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
+#define S6_GMAC_MACCONF1_LOOPBACK 8
+#define S6_GMAC_MACCONF1_RESTXFUNC 16
+#define S6_GMAC_MACCONF1_RESRXFUNC 17
+#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
+#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
+#define S6_GMAC_MACCONF1_SIMULRES 30
+#define S6_GMAC_MACCONF1_SOFTRES 31
+#define S6_GMAC_MACCONF2 0x004
+#define S6_GMAC_MACCONF2_FULL 0
+#define S6_GMAC_MACCONF2_CRCENA 1
+#define S6_GMAC_MACCONF2_PADCRCENA 2
+#define S6_GMAC_MACCONF2_LENGTHFCHK 4
+#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
+#define S6_GMAC_MACCONF2_IFMODE 8
+#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
+#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
+#define S6_GMAC_MACCONF2_IFMODE_MASK 3
+#define S6_GMAC_MACCONF2_PREAMBLELEN 12
+#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
+#define S6_GMAC_MACIPGIFG 0x008
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
+#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
+#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
+#define S6_GMAC_MACHALFDUPLEX 0x00C
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
+#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
+#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
+#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
+#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
+#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
+#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
+#define S6_GMAC_MACMAXFRAMELEN 0x010
+#define S6_GMAC_MACMIICONF 0x020
+#define S6_GMAC_MACMIICONF_CSEL 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
+#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
+#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
+#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
+#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
+#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
+#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
+#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
+#define S6_GMAC_MACMIICONF_CSEL_MASK 7
+#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
+#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
+#define S6_GMAC_MACMIICMD 0x024
+#define S6_GMAC_MACMIICMD_READ 0
+#define S6_GMAC_MACMIICMD_SCAN 1
+#define S6_GMAC_MACMIIADDR 0x028
+#define S6_GMAC_MACMIIADDR_REG 0
+#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
+#define S6_GMAC_MACMIIADDR_PHY 8
+#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
+#define S6_GMAC_MACMIICTRL 0x02C
+#define S6_GMAC_MACMIISTAT 0x030
+#define S6_GMAC_MACMIIINDI 0x034
+#define S6_GMAC_MACMIIINDI_BUSY 0
+#define S6_GMAC_MACMIIINDI_SCAN 1
+#define S6_GMAC_MACMIIINDI_INVAL 2
+#define S6_GMAC_MACINTERFSTAT 0x03C
+#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
+#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
+#define S6_GMAC_MACSTATADDR1 0x040
+#define S6_GMAC_MACSTATADDR2 0x044
+
+#define S6_GMAC_FIFOCONF0 0x048
+#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
+#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
+#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
+#define S6_GMAC_FIFOCONF0_HSTRSTST 3
+#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
+#define S6_GMAC_FIFOCONF0_WTMENREQ 8
+#define S6_GMAC_FIFOCONF0_SRFENREQ 9
+#define S6_GMAC_FIFOCONF0_FRFENREQ 10
+#define S6_GMAC_FIFOCONF0_STFENREQ 11
+#define S6_GMAC_FIFOCONF0_FTFENREQ 12
+#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
+#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
+#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
+#define S6_GMAC_FIFOCONF0_STFENRPLY 19
+#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
+#define S6_GMAC_FIFOCONF1 0x04C
+#define S6_GMAC_FIFOCONF2 0x050
+#define S6_GMAC_FIFOCONF2_CFGLWM 0
+#define S6_GMAC_FIFOCONF2_CFGHWM 16
+#define S6_GMAC_FIFOCONF3 0x054
+#define S6_GMAC_FIFOCONF3_CFGFTTH 0
+#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
+#define S6_GMAC_FIFOCONF4 0x058
+#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
+#define S6_GMAC_FIFOCONF_RSV_RUNT 1
+#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
+#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
+#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
+#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
+#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
+#define S6_GMAC_FIFOCONF_RSV_OK 7
+#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
+#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
+#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
+#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
+#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
+#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
+#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
+#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
+#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
+#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
+#define S6_GMAC_FIFOCONF5 0x05C
+#define S6_GMAC_FIFOCONF5_DROPLT64 18
+#define S6_GMAC_FIFOCONF5_CFGBYTM 19
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
+#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
+
+#define S6_GMAC_STAT_REGS 0x080
+#define S6_GMAC_STAT_SIZE_MIN 12
+#define S6_GMAC_STATTR64 0x080
+#define S6_GMAC_STATTR64_SIZE 18
+#define S6_GMAC_STATTR127 0x084
+#define S6_GMAC_STATTR127_SIZE 18
+#define S6_GMAC_STATTR255 0x088
+#define S6_GMAC_STATTR255_SIZE 18
+#define S6_GMAC_STATTR511 0x08C
+#define S6_GMAC_STATTR511_SIZE 18
+#define S6_GMAC_STATTR1K 0x090
+#define S6_GMAC_STATTR1K_SIZE 18
+#define S6_GMAC_STATTRMAX 0x094
+#define S6_GMAC_STATTRMAX_SIZE 18
+#define S6_GMAC_STATTRMGV 0x098
+#define S6_GMAC_STATTRMGV_SIZE 18
+#define S6_GMAC_STATRBYT 0x09C
+#define S6_GMAC_STATRBYT_SIZE 24
+#define S6_GMAC_STATRPKT 0x0A0
+#define S6_GMAC_STATRPKT_SIZE 18
+#define S6_GMAC_STATRFCS 0x0A4
+#define S6_GMAC_STATRFCS_SIZE 12
+#define S6_GMAC_STATRMCA 0x0A8
+#define S6_GMAC_STATRMCA_SIZE 18
+#define S6_GMAC_STATRBCA 0x0AC
+#define S6_GMAC_STATRBCA_SIZE 22
+#define S6_GMAC_STATRXCF 0x0B0
+#define S6_GMAC_STATRXCF_SIZE 18
+#define S6_GMAC_STATRXPF 0x0B4
+#define S6_GMAC_STATRXPF_SIZE 12
+#define S6_GMAC_STATRXUO 0x0B8
+#define S6_GMAC_STATRXUO_SIZE 12
+#define S6_GMAC_STATRALN 0x0BC
+#define S6_GMAC_STATRALN_SIZE 12
+#define S6_GMAC_STATRFLR 0x0C0
+#define S6_GMAC_STATRFLR_SIZE 16
+#define S6_GMAC_STATRCDE 0x0C4
+#define S6_GMAC_STATRCDE_SIZE 12
+#define S6_GMAC_STATRCSE 0x0C8
+#define S6_GMAC_STATRCSE_SIZE 12
+#define S6_GMAC_STATRUND 0x0CC
+#define S6_GMAC_STATRUND_SIZE 12
+#define S6_GMAC_STATROVR 0x0D0
+#define S6_GMAC_STATROVR_SIZE 12
+#define S6_GMAC_STATRFRG 0x0D4
+#define S6_GMAC_STATRFRG_SIZE 12
+#define S6_GMAC_STATRJBR 0x0D8
+#define S6_GMAC_STATRJBR_SIZE 12
+#define S6_GMAC_STATRDRP 0x0DC
+#define S6_GMAC_STATRDRP_SIZE 12
+#define S6_GMAC_STATTBYT 0x0E0
+#define S6_GMAC_STATTBYT_SIZE 24
+#define S6_GMAC_STATTPKT 0x0E4
+#define S6_GMAC_STATTPKT_SIZE 18
+#define S6_GMAC_STATTMCA 0x0E8
+#define S6_GMAC_STATTMCA_SIZE 18
+#define S6_GMAC_STATTBCA 0x0EC
+#define S6_GMAC_STATTBCA_SIZE 18
+#define S6_GMAC_STATTXPF 0x0F0
+#define S6_GMAC_STATTXPF_SIZE 12
+#define S6_GMAC_STATTDFR 0x0F4
+#define S6_GMAC_STATTDFR_SIZE 12
+#define S6_GMAC_STATTEDF 0x0F8
+#define S6_GMAC_STATTEDF_SIZE 12
+#define S6_GMAC_STATTSCL 0x0FC
+#define S6_GMAC_STATTSCL_SIZE 12
+#define S6_GMAC_STATTMCL 0x100
+#define S6_GMAC_STATTMCL_SIZE 12
+#define S6_GMAC_STATTLCL 0x104
+#define S6_GMAC_STATTLCL_SIZE 12
+#define S6_GMAC_STATTXCL 0x108
+#define S6_GMAC_STATTXCL_SIZE 12
+#define S6_GMAC_STATTNCL 0x10C
+#define S6_GMAC_STATTNCL_SIZE 13
+#define S6_GMAC_STATTPFH 0x110
+#define S6_GMAC_STATTPFH_SIZE 12
+#define S6_GMAC_STATTDRP 0x114
+#define S6_GMAC_STATTDRP_SIZE 12
+#define S6_GMAC_STATTJBR 0x118
+#define S6_GMAC_STATTJBR_SIZE 12
+#define S6_GMAC_STATTFCS 0x11C
+#define S6_GMAC_STATTFCS_SIZE 12
+#define S6_GMAC_STATTXCF 0x120
+#define S6_GMAC_STATTXCF_SIZE 12
+#define S6_GMAC_STATTOVR 0x124
+#define S6_GMAC_STATTOVR_SIZE 12
+#define S6_GMAC_STATTUND 0x128
+#define S6_GMAC_STATTUND_SIZE 12
+#define S6_GMAC_STATTFRG 0x12C
+#define S6_GMAC_STATTFRG_SIZE 12
+#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
+#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
+#define S6_GMAC_STATCARRY1_RDRP 0
+#define S6_GMAC_STATCARRY1_RJBR 1
+#define S6_GMAC_STATCARRY1_RFRG 2
+#define S6_GMAC_STATCARRY1_ROVR 3
+#define S6_GMAC_STATCARRY1_RUND 4
+#define S6_GMAC_STATCARRY1_RCSE 5
+#define S6_GMAC_STATCARRY1_RCDE 6
+#define S6_GMAC_STATCARRY1_RFLR 7
+#define S6_GMAC_STATCARRY1_RALN 8
+#define S6_GMAC_STATCARRY1_RXUO 9
+#define S6_GMAC_STATCARRY1_RXPF 10
+#define S6_GMAC_STATCARRY1_RXCF 11
+#define S6_GMAC_STATCARRY1_RBCA 12
+#define S6_GMAC_STATCARRY1_RMCA 13
+#define S6_GMAC_STATCARRY1_RFCS 14
+#define S6_GMAC_STATCARRY1_RPKT 15
+#define S6_GMAC_STATCARRY1_RBYT 16
+#define S6_GMAC_STATCARRY1_TRMGV 25
+#define S6_GMAC_STATCARRY1_TRMAX 26
+#define S6_GMAC_STATCARRY1_TR1K 27
+#define S6_GMAC_STATCARRY1_TR511 28
+#define S6_GMAC_STATCARRY1_TR255 29
+#define S6_GMAC_STATCARRY1_TR127 30
+#define S6_GMAC_STATCARRY1_TR64 31
+#define S6_GMAC_STATCARRY2_TDRP 0
+#define S6_GMAC_STATCARRY2_TPFH 1
+#define S6_GMAC_STATCARRY2_TNCL 2
+#define S6_GMAC_STATCARRY2_TXCL 3
+#define S6_GMAC_STATCARRY2_TLCL 4
+#define S6_GMAC_STATCARRY2_TMCL 5
+#define S6_GMAC_STATCARRY2_TSCL 6
+#define S6_GMAC_STATCARRY2_TEDF 7
+#define S6_GMAC_STATCARRY2_TDFR 8
+#define S6_GMAC_STATCARRY2_TXPF 9
+#define S6_GMAC_STATCARRY2_TBCA 10
+#define S6_GMAC_STATCARRY2_TMCA 11
+#define S6_GMAC_STATCARRY2_TPKT 12
+#define S6_GMAC_STATCARRY2_TBYT 13
+#define S6_GMAC_STATCARRY2_TFRG 14
+#define S6_GMAC_STATCARRY2_TUND 15
+#define S6_GMAC_STATCARRY2_TOVR 16
+#define S6_GMAC_STATCARRY2_TXCF 17
+#define S6_GMAC_STATCARRY2_TFCS 18
+#define S6_GMAC_STATCARRY2_TJBR 19
+
+#define S6_GMAC_HOST_PBLKCTRL 0x140
+#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
+#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
+#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
+#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
+#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
+#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
+#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
+#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
+#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
+#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
+#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
+#define S6_GMAC_HOST_INTMASK 0x144
+#define S6_GMAC_HOST_INTSTAT 0x148
+#define S6_GMAC_HOST_INT_TXBURSTOVER 3
+#define S6_GMAC_HOST_INT_TXPREWOVER 4
+#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
+#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
+#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
+#define S6_GMAC_HOST_RXFIFOHWM 0x14C
+#define S6_GMAC_HOST_CTRLFRAMXP 0x150
+#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
+#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
+#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
+
+#define S6_GMAC_BURST_PREWR 0x1B0
+#define S6_GMAC_BURST_PREWR_LEN 0
+#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_PREWR_CFE 20
+#define S6_GMAC_BURST_PREWR_PPE 21
+#define S6_GMAC_BURST_PREWR_FCS 22
+#define S6_GMAC_BURST_PREWR_PAD 23
+#define S6_GMAC_BURST_POSTRD 0x1D0
+#define S6_GMAC_BURST_POSTRD_LEN 0
+#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
+#define S6_GMAC_BURST_POSTRD_DROP 20
+
+
+/* data handling */
+
+#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
+#define S6_NUM_RX_SKB 16
+#define S6_MAX_FRLEN 1536
+
+struct s6gmac {
+ u32 reg;
+ u32 tx_dma;
+ u32 rx_dma;
+ u32 io;
+ u8 tx_chan;
+ u8 rx_chan;
+ spinlock_t lock;
+ u8 tx_skb_i, tx_skb_o;
+ u8 rx_skb_i, rx_skb_o;
+ struct sk_buff *tx_skb[S6_NUM_TX_SKB];
+ struct sk_buff *rx_skb[S6_NUM_RX_SKB];
+ unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
+ unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
+ struct phy_device *phydev;
+ struct {
+ struct mii_bus *bus;
+ int irq[PHY_MAX_ADDR];
+ } mii;
+ struct {
+ unsigned int mbit;
+ u8 giga;
+ u8 isup;
+ u8 full;
+ } link;
+};
+
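+/*
+ * Refill the RX DMA fifo with freshly allocated skbs.  rx_skb_i and
+ * rx_skb_o are free running u8 producer/consumer indices, so
+ * (u8)(rx_skb_i - rx_skb_o) is the number of buffers currently owned
+ * by the DMA engine.
+ */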
+static void s6gmac_rx_fillfifo(struct s6gmac *pd)
+{
+ struct sk_buff *skb;
+ while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB)
+ && (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan))
+ && (skb = dev_alloc_skb(S6_MAX_FRLEN + 2))) {
+ pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
+ s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
+ pd->io, (u32)skb->data, S6_MAX_FRLEN);
+ }
+}
+
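+/*
+ * Hand completed RX buffers to the stack.  For every finished transfer
+ * the postread register supplies a prefix word with the frame length
+ * and a drop flag; dropped frames are freed, good ones go to netif_rx().
+ */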
+static void s6gmac_rx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 pfx;
+ struct sk_buff *skb;
+ while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
+ s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
+ skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
+ pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
+ if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
+ dev_kfree_skb_irq(skb);
+ } else {
+ skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
+ & S6_GMAC_BURST_POSTRD_LEN_MASK);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_rx(skb);
+ }
+ }
+}
+
+static void s6gmac_tx_interrupt(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
+ s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
+ dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ }
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+}
+
+struct s6gmac_statinf {
+ unsigned reg_size : 4; /* 0: unused */
+ unsigned reg_off : 6;
+ unsigned net_index : 6;
+};
+
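+/*
+ * S6_STATS_C(bit, reg, field) builds one statinf[] entry: the hardware
+ * counter width (stored relative to S6_GMAC_STAT_SIZE_MIN), its word
+ * offset within the statistics register block and the index of the
+ * matching net_device_stats member.  The BUILD_BUG_ON_ZERO() terms are
+ * compile time checks that the values fit the statinf bitfields.
+ */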
+#define S6_STATS_B (8 * sizeof(u32))
+#define S6_STATS_C(b, r, f) [b] = { \
+ BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
+ BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
+ >= (1<<4)) + \
+ r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
+ BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
+ >= ((1<<6)-1)) + \
+ (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
+ BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
+ % sizeof(unsigned long)) + \
+ BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
+ / sizeof(unsigned long)) >= (1<<6))) + \
+ BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
+ != sizeof(unsigned long))) + \
+ (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
+
+static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
+ S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
+}, {
+ S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
+ S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
+} };
+
+static void s6gmac_stats_collect(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf)
+{
+ int b;
+ for (b = 0; b < S6_STATS_B; b++) {
+ if (inf[b].reg_size) {
+ pd->stats[inf[b].net_index] +=
+ readl(pd->reg + S6_GMAC_STAT_REGS
+ + sizeof(u32) * inf[b].reg_off);
+ }
+ }
+}
+
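+/*
+ * Account for hardware counter wrap-arounds signalled by the carry
+ * registers.  Each set bit adds 1 << reg_size to the carry accumulator;
+ * s6gmac_stats() later shifts carry[] left by S6_GMAC_STAT_SIZE_MIN - 1,
+ * so one wrap contributes exactly 2^counter_width to the final value.
+ */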
+static void s6gmac_stats_carry(struct s6gmac *pd,
+ const struct s6gmac_statinf *inf, u32 mask)
+{
+ int b;
+ while (mask) {
+ b = fls(mask) - 1;
+ mask &= ~(1 << b);
+ pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
+ }
+}
+
+static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
+{
+ int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
+ ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
+ return r;
+}
+
+static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
+{
+ u32 mask;
+ mask = s6gmac_stats_pending(pd, carry);
+ if (mask) {
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
+ s6gmac_stats_carry(pd, &statinf[carry][0], mask);
+ }
+}
+
+static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct s6gmac *pd;
+ if (!dev)
+ return IRQ_NONE;
+ pd = netdev_priv(dev);
+ spin_lock(&pd->lock);
+ if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
+ s6gmac_rx_interrupt(dev);
+ s6gmac_rx_fillfifo(pd);
+ if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
+ s6gmac_tx_interrupt(dev);
+ s6gmac_stats_interrupt(pd, 0);
+ s6gmac_stats_interrupt(pd, 1);
+ spin_unlock(&pd->lock);
+ return IRQ_HANDLED;
+}
+
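+/*
+ * Program one of the four destination address filter slots; address and
+ * mask are split into a 32 bit low part and a 16 bit high part.
+ */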
+static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
+ u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
+{
+ writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
+ writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
+ writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
+ writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
+}
+
+static inline void s6gmac_stop_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ writel(0, pd->reg + S6_GMAC_MACCONF1);
+}
+
+static inline void s6gmac_init_device(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int is_rgmii = !!(pd->phydev->supported
+ & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
+#if 0
+ writel(1 << S6_GMAC_MACCONF1_SYNCTX |
+ 1 << S6_GMAC_MACCONF1_SYNCRX |
+ 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
+ 1 << S6_GMAC_MACCONF1_RESTXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESRXFUNC |
+ 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
+ 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
+ pd->reg + S6_GMAC_MACCONF1);
+#endif
+ writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
+ udelay(1000);
+ writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+ writel(1 << S6_GMAC_MACCONF1_TXENA |
+ 1 << S6_GMAC_MACCONF1_RXENA |
+ (dev->flags & IFF_LOOPBACK ? 1 : 0)
+ << S6_GMAC_MACCONF1_LOOPBACK,
+ pd->reg + S6_GMAC_MACCONF1);
+ writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN - ETH_FCS_LEN)) ?
+ dev->mtu + ETH_HLEN + ETH_FCS_LEN : S6_MAX_FRLEN,
+ pd->reg + S6_GMAC_MACMAXFRAMELEN);
+ writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
+ 1 << S6_GMAC_MACCONF2_PADCRCENA |
+ 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
+ (pd->link.giga ?
+ S6_GMAC_MACCONF2_IFMODE_BYTE :
+ S6_GMAC_MACCONF2_IFMODE_NIBBLE)
+ << S6_GMAC_MACCONF2_IFMODE |
+ 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
+ pd->reg + S6_GMAC_MACCONF2);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
+ writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
+ writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
+ 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_STFENREQ |
+ 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
+ pd->reg + S6_GMAC_FIFOCONF0);
+ writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
+ 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
+ pd->reg + S6_GMAC_FIFOCONF3);
+ writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
+ 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_OK |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
+ 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
+ pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
+ 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
+ pd->reg + S6_GMAC_FIFOCONF5);
+ writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
+ 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
+ 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
+ 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
+ 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
+ 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
+ 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
+ pd->reg + S6_GMAC_FIFOCONF4);
+ s6gmac_set_dstaddr(pd, 0,
+ 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 1,
+ dev->dev_addr[5] |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[2] << 24,
+ dev->dev_addr[1] |
+ dev->dev_addr[0] << 8,
+ 0xFFFFFFFF, 0x0000FFFF);
+ s6gmac_set_dstaddr(pd, 2,
+ 0x00000000, 0x00000100, 0x00000000, 0x00000100);
+ s6gmac_set_dstaddr(pd, 3,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000);
+ writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
+ S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
+ 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
+ is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
+ pd->reg + S6_GMAC_HOST_PBLKCTRL);
+}
+
+static void s6mii_enable(struct s6gmac *pd)
+{
+ writel(readl(pd->reg + S6_GMAC_MACCONF1) &
+ ~(1 << S6_GMAC_MACCONF1_SOFTRES),
+ pd->reg + S6_GMAC_MACCONF1);
+ writel((readl(pd->reg + S6_GMAC_MACMIICONF)
+ & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
+ | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
+ pd->reg + S6_GMAC_MACMIICONF);
+}
+
+static int s6mii_busy(struct s6gmac *pd, int tmo)
+{
+ while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
+ if (--tmo == 0)
+ return -ETIME;
+ udelay(64);
+ }
+ return 0;
+}
+
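+/*
+ * MDIO accessors for phylib.  A read programs the PHY/register address,
+ * pulses the READ command bit, waits for the busy flag to clear and
+ * returns the low 16 bits of the MII status register; a write stores
+ * the value through the MII control register instead.
+ */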
+static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
+ writel(0, pd->reg + S6_GMAC_MACMIICMD);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
+}
+
+static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
+ regnum << S6_GMAC_MACMIIADDR_REG,
+ pd->reg + S6_GMAC_MACMIIADDR);
+ writel(value, pd->reg + S6_GMAC_MACMIICTRL);
+ if (s6mii_busy(pd, 256))
+ return -ETIME;
+ return 0;
+}
+
+static int s6mii_reset(struct mii_bus *bus)
+{
+ struct s6gmac *pd = bus->priv;
+ s6mii_enable(pd);
+ if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
+ return -ETIME;
+ return 0;
+}
+
+static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
+{
+ u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
+ pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
+ switch (pd->link.mbit) {
+ case 10:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 100:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ case 1000:
+ pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
+ break;
+ default:
+ return;
+ }
+ writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
+}
+
+static inline void s6gmac_linkisup(struct net_device *dev, int isup)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+
+ pd->link.full = phydev->duplex;
+ pd->link.giga = (phydev->speed == 1000);
+ if (pd->link.mbit != phydev->speed) {
+ pd->link.mbit = phydev->speed;
+ s6gmac_set_rgmii_txclock(pd);
+ }
+ pd->link.isup = isup;
+ if (isup)
+ netif_carrier_on(dev);
+ phy_print_status(phydev);
+}
+
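+/*
+ * phylib link change callback: reprogram duplex and interface mode
+ * (nibble vs. byte) to match the PHY and stop or restart the TX queue
+ * accordingly.
+ */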
+static void s6gmac_adjust_link(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct phy_device *phydev = pd->phydev;
+ if (pd->link.isup &&
+ (!phydev->link ||
+ (pd->link.mbit != phydev->speed) ||
+ (pd->link.full != phydev->duplex))) {
+ pd->link.isup = 0;
+ netif_tx_disable(dev);
+ if (!phydev->link) {
+ netif_carrier_off(dev);
+ phy_print_status(phydev);
+ }
+ }
+ if (!pd->link.isup && phydev->link) {
+ if (pd->link.full != phydev->duplex) {
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ if (phydev->duplex)
+ maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
+ else
+ maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (pd->link.giga != (phydev->speed == 1000)) {
+ u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
+ u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
+ maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
+ << S6_GMAC_MACCONF2_IFMODE);
+ if (phydev->speed == 1000) {
+ fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
+ << S6_GMAC_MACCONF2_IFMODE;
+ } else {
+ fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
+ maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
+ << S6_GMAC_MACCONF2_IFMODE;
+ }
+ writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
+ writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
+ }
+
+ if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_wake_queue(dev);
+ s6gmac_linkisup(dev, 1);
+ }
+}
+
+static inline int s6gmac_phy_start(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ int i = 0;
+ struct phy_device *p = NULL;
+ while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
+ i++;
+ if (!p) {
+ printk(KERN_ERR "%s: no PHY found\n", dev->name);
+ return -ENODEV;
+ }
+ p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII);
+ if (IS_ERR(p)) {
+ printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ return PTR_ERR(p);
+ }
+ p->supported &= PHY_GBIT_FEATURES;
+ p->advertising = p->supported;
+ pd->phydev = p;
+ return 0;
+}
+
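+/*
+ * Clear the statistics carry bits of interest and unmask them so that a
+ * counter wrap raises a carry interrupt.
+ */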
+static inline void s6gmac_init_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ u32 mask;
+ mask = 1 << S6_GMAC_STATCARRY1_RDRP |
+ 1 << S6_GMAC_STATCARRY1_RJBR |
+ 1 << S6_GMAC_STATCARRY1_RFRG |
+ 1 << S6_GMAC_STATCARRY1_ROVR |
+ 1 << S6_GMAC_STATCARRY1_RUND |
+ 1 << S6_GMAC_STATCARRY1_RCDE |
+ 1 << S6_GMAC_STATCARRY1_RFLR |
+ 1 << S6_GMAC_STATCARRY1_RALN |
+ 1 << S6_GMAC_STATCARRY1_RMCA |
+ 1 << S6_GMAC_STATCARRY1_RFCS |
+ 1 << S6_GMAC_STATCARRY1_RPKT |
+ 1 << S6_GMAC_STATCARRY1_RBYT;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
+ mask = 1 << S6_GMAC_STATCARRY2_TDRP |
+ 1 << S6_GMAC_STATCARRY2_TNCL |
+ 1 << S6_GMAC_STATCARRY2_TXCL |
+ 1 << S6_GMAC_STATCARRY2_TEDF |
+ 1 << S6_GMAC_STATCARRY2_TPKT |
+ 1 << S6_GMAC_STATCARRY2_TBYT |
+ 1 << S6_GMAC_STATCARRY2_TFRG |
+ 1 << S6_GMAC_STATCARRY2_TUND |
+ 1 << S6_GMAC_STATCARRY2_TOVR |
+ 1 << S6_GMAC_STATCARRY2_TFCS |
+ 1 << S6_GMAC_STATCARRY2_TJBR;
+ writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
+ writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
+}
+
+static inline void s6gmac_init_dmac(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
+ s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
+ s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
+ s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
+}
+
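+/*
+ * Transmit path: write a pre-write word with the frame length and the
+ * FCS/padding options, hand the buffer to the TX DMA channel and
+ * remember the skb so the completion interrupt can free it.
+ */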
+static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ dev->trans_start = jiffies;
+ writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
+ 0 << S6_GMAC_BURST_PREWR_CFE |
+ 1 << S6_GMAC_BURST_PREWR_PPE |
+ 1 << S6_GMAC_BURST_PREWR_FCS |
+ ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
+ pd->reg + S6_GMAC_BURST_PREWR);
+ s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
+ (u32)skb->data, pd->io, skb->len);
+ if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
+ netif_stop_queue(dev);
+ if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
+ printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
+ pd->tx_skb_o, pd->tx_skb_i);
+ BUG();
+ }
+ pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
+static void s6gmac_tx_timeout(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_tx_interrupt(dev);
+ spin_unlock_irqrestore(&pd->lock, flags);
+}
+
+static int s6gmac_open(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ phy_read_status(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ pd->link.mbit = 0;
+ s6gmac_linkisup(dev, pd->phydev->link);
+ s6gmac_init_device(dev);
+ s6gmac_init_stats(dev);
+ s6gmac_init_dmac(dev);
+ s6gmac_rx_fillfifo(pd);
+ s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
+ 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
+ s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
+ 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
+ writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
+ 0 << S6_GMAC_HOST_INT_TXPREWOVER |
+ 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
+ 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
+ pd->reg + S6_GMAC_HOST_INTMASK);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ phy_start(pd->phydev);
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int s6gmac_stop(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ unsigned long flags;
+ netif_stop_queue(dev);
+ phy_stop(pd->phydev);
+ spin_lock_irqsave(&pd->lock, flags);
+ s6gmac_init_dmac(dev);
+ s6gmac_stop_device(dev);
+ while (pd->tx_skb_i != pd->tx_skb_o)
+ dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
+ while (pd->rx_skb_i != pd->rx_skb_o)
+ dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ return 0;
+}
+
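+/*
+ * Assemble net_device_stats from the wrap-around accumulators and the
+ * current hardware counters.  The loop repeats while further carry
+ * events are pending so that the snapshot stays consistent.
+ */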
+static struct net_device_stats *s6gmac_stats(struct net_device *dev)
+{
+ struct s6gmac *pd = netdev_priv(dev);
+ struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
+ int i;
+ do {
+ unsigned long flags;
+ spin_lock_irqsave(&pd->lock, flags);
+ for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++)
+ pd->stats[i] =
+ pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
+ s6gmac_stats_collect(pd, &statinf[0][0]);
+ s6gmac_stats_collect(pd, &statinf[1][0]);
+ i = s6gmac_stats_pending(pd, 0) |
+ s6gmac_stats_pending(pd, 1);
+ spin_unlock_irqrestore(&pd->lock, flags);
+ } while (i);
+ st->rx_errors = st->rx_crc_errors +
+ st->rx_frame_errors +
+ st->rx_length_errors +
+ st->rx_missed_errors;
+ st->tx_errors += st->tx_aborted_errors;
+ return st;
+}
+
+static int __devinit s6gmac_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct s6gmac *pd;
+ int res;
+ unsigned long i;
+ struct mii_bus *mb;
+ dev = alloc_etherdev(sizeof(*pd));
+ if (!dev) {
+ printk(KERN_ERR DRV_PRMT "etherdev alloc failed, aborting.\n");
+ return -ENOMEM;
+ }
+ dev->open = s6gmac_open;
+ dev->stop = s6gmac_stop;
+ dev->hard_start_xmit = s6gmac_tx;
+ dev->tx_timeout = s6gmac_tx_timeout;
+ dev->watchdog_timeo = HZ;
+ dev->get_stats = s6gmac_stats;
+ dev->irq = platform_get_irq(pdev, 0);
+ pd = netdev_priv(dev);
+ memset(pd, 0, sizeof(*pd));
+ spin_lock_init(&pd->lock);
+ pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
+ pd->tx_dma = DMA_MASK_DMAC(i);
+ pd->tx_chan = DMA_INDEX_CHNL(i);
+ i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
+ pd->rx_dma = DMA_MASK_DMAC(i);
+ pd->rx_chan = DMA_INDEX_CHNL(i);
+ pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
+ res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
+ goto errirq;
+ }
+ res = register_netdev(dev);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "error registering device %s\n",
+ dev->name);
+ goto errdev;
+ }
+ mb = mdiobus_alloc();
+ if (!mb) {
+ printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
+ res = -ENOMEM;
+ goto errmii;
+ }
+ mb->name = "s6gmac_mii";
+ mb->read = s6mii_read;
+ mb->write = s6mii_write;
+ mb->reset = s6mii_reset;
+ mb->priv = pd;
+ snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+ mb->phy_mask = ~(1 << 0);
+ mb->irq = &pd->mii.irq[0];
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ int n = platform_get_irq(pdev, i + 1);
+ if (n < 0)
+ n = PHY_POLL;
+ pd->mii.irq[i] = n;
+ }
+ res = mdiobus_register(mb);
+ if (res) {
+ printk(KERN_ERR DRV_PRMT "error registering mii bus\n");
+ goto errmiibus;
+ }
+ pd->mii.bus = mb;
+ res = s6gmac_phy_start(dev);
+ if (res)
+ goto errphy;
+ platform_set_drvdata(pdev, dev);
+ return 0;
+errphy:
+ mdiobus_unregister(mb);
+errmiibus:
+ mdiobus_free(mb);
+errmii:
+ unregister_netdev(dev);
+errdev:
+ free_irq(dev->irq, dev);
+errirq:
+ free_netdev(dev);
+ return res;
+}
+
+static int __devexit s6gmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ if (dev) {
+ struct s6gmac *pd = netdev_priv(dev);
+ mdiobus_unregister(pd->mii.bus);
+ mdiobus_free(pd->mii.bus);
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+ free_netdev(dev);
+ platform_set_drvdata(pdev, NULL);
+ }
+ return 0;
+}
+
+static struct platform_driver s6gmac_driver = {
+ .probe = s6gmac_probe,
+ .remove = __devexit_p(s6gmac_remove),
+ .driver = {
+ .name = "s6gmac",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init s6gmac_init(void)
+{
+ printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
+ return platform_driver_register(&s6gmac_driver);
+}
+
+static void __exit s6gmac_exit(void)
+{
+ platform_driver_unregister(&s6gmac_driver);
+}
+
+module_init(s6gmac_init);
+module_exit(s6gmac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("S6105 on-chip Ethernet driver");
+MODULE_AUTHOR("Oskar Schirmer <os@emlix.com>");
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3717569828bf..a906d3998131 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -169,10 +169,12 @@ config USB_NET_CDCETHER
The Linux-USB CDC Ethernet Gadget driver is an open implementation.
This driver should work with at least the following devices:
+ * Dell Wireless 5530 HSPA
* Ericsson PipeRider (all variants)
+ * Ericsson Mobile Broadband Module (all variants)
* Motorola (DM100 and SB4100)
* Broadcom Cable Modem (reference design)
- * Toshiba PCX1100U
+ * Toshiba (PCX1100U and F3507g)
* ...
This driver creates an interface named "ethX", where X depends on
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 01fd528306ec..4a6aff579403 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -533,6 +533,31 @@ static const struct usb_device_id products [] = {
USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3507g ver. 2 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3607gw */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Ericsson F3307 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Toshiba F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
+}, {
+ /* Dell F3507g */
+ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &cdc_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index c66b9c324f54..ca39ace0b0eb 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -307,9 +307,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x1286, 0x8001), // "blob" bootloader
.driver_info = (unsigned long) &blob_info,
}, {
- // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
- // e.g. Gumstix, current OpenZaurus, ...
- USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
+ // Linux Ethernet/RNDIS gadget, mostly on PXA, second config
+ // e.g. Gumstix, current OpenZaurus, ... or anything else
+ // that just enables this gadget option.
+ USB_DEVICE (0x0525, 0xa4a2),
.driver_info = (unsigned long) &linuxdev_info,
},
#endif
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 2138535f2339..73acbd244aa1 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
- pegasus->dr.wValue = 0;
+ pegasus->dr.wValue = cpu_to_le16(0);
pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
pegasus->dr.wLength = cpu_to_le16(3);
pegasus->ctrl_urb->transfer_buffer_length = 3;
@@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
int i;
__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
int ret;
+ __le16 le_data = cpu_to_le16(data);
set_registers(pegasus, EpromOffset, 4, d);
enable_eprom_write(pegasus);
set_register(pegasus, EpromOffset, index);
- set_registers(pegasus, EpromData, 2, &data);
+ set_registers(pegasus, EpromData, 2, &le_data);
set_register(pegasus, EpromCtrl, EPROM_WRITE);
for (i = 0; i < REG_TIMEOUT; i++) {
@@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
static inline void disable_net_traffic(pegasus_t * pegasus)
{
- int tmp = 0;
+ __le16 tmp = cpu_to_le16(0);
- set_registers(pegasus, EthCtrl0, 2, &tmp);
+ set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
}
static inline void get_interrupt_interval(pegasus_t * pegasus)
{
- __u8 data[2];
+ u16 data;
+ u8 interval;
- read_eprom_word(pegasus, 4, (__u16 *) data);
+ read_eprom_word(pegasus, 4, &data);
+ interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
- if (data[1] < 0x80) {
+ if (interval < 0x80) {
if (netif_msg_timer(pegasus))
dev_info(&pegasus->intf->dev, "intr interval "
"changed from %ums to %ums\n",
- data[1], 0x80);
- data[1] = 0x80;
+ interval, 0x80);
+ interval = 0x80;
+ data = (data & 0x00FF) | ((u16)interval << 8);
#ifdef PEGASUS_WRITE_EEPROM
- write_eprom_word(pegasus, 4, *(__u16 *) data);
+ write_eprom_word(pegasus, 4, data);
#endif
}
}
- pegasus->intr_interval = data[1];
+ pegasus->intr_interval = interval;
}
static void set_carrier(struct net_device *net)
@@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct usb_device *udev)
/* Special quirk to keep the driver from handling the Belkin Bluetooth
* dongle which happens to have the same ID.
*/
- if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
+ if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
+ (udd->idProduct == cpu_to_le16(0x0121)) &&
(udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
(udd->bDeviceProtocol == 1))
return 1;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index b02f7adff5dc..3ba35956327a 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1847,7 +1847,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
*/
if (tdinfo->skb_dma) {
- pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN);
+ pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 55f7de09d134..ea045151f953 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -538,6 +538,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
sc->iobase = mem; /* So we can unmap it on detach */
sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
sc->opmode = NL80211_IFTYPE_STATION;
+ sc->bintval = 1000;
mutex_init(&sc->lock);
spin_lock_init(&sc->rxbuflock);
spin_lock_init(&sc->txbuflock);
@@ -686,6 +687,13 @@ ath5k_pci_resume(struct pci_dev *pdev)
if (err)
return err;
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_write_config_byte(pdev, 0x41, 0);
+
err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
if (err) {
ATH5K_ERR(sc, "request_irq failed\n");
@@ -2748,9 +2756,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
- /* Set to a reasonable value. Note that this will
- * be set to mac80211's value at ath5k_config(). */
- sc->bintval = 1000;
ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
ret = 0;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9f49a3251d4d..66a6c1f5022a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1196,8 +1196,8 @@ void ath_radio_disable(struct ath_softc *sc)
ath9k_hw_phy_disable(ah);
ath9k_hw_configpcipowersave(ah, 1);
- ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
ath9k_ps_restore(sc);
+ ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
}
/*******************/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index ccdf20a2e9be..170c5b32e49b 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -87,6 +87,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct ath_softc *sc;
struct ieee80211_hw *hw;
u8 csz;
+ u32 val;
int ret = 0;
struct ath_hw *ah;
@@ -133,6 +134,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ /*
+ * Disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
ret = pci_request_region(pdev, 0, "ath9k");
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
@@ -239,12 +248,21 @@ static int ath_pci_resume(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ u32 val;
int err;
err = pci_enable_device(pdev);
if (err)
return err;
pci_restore_state(pdev);
+ /*
+ * Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* Enable LED */
ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index f99f3a76df3f..cece1c4c6bda 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -539,11 +539,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (ath_beacon_dtim_pending_cab(skb)) {
/*
* Remain awake waiting for buffered broadcast/multicast
- * frames.
+ * frames. If the last broadcast/multicast frame is not
+ * received properly, the next beacon frame will work as
+ * a backup trigger for returning into NETWORK SLEEP state,
+ * so we are waiting for it as well.
*/
DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB;
+ sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
return;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 635c16ee6186..77c339f8516c 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -288,6 +288,7 @@ struct iwm_priv {
u8 *eeprom;
struct timer_list watchdog;
struct work_struct reset_worker;
+ struct mutex mutex;
struct rfkill *rfkill;
char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -315,8 +316,11 @@ extern const struct iw_handler_def iwm_iw_handler_def;
void *iwm_if_alloc(int sizeof_bus, struct device *dev,
struct iwm_if_ops *if_ops);
void iwm_if_free(struct iwm_priv *iwm);
+int iwm_if_add(struct iwm_priv *iwm);
+void iwm_if_remove(struct iwm_priv *iwm);
int iwm_mode_to_nl80211_iftype(int mode);
int iwm_priv_init(struct iwm_priv *iwm);
+void iwm_priv_deinit(struct iwm_priv *iwm);
void iwm_reset(struct iwm_priv *iwm);
void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
struct iwm_umac_notif_alive *alive);
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 6a2640f16b6d..8be206d58222 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -112,6 +112,9 @@ static void iwm_statistics_request(struct work_struct *work)
iwm_send_umac_stats_req(iwm, 0);
}
+int __iwm_up(struct iwm_priv *iwm);
+int __iwm_down(struct iwm_priv *iwm);
+
static void iwm_reset_worker(struct work_struct *work)
{
struct iwm_priv *iwm;
@@ -120,6 +123,19 @@ static void iwm_reset_worker(struct work_struct *work)
iwm = container_of(work, struct iwm_priv, reset_worker);
+ /*
+ * XXX: iwm->mutex exists purely for this reset work: the only other
+ * callers of iwm_up and iwm_down are the netdev ndo_open and ndo_stop
+ * callbacks, which are already serialized by rtnl.  Please remove
+ * iwm->mutex as well once iwm_reset_worker() is no longer required.
+ */
+ if (!mutex_trylock(&iwm->mutex)) {
+ IWM_WARN(iwm, "Interface bring-up/down in progress, "
+ "skipping driver reset\n");
+ return;
+ }
+
if (iwm->umac_profile_active) {
profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
if (profile)
@@ -128,10 +144,10 @@ static void iwm_reset_worker(struct work_struct *work)
IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
}
- iwm_down(iwm);
+ __iwm_down(iwm);
while (retry++ < 3) {
- ret = iwm_up(iwm);
+ ret = __iwm_up(iwm);
if (!ret)
break;
@@ -142,7 +158,7 @@ static void iwm_reset_worker(struct work_struct *work)
IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
kfree(profile);
- return;
+ goto out;
}
if (profile) {
@@ -151,6 +167,9 @@ static void iwm_reset_worker(struct work_struct *work)
iwm_send_mlme_profile(iwm);
kfree(profile);
}
+
+ out:
+ mutex_unlock(&iwm->mutex);
}
static void iwm_watchdog(unsigned long data)
@@ -215,10 +234,21 @@ int iwm_priv_init(struct iwm_priv *iwm)
init_timer(&iwm->watchdog);
iwm->watchdog.function = iwm_watchdog;
iwm->watchdog.data = (unsigned long)iwm;
+ mutex_init(&iwm->mutex);
return 0;
}
+void iwm_priv_deinit(struct iwm_priv *iwm)
+{
+ int i;
+
+ for (i = 0; i < IWM_TX_QUEUES; i++)
+ destroy_workqueue(iwm->txq[i].wq);
+
+ destroy_workqueue(iwm->rx_wq);
+}
+
/*
* We reset all the structures, and we reset the UMAC.
* After calling this routine, you're expected to reload
@@ -466,7 +496,7 @@ void iwm_link_off(struct iwm_priv *iwm)
iwm_rx_free(iwm);
- cancel_delayed_work(&iwm->stats_request);
+ cancel_delayed_work_sync(&iwm->stats_request);
memset(wstats, 0, sizeof(struct iw_statistics));
wstats->qual.updated = IW_QUAL_ALL_INVALID;
@@ -511,7 +541,7 @@ static int iwm_channels_init(struct iwm_priv *iwm)
return 0;
}
-int iwm_up(struct iwm_priv *iwm)
+int __iwm_up(struct iwm_priv *iwm)
{
int ret;
struct iwm_notif *notif_reboot, *notif_ack = NULL;
@@ -647,7 +677,18 @@ int iwm_up(struct iwm_priv *iwm)
return -EIO;
}
-int iwm_down(struct iwm_priv *iwm)
+int iwm_up(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_up(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
+
+int __iwm_down(struct iwm_priv *iwm)
{
int ret;
@@ -678,3 +719,14 @@ int iwm_down(struct iwm_priv *iwm)
return 0;
}
+
+int iwm_down(struct iwm_priv *iwm)
+{
+ int ret;
+
+ mutex_lock(&iwm->mutex);
+ ret = __iwm_down(iwm);
+ mutex_unlock(&iwm->mutex);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 68e2c3b6c7a1..aaa20c6885c8 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -114,32 +114,31 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
iwm = wdev_to_iwm(wdev);
iwm->bus_ops = if_ops;
iwm->wdev = wdev;
- iwm_priv_init(iwm);
+
+ ret = iwm_priv_init(iwm);
+ if (ret) {
+ dev_err(dev, "failed to init iwm_priv\n");
+ goto out_wdev;
+ }
+
wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
- ndev = alloc_netdev_mq(0, "wlan%d", ether_setup,
- IWM_TX_QUEUES);
+ ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
if (!ndev) {
dev_err(dev, "no memory for network device instance\n");
- goto out_wdev;
+ goto out_priv;
}
ndev->netdev_ops = &iwm_netdev_ops;
ndev->wireless_handlers = &iwm_iw_handler_def;
ndev->ieee80211_ptr = wdev;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- ret = register_netdev(ndev);
- if (ret < 0) {
- dev_err(dev, "Failed to register netdev: %d\n", ret);
- goto out_ndev;
- }
-
wdev->netdev = ndev;
return iwm;
- out_ndev:
- free_netdev(ndev);
+ out_priv:
+ iwm_priv_deinit(iwm);
out_wdev:
iwm_wdev_free(iwm);
@@ -148,15 +147,29 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
void iwm_if_free(struct iwm_priv *iwm)
{
- int i;
-
if (!iwm_to_ndev(iwm))
return;
- unregister_netdev(iwm_to_ndev(iwm));
free_netdev(iwm_to_ndev(iwm));
iwm_wdev_free(iwm);
- destroy_workqueue(iwm->rx_wq);
- for (i = 0; i < IWM_TX_QUEUES; i++)
- destroy_workqueue(iwm->txq[i].wq);
+ iwm_priv_deinit(iwm);
+}
+
+int iwm_if_add(struct iwm_priv *iwm)
+{
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ int ret;
+
+ ret = register_netdev(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void iwm_if_remove(struct iwm_priv *iwm)
+{
+ unregister_netdev(iwm_to_ndev(iwm));
}
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index b54da677b371..916681837fd2 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -454,10 +454,18 @@ static int iwm_sdio_probe(struct sdio_func *func,
INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
+ ret = iwm_if_add(iwm);
+ if (ret) {
+ dev_err(dev, "add SDIO interface failed\n");
+ goto destroy_wq;
+ }
+
dev_info(dev, "IWM SDIO probe\n");
return 0;
+ destroy_wq:
+ destroy_workqueue(hw->isr_wq);
debugfs_exit:
iwm_debugfs_exit(iwm);
if_free:
@@ -471,9 +479,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
struct iwm_priv *iwm = hw_to_iwm(hw);
struct device *dev = &func->dev;
+ iwm_if_remove(iwm);
+ destroy_workqueue(hw->isr_wq);
iwm_debugfs_exit(iwm);
iwm_if_free(iwm);
- destroy_workqueue(hw->isr_wq);
sdio_set_drvdata(func, NULL);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index f0e5e943f6e3..14a19baff214 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -67,6 +67,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 151bf5bc8afe..1032d5fdbd42 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1471,11 +1471,13 @@ static void __devinit decode_smsc(int efer, int key, int devid, int devrev)
static void __devinit winbond_check(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval, devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without key */
outb(0x20, io);
x_devid = inb(io + 1);
@@ -1495,6 +1497,8 @@ static void __devinit winbond_check(int io, int key)
oldid = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if ((x_devid == devid) && (x_devrev == devrev) && (x_oldid == oldid))
goto out; /* protection against false positives */
@@ -1505,11 +1509,15 @@ out:
static void __devinit winbond_check2(int io, int key)
{
- int devid, devrev, oldid, x_devid, x_devrev, x_oldid;
+ int origval[3], devid, devrev, oldid, x_devid, x_devrev, x_oldid;
if (!request_region(io, 3, __func__))
return;
+ origval[0] = inb(io); /* Save original values */
+ origval[1] = inb(io + 1);
+ origval[2] = inb(io + 2);
+
/* First probe without the key */
outb(0x20, io + 2);
x_devid = inb(io + 2);
@@ -1528,6 +1536,10 @@ static void __devinit winbond_check2(int io, int key)
oldid = inb(io + 2);
outb(0xaa, io); /* Magic Seal */
+ outb(origval[0], io); /* in case we poked some entirely different hardware */
+ outb(origval[1], io + 1);
+ outb(origval[2], io + 2);
+
if (x_devid == devid && x_devrev == devrev && x_oldid == oldid)
goto out; /* protection against false positives */
@@ -1538,11 +1550,13 @@ out:
static void __devinit smsc_check(int io, int key)
{
- int id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
+ int origval, id, rev, oldid, oldrev, x_id, x_rev, x_oldid, x_oldrev;
if (!request_region(io, 3, __func__))
return;
+ origval = inb(io); /* Save original value */
+
/* First probe without the key */
outb(0x0d, io);
x_oldid = inb(io + 1);
@@ -1566,6 +1580,8 @@ static void __devinit smsc_check(int io, int key)
rev = inb(io + 1);
outb(0xaa, io); /* Magic Seal */
+ outb(origval, io); /* in case we poked some entirely different hardware */
+
if (x_id == id && x_oldrev == oldrev &&
x_oldid == oldid && x_rev == rev)
goto out; /* protection against false positives */
@@ -1602,11 +1618,12 @@ static void __devinit detect_and_report_smsc(void)
static void __devinit detect_and_report_it87(void)
{
u16 dev;
- u8 r;
+ u8 origval, r;
if (verbose_probing)
printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
- if (!request_region(0x2e, 1, __func__))
+ if (!request_region(0x2e, 2, __func__))
return;
+ origval = inb(0x2e); /* Save original value */
outb(0x87, 0x2e);
outb(0x01, 0x2e);
outb(0x55, 0x2e);
@@ -1626,8 +1643,10 @@ static void __devinit detect_and_report_it87(void)
outb(r | 8, 0x2F);
outb(0x02, 0x2E); /* Lock */
outb(0x02, 0x2F);
+ } else {
+ outb(origval, 0x2e); /* Oops, sorry to disturb */
}
- release_region(0x2e, 1);
+ release_region(0x2e, 2);
}
#endif /* CONFIG_PARPORT_PC_SUPERIO */
@@ -2271,6 +2290,9 @@ struct parport *parport_pc_probe_port(unsigned long int base,
if (IS_ERR(pdev))
return NULL;
dev = &pdev->dev;
+
+ dev->coherent_dma_mask = DMA_BIT_MASK(24);
+ dev->dma_mask = &dev->coherent_dma_mask;
}
ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index b77ae6794275..1ebd6b4c743b 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,10 +2,11 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
+obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o
obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSFS) += slot.o
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 0f3706512686..db23200c4874 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -66,6 +66,25 @@ EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
+/**
+ * pci_bus_set_ops - Set raw operations of pci bus
+ * @bus: pci bus struct
+ * @ops: new raw operations
+ *
+ * Return previous raw operations
+ */
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
+{
+ struct pci_ops *old_ops;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pci_lock, flags);
+ old_ops = bus->ops;
+ bus->ops = ops;
+ spin_unlock_irqrestore(&pci_lock, flags);
+ return old_ops;
+}
+EXPORT_SYMBOL(pci_bus_set_ops);
/**
* pci_read_vpd - Read one entry from Vital Product Data
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 97a8194063b5..cef28a79103f 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -41,9 +41,14 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
void *alignf_data)
{
int i, ret = -ENOMEM;
+ resource_size_t max = -1;
type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
+ /* don't allocate too high if the pref mem doesn't support 64 bit */
+ if (!(res->flags & IORESOURCE_MEM_64))
+ max = PCIBIOS_MAX_MEM_32;
+
for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
struct resource *r = bus->resource[i];
if (!r)
@@ -62,7 +67,7 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
/* Ok, try it out.. */
ret = allocate_resource(r, res, size,
r->start ? : min,
- -1, align,
+ max, align,
alignf, alignf_data);
if (ret == 0)
break;
@@ -201,13 +206,18 @@ void pci_enable_bridges(struct pci_bus *bus)
* Walk the given bus, including any bridged devices
* on buses under this bus. Call the provided callback
* on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ *
*/
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata)
{
struct pci_dev *dev;
struct pci_bus *bus;
struct list_head *next;
+ int retval;
bus = top;
down_read(&pci_bus_sem);
@@ -231,8 +241,10 @@ void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
/* Run device routines with the device locked */
down(&dev->dev.sem);
- cb(dev, userdata);
+ retval = cb(dev, userdata);
up(&dev->dev.sem);
+ if (retval)
+ break;
}
up_read(&pci_bus_sem);
}
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index fa3a11365ec3..7b287cb38b7a 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -267,6 +267,84 @@ rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
}
return ret;
}
+
+static LIST_HEAD(dmar_atsr_units);
+
+static int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
+{
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+ atsru = kzalloc(sizeof(*atsru), GFP_KERNEL);
+ if (!atsru)
+ return -ENOMEM;
+
+ atsru->hdr = hdr;
+ atsru->include_all = atsr->flags & 0x1;
+
+ list_add(&atsru->list, &dmar_atsr_units);
+
+ return 0;
+}
+
+static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
+{
+ int rc;
+ struct acpi_dmar_atsr *atsr;
+
+ if (atsru->include_all)
+ return 0;
+
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ rc = dmar_parse_dev_scope((void *)(atsr + 1),
+ (void *)atsr + atsr->header.length,
+ &atsru->devices_cnt, &atsru->devices,
+ atsr->segment);
+ if (rc || !atsru->devices_cnt) {
+ list_del(&atsru->list);
+ kfree(atsru);
+ }
+
+ return rc;
+}
+
+int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+{
+ int i;
+ struct pci_bus *bus;
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+ list_for_each_entry(atsru, &dmar_atsr_units, list) {
+ atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
+ if (atsr->segment == pci_domain_nr(dev->bus))
+ goto found;
+ }
+
+ return 0;
+
+found:
+ for (bus = dev->bus; bus; bus = bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (!bridge || !bridge->is_pcie ||
+ bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if (bridge->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ for (i = 0; i < atsru->devices_cnt; i++)
+ if (atsru->devices[i] == bridge)
+ return 1;
+ break;
+ }
+ }
+
+ if (atsru->include_all)
+ return 1;
+
+ return 0;
+}
#endif
static void __init
@@ -274,22 +352,28 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
struct acpi_dmar_hardware_unit *drhd;
struct acpi_dmar_reserved_memory *rmrr;
+ struct acpi_dmar_atsr *atsr;
switch (header->type) {
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
- drhd = (struct acpi_dmar_hardware_unit *)header;
+ drhd = container_of(header, struct acpi_dmar_hardware_unit,
+ header);
printk (KERN_INFO PREFIX
- "DRHD (flags: 0x%08x)base: 0x%016Lx\n",
- drhd->flags, (unsigned long long)drhd->address);
+ "DRHD base: %#016Lx flags: %#x\n",
+ (unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
- rmrr = (struct acpi_dmar_reserved_memory *)header;
-
+ rmrr = container_of(header, struct acpi_dmar_reserved_memory,
+ header);
printk (KERN_INFO PREFIX
- "RMRR base: 0x%016Lx end: 0x%016Lx\n",
+ "RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
+ case ACPI_DMAR_TYPE_ATSR:
+ atsr = container_of(header, struct acpi_dmar_atsr, header);
+ printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ break;
}
}
@@ -363,6 +447,11 @@ parse_dmar_table(void)
ret = dmar_parse_one_rmrr(entry_header);
#endif
break;
+ case ACPI_DMAR_TYPE_ATSR:
+#ifdef CONFIG_DMAR
+ ret = dmar_parse_one_atsr(entry_header);
+#endif
+ break;
default:
printk(KERN_WARNING PREFIX
"Unknown DMAR structure type\n");
@@ -431,11 +520,19 @@ int __init dmar_dev_scope_init(void)
#ifdef CONFIG_DMAR
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
+ struct dmar_atsr_unit *atsr, *atsr_n;
+
list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
ret = rmrr_parse_dev(rmrr);
if (ret)
return ret;
}
+
+ list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+ ret = atsr_parse_dev(atsr);
+ if (ret)
+ return ret;
+ }
}
#endif
@@ -468,6 +565,9 @@ int __init dmar_table_init(void)
#ifdef CONFIG_DMAR
if (list_empty(&dmar_rmrr_units))
printk(KERN_INFO PREFIX "No RMRR found\n");
+
+ if (list_empty(&dmar_atsr_units))
+ printk(KERN_INFO PREFIX "No ATSR found\n");
#endif
#ifdef CONFIG_INTR_REMAP
@@ -515,6 +615,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
+ int msagaw = 0;
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -535,12 +636,20 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
+ goto error;
+ }
+ msagaw = iommu_calculate_max_sagaw(iommu);
+ if (msagaw < 0) {
+ printk(KERN_ERR
+ "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto error;
}
#endif
iommu->agaw = agaw;
+ iommu->msagaw = msagaw;
/* the registers might be more than one page */
map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
@@ -590,7 +699,8 @@ void free_iommu(struct intel_iommu *iommu)
*/
static inline void reclaim_free_desc(struct q_inval *qi)
{
- while (qi->desc_status[qi->free_tail] == QI_DONE) {
+ while (qi->desc_status[qi->free_tail] == QI_DONE ||
+ qi->desc_status[qi->free_tail] == QI_ABORT) {
qi->desc_status[qi->free_tail] = QI_FREE;
qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
qi->free_cnt++;
@@ -600,10 +710,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
static int qi_check_fault(struct intel_iommu *iommu, int index)
{
u32 fault;
- int head;
+ int head, tail;
struct q_inval *qi = iommu->qi;
int wait_index = (index + 1) % QI_LENGTH;
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+
fault = readl(iommu->reg + DMAR_FSTS_REG);
/*
@@ -613,7 +726,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
*/
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
- if ((head >> 4) == index) {
+ if ((head >> DMAR_IQ_SHIFT) == index) {
+ printk(KERN_ERR "VT-d detected invalid descriptor: "
+ "low=%llx, high=%llx\n",
+ (unsigned long long)qi->desc[index].low,
+ (unsigned long long)qi->desc[index].high);
memcpy(&qi->desc[index], &qi->desc[wait_index],
sizeof(struct qi_desc));
__iommu_flush_cache(iommu, &qi->desc[index],
@@ -623,6 +740,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
}
}
+ /*
+ * If ITE happens, all pending wait_desc commands are aborted.
+ * No new descriptors are fetched until the ITE is cleared.
+ */
+ if (fault & DMA_FSTS_ITE) {
+ head = readl(iommu->reg + DMAR_IQH_REG);
+ head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+ head |= 1;
+ tail = readl(iommu->reg + DMAR_IQT_REG);
+ tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+ writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+ do {
+ if (qi->desc_status[head] == QI_IN_USE)
+ qi->desc_status[head] = QI_ABORT;
+ head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+ } while (head != tail);
+
+ if (qi->desc_status[wait_index] == QI_ABORT)
+ return -EAGAIN;
+ }
+
+ if (fault & DMA_FSTS_ICE)
+ writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
return 0;
}
@@ -632,7 +775,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
*/
int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
{
- int rc = 0;
+ int rc;
struct q_inval *qi = iommu->qi;
struct qi_desc *hw, wait_desc;
int wait_index, index;
@@ -643,6 +786,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
hw = qi->desc;
+restart:
+ rc = 0;
+
spin_lock_irqsave(&qi->q_lock, flags);
while (qi->free_cnt < 3) {
spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -673,7 +819,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
* update the HW tail register indicating the presence of
* new descriptors.
*/
- writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+ writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
while (qi->desc_status[wait_index] != QI_DONE) {
/*
@@ -685,18 +831,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
*/
rc = qi_check_fault(iommu, index);
if (rc)
- goto out;
+ break;
spin_unlock(&qi->q_lock);
cpu_relax();
spin_lock(&qi->q_lock);
}
-out:
- qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+ qi->desc_status[index] = QI_DONE;
reclaim_free_desc(qi);
spin_unlock_irqrestore(&qi->q_lock, flags);
+ if (rc == -EAGAIN)
+ goto restart;
+
return rc;
}
@@ -714,41 +863,26 @@ void qi_global_iec(struct intel_iommu *iommu)
qi_submit_sync(&desc, iommu);
}
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+ u64 type)
{
struct qi_desc desc;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
| QI_CC_GRAN(type) | QI_CC_TYPE;
desc.high = 0;
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
}
-int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- int non_present_entry_flush)
+void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type)
{
u8 dw = 0, dr = 0;
struct qi_desc desc;
int ih = 0;
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
if (cap_write_drain(iommu->cap))
dw = 1;
@@ -760,7 +894,28 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
| QI_IOTLB_AM(size_order);
- return qi_submit_sync(&desc, iommu);
+ qi_submit_sync(&desc, iommu);
+}
+
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask)
+{
+ struct qi_desc desc;
+
+ if (mask) {
+ BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+ addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+ desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+ } else
+ desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+ if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+ qdep = 0;
+
+ desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+ QI_DIOTLB_TYPE;
+
+ qi_submit_sync(&desc, iommu);
}
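qi_flush_dev_iotlb() encodes the invalidation span in the address itself: for a 2^mask-page flush the base must be aligned to that span, and the address bits below bit (VTD_PAGE_SHIFT + mask - 1) are filled with ones (the size bit, QI_DEV_IOTLB_SIZE, is set separately in the descriptor). A stand-alone sketch of that arithmetic, assuming 4 KiB pages (VTD_PAGE_SHIFT == 12 is an assumption here):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SHIFT_4K	12	/* assumed VTD_PAGE_SHIFT */

	static uint64_t encode_dev_iotlb_addr(uint64_t addr, unsigned int mask)
	{
		if (!mask)
			return addr;	/* single-page invalidation */

		/* base must be aligned to the 2^mask-page region being flushed */
		assert((addr & ((1ULL << (PAGE_SHIFT_4K + mask)) - 1)) == 0);

		/* filling the low bits marks the size of the span */
		return addr | ((1ULL << (PAGE_SHIFT_4K + mask - 1)) - 1);
	}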
/*
@@ -790,7 +945,6 @@ void dmar_disable_qi(struct intel_iommu *iommu)
cpu_relax();
iommu->gcmd &= ~DMA_GCMD_QIE;
-
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
@@ -804,7 +958,7 @@ end:
*/
static void __dmar_enable_qi(struct intel_iommu *iommu)
{
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
struct q_inval *qi = iommu->qi;
@@ -818,9 +972,8 @@ static void __dmar_enable_qi(struct intel_iommu *iommu)
dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
- cmd = iommu->gcmd | DMA_GCMD_QIE;
iommu->gcmd |= DMA_GCMD_QIE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
@@ -1096,7 +1249,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
set_irq_data(irq, NULL);
iommu->irq = 0;
destroy_irq(irq);
- return 0;
+ return ret;
}
ret = request_irq(irq, dmar_fault, 0, iommu->name, iommu);
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index 9aa4fe100a0d..66f29bc00be4 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -4,7 +4,7 @@
menuconfig HOTPLUG_PCI
tristate "Support for PCI Hotplug"
- depends on PCI && HOTPLUG
+ depends on PCI && HOTPLUG && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
@@ -41,7 +41,7 @@ config HOTPLUG_PCI_FAKE
config HOTPLUG_PCI_COMPAQ
tristate "Compaq PCI Hotplug driver"
- depends on X86 && PCI_BIOS && PCI_LEGACY
+ depends on X86 && PCI_BIOS
help
Say Y here if you have a motherboard with a Compaq PCI Hotplug
controller.
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 43c10bd261b4..4dd7114964ac 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -77,7 +77,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index de94f4feef8c..a5b9f6ae507b 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -72,7 +72,6 @@ static int get_adapter_status(struct hotplug_slot *slot, u8 * value);
static int get_latch_status(struct hotplug_slot *slot, u8 * value);
static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index afaf8f69f73e..53836001d511 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -150,25 +150,25 @@ struct ctrl_reg { /* offset */
/* offsets to the controller registers based on the above structure layout */
enum ctrl_offsets {
- SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
+ SLOT_RST = offsetof(struct ctrl_reg, slot_RST),
SLOT_ENABLE = offsetof(struct ctrl_reg, slot_enable),
MISC = offsetof(struct ctrl_reg, misc),
LED_CONTROL = offsetof(struct ctrl_reg, led_control),
INT_INPUT_CLEAR = offsetof(struct ctrl_reg, int_input_clear),
- INT_MASK = offsetof(struct ctrl_reg, int_mask),
- CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
+ INT_MASK = offsetof(struct ctrl_reg, int_mask),
+ CTRL_RESERVED0 = offsetof(struct ctrl_reg, reserved0),
CTRL_RESERVED1 = offsetof(struct ctrl_reg, reserved1),
CTRL_RESERVED2 = offsetof(struct ctrl_reg, reserved1),
- GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
- NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
+ GEN_OUTPUT_AB = offsetof(struct ctrl_reg, gen_output_AB),
+ NON_INT_INPUT = offsetof(struct ctrl_reg, non_int_input),
CTRL_RESERVED3 = offsetof(struct ctrl_reg, reserved3),
CTRL_RESERVED4 = offsetof(struct ctrl_reg, reserved4),
CTRL_RESERVED5 = offsetof(struct ctrl_reg, reserved5),
CTRL_RESERVED6 = offsetof(struct ctrl_reg, reserved6),
CTRL_RESERVED7 = offsetof(struct ctrl_reg, reserved7),
CTRL_RESERVED8 = offsetof(struct ctrl_reg, reserved8),
- SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
- CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
+ SLOT_MASK = offsetof(struct ctrl_reg, slot_mask),
+ CTRL_RESERVED9 = offsetof(struct ctrl_reg, reserved9),
CTRL_RESERVED10 = offsetof(struct ctrl_reg, reserved10),
CTRL_RESERVED11 = offsetof(struct ctrl_reg, reserved11),
SLOT_SERR = offsetof(struct ctrl_reg, slot_SERR),
@@ -190,7 +190,9 @@ struct hrt {
u32 reserved2;
} __attribute__ ((packed));
-/* offsets to the hotplug resource table registers based on the above structure layout */
+/* offsets to the hotplug resource table registers based on the above
+ * structure layout
+ */
enum hrt_offsets {
SIG0 = offsetof(struct hrt, sig0),
SIG1 = offsetof(struct hrt, sig1),
@@ -217,18 +219,20 @@ struct slot_rt {
u16 pre_mem_length;
} __attribute__ ((packed));
-/* offsets to the hotplug slot resource table registers based on the above structure layout */
+/* offsets to the hotplug slot resource table registers based on the above
+ * structure layout
+ */
enum slot_rt_offsets {
DEV_FUNC = offsetof(struct slot_rt, dev_func),
- PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
- SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
- MAX_BUS = offsetof(struct slot_rt, max_bus),
- IO_BASE = offsetof(struct slot_rt, io_base),
- IO_LENGTH = offsetof(struct slot_rt, io_length),
- MEM_BASE = offsetof(struct slot_rt, mem_base),
- MEM_LENGTH = offsetof(struct slot_rt, mem_length),
- PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
- PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
+ PRIMARY_BUS = offsetof(struct slot_rt, primary_bus),
+ SECONDARY_BUS = offsetof(struct slot_rt, secondary_bus),
+ MAX_BUS = offsetof(struct slot_rt, max_bus),
+ IO_BASE = offsetof(struct slot_rt, io_base),
+ IO_LENGTH = offsetof(struct slot_rt, io_length),
+ MEM_BASE = offsetof(struct slot_rt, mem_base),
+ MEM_LENGTH = offsetof(struct slot_rt, mem_length),
+ PRE_MEM_BASE = offsetof(struct slot_rt, pre_mem_base),
+ PRE_MEM_LENGTH = offsetof(struct slot_rt, pre_mem_length),
};
struct pci_func {
@@ -286,8 +290,8 @@ struct event_info {
struct controller {
struct controller *next;
u32 ctrl_int_comp;
- struct mutex crit_sect; /* critical section mutex */
- void __iomem *hpc_reg; /* cookie for our pci controller location */
+ struct mutex crit_sect; /* critical section mutex */
+ void __iomem *hpc_reg; /* cookie for our pci controller location */
struct pci_resource *mem_head;
struct pci_resource *p_mem_head;
struct pci_resource *io_head;
@@ -299,7 +303,7 @@ struct controller {
u8 next_event;
u8 interrupt;
u8 cfgspc_irq;
- u8 bus; /* bus number for the pci hotplug controller */
+ u8 bus; /* bus number for the pci hotplug controller */
u8 rev;
u8 slot_device_offset;
u8 first_slot;
@@ -401,46 +405,57 @@ struct resource_lists {
/* debugfs functions for the hotplug controller info */
-extern void cpqhp_initialize_debugfs (void);
-extern void cpqhp_shutdown_debugfs (void);
-extern void cpqhp_create_debugfs_files (struct controller *ctrl);
-extern void cpqhp_remove_debugfs_files (struct controller *ctrl);
+extern void cpqhp_initialize_debugfs(void);
+extern void cpqhp_shutdown_debugfs(void);
+extern void cpqhp_create_debugfs_files(struct controller *ctrl);
+extern void cpqhp_remove_debugfs_files(struct controller *ctrl);
/* controller functions */
-extern void cpqhp_pushbutton_thread (unsigned long event_pointer);
-extern irqreturn_t cpqhp_ctrl_intr (int IRQ, void *data);
-extern int cpqhp_find_available_resources (struct controller *ctrl, void __iomem *rom_start);
-extern int cpqhp_event_start_thread (void);
-extern void cpqhp_event_stop_thread (void);
-extern struct pci_func *cpqhp_slot_create (unsigned char busnumber);
-extern struct pci_func *cpqhp_slot_find (unsigned char bus, unsigned char device, unsigned char index);
-extern int cpqhp_process_SI (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_process_SS (struct controller *ctrl, struct pci_func *func);
-extern int cpqhp_hardware_test (struct controller *ctrl, int test_num);
+extern void cpqhp_pushbutton_thread(unsigned long event_pointer);
+extern irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data);
+extern int cpqhp_find_available_resources(struct controller *ctrl,
+ void __iomem *rom_start);
+extern int cpqhp_event_start_thread(void);
+extern void cpqhp_event_stop_thread(void);
+extern struct pci_func *cpqhp_slot_create(unsigned char busnumber);
+extern struct pci_func *cpqhp_slot_find(unsigned char bus, unsigned char device,
+ unsigned char index);
+extern int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func);
+extern int cpqhp_hardware_test(struct controller *ctrl, int test_num);
/* resource functions */
extern int cpqhp_resource_sort_and_combine (struct pci_resource **head);
/* pci functions */
-extern int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-extern int cpqhp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot);
-extern int cpqhp_save_config (struct controller *ctrl, int busnumber, int is_hot_plug);
-extern int cpqhp_save_base_addr_length (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_configure_board (struct controller *ctrl, struct pci_func * func);
-extern int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot);
-extern int cpqhp_valid_replace (struct controller *ctrl, struct pci_func * func);
-extern void cpqhp_destroy_board_resources (struct pci_func * func);
-extern int cpqhp_return_board_resources (struct pci_func * func, struct resource_lists * resources);
-extern void cpqhp_destroy_resource_list (struct resource_lists * resources);
-extern int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func);
-extern int cpqhp_unconfigure_device (struct pci_func* func);
+extern int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
+extern int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num,
+ u8 slot);
+extern int cpqhp_save_config(struct controller *ctrl, int busnumber,
+ int is_hot_plug);
+extern int cpqhp_save_base_addr_length(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_used_resources(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_configure_board(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_save_slot_config(struct controller *ctrl,
+ struct pci_func *new_slot);
+extern int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func);
+extern void cpqhp_destroy_board_resources(struct pci_func *func);
+extern int cpqhp_return_board_resources(struct pci_func *func,
+ struct resource_lists *resources);
+extern void cpqhp_destroy_resource_list(struct resource_lists *resources);
+extern int cpqhp_configure_device(struct controller *ctrl,
+ struct pci_func *func);
+extern int cpqhp_unconfigure_device(struct pci_func *func);
/* Global variables */
extern int cpqhp_debug;
extern int cpqhp_legacy_mode;
extern struct controller *cpqhp_ctrl_list;
extern struct pci_func *cpqhp_slot_list[256];
+extern struct irq_routing_table *cpqhp_routing_table;
/* these can be gotten rid of, but for debugging they are purty */
extern u8 cpqhp_nic_irq;
@@ -449,7 +464,7 @@ extern u8 cpqhp_disk_irq;
/* inline functions */
-static inline char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct slot *slot)
{
return hotplug_slot_name(slot->hotplug_slot);
}
@@ -458,9 +473,9 @@ static inline char *slot_name(struct slot *slot)
* return_resource
*
* Puts node back in the resource list pointed to by head
- *
*/
-static inline void return_resource(struct pci_resource **head, struct pci_resource *node)
+static inline void return_resource(struct pci_resource **head,
+ struct pci_resource *node)
{
if (!node || !head)
return;
@@ -471,7 +486,7 @@ static inline void return_resource(struct pci_resource **head, struct pci_resour
static inline void set_SOGO(struct controller *ctrl)
{
u16 misc;
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc = (misc | 0x0001) & 0xFFFB;
writew(misc, ctrl->hpc_reg + MISC);
@@ -481,7 +496,7 @@ static inline void set_SOGO(struct controller *ctrl)
static inline void amber_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= (0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -491,7 +506,7 @@ static inline void amber_LED_on(struct controller *ctrl, u8 slot)
static inline void amber_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x01010000L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -504,7 +519,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= (0x01010000L << slot);
-
+
return led_control ? 1 : 0;
}
@@ -512,7 +527,7 @@ static inline int read_amber_LED(struct controller *ctrl, u8 slot)
static inline void green_LED_on(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control |= 0x0101L << slot;
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -521,7 +536,7 @@ static inline void green_LED_on(struct controller *ctrl, u8 slot)
static inline void green_LED_off(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
writel(led_control, ctrl->hpc_reg + LED_CONTROL);
@@ -531,7 +546,7 @@ static inline void green_LED_off(struct controller *ctrl, u8 slot)
static inline void green_LED_blink(struct controller *ctrl, u8 slot)
{
u32 led_control;
-
+
led_control = readl(ctrl->hpc_reg + LED_CONTROL);
led_control &= ~(0x0101L << slot);
led_control |= (0x0001L << slot);
@@ -575,22 +590,21 @@ static inline u8 read_slot_enable(struct controller *ctrl)
}
-/*
+/**
* get_controller_speed - find the current frequency/mode of controller.
*
* @ctrl: controller to get frequency/mode for.
*
* Returns controller speed.
- *
*/
static inline u8 get_controller_speed(struct controller *ctrl)
{
u8 curr_freq;
- u16 misc;
-
+ u16 misc;
+
if (ctrl->pcix_support) {
curr_freq = readb(ctrl->hpc_reg + NEXT_CURR_FREQ);
- if ((curr_freq & 0xB0) == 0xB0)
+ if ((curr_freq & 0xB0) == 0xB0)
return PCI_SPEED_133MHz_PCIX;
if ((curr_freq & 0xA0) == 0xA0)
return PCI_SPEED_100MHz_PCIX;
@@ -602,19 +616,18 @@ static inline u8 get_controller_speed(struct controller *ctrl)
return PCI_SPEED_33MHz;
}
- misc = readw(ctrl->hpc_reg + MISC);
- return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
+ misc = readw(ctrl->hpc_reg + MISC);
+ return (misc & 0x0800) ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
}
-
-/*
+
+/**
* get_adapter_speed - find the max supported frequency/mode of adapter.
*
* @ctrl: hotplug controller.
* @hp_slot: hotplug slot where adapter is installed.
*
* Returns adapter speed.
- *
*/
static inline u8 get_adapter_speed(struct controller *ctrl, u8 hp_slot)
{
@@ -672,7 +685,8 @@ static inline int get_slot_enabled(struct controller *ctrl, struct slot *slot)
}
-static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slot)
+static inline int cpq_get_latch_status(struct controller *ctrl,
+ struct slot *slot)
{
u32 status;
u8 hp_slot;
@@ -687,7 +701,8 @@ static inline int cpq_get_latch_status(struct controller *ctrl, struct slot *slo
}
-static inline int get_presence_status(struct controller *ctrl, struct slot *slot)
+static inline int get_presence_status(struct controller *ctrl,
+ struct slot *slot)
{
int presence_save = 0;
u8 hp_slot;
@@ -696,7 +711,8 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot
hp_slot = slot->device - ctrl->slot_device_offset;
tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
- presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> hp_slot) & 0x02;
+ presence_save = (int) ((((~tempdword) >> 23) | ((~tempdword) >> 15))
+ >> hp_slot) & 0x02;
return presence_save;
}
@@ -718,5 +734,12 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
return retval;
}
-#endif
+#include <asm/pci_x86.h>
+static inline int cpqhp_routing_table_length(void)
+{
+ BUG_ON(cpqhp_routing_table == NULL);
+ return ((cpqhp_routing_table->size - sizeof(struct irq_routing_table)) /
+ sizeof(struct irq_info));
+}
+#endif
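cpqhp_routing_table_length() replaces the length computation that was previously open-coded at every pcibios_get_irq_routing_table() call site: the entry count is the table payload (total size minus the fixed header) divided by the per-entry size. A stand-alone illustration of that arithmetic; the 32-byte header and 16-byte entry sizes below are assumptions for the example, not taken from this patch:

	#include <stdio.h>

	static int table_entries(unsigned int total_size,
				 unsigned int header_size,
				 unsigned int entry_size)
	{
		return (total_size - header_size) / entry_size;
	}

	int main(void)
	{
		/* e.g. a 112-byte routing table with a 32-byte header -> 5 slots */
		printf("%d\n", table_entries(112, 32, 16));
		return 0;
	}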
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index c2e1bcbb28a7..075b4f4b6e0d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -25,8 +25,7 @@
* Send feedback to <greg@kroah.com>
*
* Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
- * Torben Mathiasen <torben.mathiasen@hp.com>
- *
+ * Torben Mathiasen <torben.mathiasen@hp.com>
*/
#include <linux/module.h>
@@ -45,7 +44,6 @@
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
/* Global variables */
@@ -53,6 +51,7 @@ int cpqhp_debug;
int cpqhp_legacy_mode;
struct controller *cpqhp_ctrl_list; /* = NULL */
struct pci_func *cpqhp_slot_list[256];
+struct irq_routing_table *cpqhp_routing_table;
/* local variables */
static void __iomem *smbios_table;
@@ -78,33 +77,6 @@ MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
#define CPQHPC_MODULE_MINOR 208
-static int one_time_init (void);
-static int set_attention_status (struct hotplug_slot *slot, u8 value);
-static int process_SI (struct hotplug_slot *slot);
-static int process_SS (struct hotplug_slot *slot);
-static int hardware_test (struct hotplug_slot *slot, u32 value);
-static int get_power_status (struct hotplug_slot *slot, u8 *value);
-static int get_attention_status (struct hotplug_slot *slot, u8 *value);
-static int get_latch_status (struct hotplug_slot *slot, u8 *value);
-static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
- .set_attention_status = set_attention_status,
- .enable_slot = process_SI,
- .disable_slot = process_SS,
- .hardware_test = hardware_test,
- .get_power_status = get_power_status,
- .get_attention_status = get_attention_status,
- .get_latch_status = get_latch_status,
- .get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
-};
-
-
static inline int is_slot64bit(struct slot *slot)
{
return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0;
@@ -144,7 +116,7 @@ static void __iomem * detect_SMBIOS_pointer(void __iomem *begin, void __iomem *e
break;
}
}
-
+
if (!status)
fp = NULL;
@@ -171,7 +143,7 @@ static int init_SERR(struct controller * ctrl)
tempdword = ctrl->first_slot;
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // Loop through slots
+ /* Loop through slots */
while (number_of_slots) {
physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
@@ -182,41 +154,42 @@ static int init_SERR(struct controller * ctrl)
return 0;
}
-
-/* nice debugging output */
-static int pci_print_IRQ_route (void)
+static int init_cpqhp_routing_table(void)
{
- struct irq_routing_table *routing_table;
int len;
- int loop;
-
- u8 tbus, tdevice, tslot;
- routing_table = pcibios_get_irq_routing_table();
- if (routing_table == NULL) {
- err("No BIOS Routing Table??? Not good\n");
+ cpqhp_routing_table = pcibios_get_irq_routing_table();
+ if (cpqhp_routing_table == NULL)
return -ENOMEM;
- }
- len = (routing_table->size - sizeof(struct irq_routing_table)) /
- sizeof(struct irq_info);
- // Make sure I got at least one entry
+ len = cpqhp_routing_table_length();
if (len == 0) {
- kfree(routing_table);
+ kfree(cpqhp_routing_table);
+ cpqhp_routing_table = NULL;
return -1;
}
- dbg("bus dev func slot\n");
+ return 0;
+}
+
+/* nice debugging output */
+static void pci_print_IRQ_route(void)
+{
+ int len;
+ int loop;
+ u8 tbus, tdevice, tslot;
+
+ len = cpqhp_routing_table_length();
+ dbg("bus dev func slot\n");
for (loop = 0; loop < len; ++loop) {
- tbus = routing_table->slots[loop].bus;
- tdevice = routing_table->slots[loop].devfn;
- tslot = routing_table->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
}
- kfree(routing_table);
- return 0;
+ return;
}
@@ -242,9 +215,9 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
void __iomem *p_max;
if (!smbios_table || !curr)
- return(NULL);
+ return NULL;
- // set p_max to the end of the table
+ /* set p_max to the end of the table */
p_max = smbios_start + readw(smbios_table + ST_LENGTH);
p_temp = curr;
@@ -253,20 +226,19 @@ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
while ((p_temp < p_max) && !bail) {
/* Look for the double NULL terminator
* The first condition is the previous byte
- * and the second is the curr */
- if (!previous_byte && !(readb(p_temp))) {
+ * and the second is the curr
+ */
+ if (!previous_byte && !(readb(p_temp)))
bail = 1;
- }
previous_byte = readb(p_temp);
p_temp++;
}
- if (p_temp < p_max) {
+ if (p_temp < p_max)
return p_temp;
- } else {
+ else
return NULL;
- }
}
@@ -292,21 +264,18 @@ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
if (!smbios_table)
return NULL;
- if (!previous) {
+ if (!previous)
previous = smbios_start;
- } else {
+ else
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- }
- while (previous) {
- if (readb(previous + SMBIOS_GENERIC_TYPE) != type) {
+ while (previous)
+ if (readb(previous + SMBIOS_GENERIC_TYPE) != type)
previous = get_subsequent_smbios_entry(smbios_start,
smbios_table, previous);
- } else {
+ else
break;
- }
- }
return previous;
}
@@ -322,144 +291,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
kfree(slot);
}
-#define SLOT_NAME_SIZE 10
-
-static int ctrl_slot_setup(struct controller *ctrl,
- void __iomem *smbios_start,
- void __iomem *smbios_table)
-{
- struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
- u8 number_of_slots;
- u8 slot_device;
- u8 slot_number;
- u8 ctrl_slot;
- u32 tempdword;
- char name[SLOT_NAME_SIZE];
- void __iomem *slot_entry= NULL;
- int result = -ENOMEM;
-
- dbg("%s\n", __func__);
-
- tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
-
- number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
- slot_number = ctrl->first_slot;
-
- while (number_of_slots) {
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- goto error;
-
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info =
- kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info)
- goto error_hpslot;
- hotplug_slot_info = hotplug_slot->info;
-
- slot->ctrl = ctrl;
- slot->bus = ctrl->bus;
- slot->device = slot_device;
- slot->number = slot_number;
- dbg("slot->number = %u\n", slot->number);
-
- slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
- slot_entry);
-
- while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
- slot->number)) {
- slot_entry = get_SMBIOS_entry(smbios_start,
- smbios_table, 9, slot_entry);
- }
-
- slot->p_sm_slot = slot_entry;
-
- init_timer(&slot->task_event);
- slot->task_event.expires = jiffies + 5 * HZ;
- slot->task_event.function = cpqhp_pushbutton_thread;
-
- //FIXME: these capabilities aren't used but if they are
- // they need to be correctly implemented
- slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
- slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
-
- if (is_slot64bit(slot))
- slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
- if (is_slot66mhz(slot))
- slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
- if (ctrl->speed == PCI_SPEED_66MHz)
- slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
-
- ctrl_slot =
- slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
-
- // Check presence
- slot->capabilities |=
- ((((~tempdword) >> 23) |
- ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
- // Check the switch state
- slot->capabilities |=
- ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
- // Check the slot enable
- slot->capabilities |=
- ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
-
- /* register this slot with the hotplug pci core */
- hotplug_slot->release = &release_slot;
- hotplug_slot->private = slot;
- snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
-
- dbg("registering bus %d, dev %d, number %d, "
- "ctrl->slot_device_offset %d, slot %d\n",
- slot->bus, slot->device,
- slot->number, ctrl->slot_device_offset,
- slot_number);
- result = pci_hp_register(hotplug_slot,
- ctrl->pci_dev->bus,
- slot->device,
- name);
- if (result) {
- err("pci_hp_register failed with error %d\n", result);
- goto error_info;
- }
-
- slot->next = ctrl->slot;
- ctrl->slot = slot;
-
- number_of_slots--;
- slot_device++;
- slot_number++;
- }
-
- return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
-error_slot:
- kfree(slot);
-error:
- return result;
-}
-
static int ctrl_slot_cleanup (struct controller * ctrl)
{
struct slot *old_slot, *next_slot;
@@ -476,36 +307,32 @@ static int ctrl_slot_cleanup (struct controller * ctrl)
cpqhp_remove_debugfs_files(ctrl);
- //Free IRQ associated with hot plug device
+ /* Free IRQ associated with hot plug device */
free_irq(ctrl->interrupt, ctrl);
- //Unmap the memory
+ /* Unmap the memory */
iounmap(ctrl->hpc_reg);
- //Finally reclaim PCI mem
+ /* Finally reclaim PCI mem */
release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
pci_resource_len(ctrl->pci_dev, 0));
- return(0);
+ return 0;
}
-//============================================================================
-// function: get_slot_mapping
-//
-// Description: Attempts to determine a logical slot mapping for a PCI
-// device. Won't work for more than one PCI-PCI bridge
-// in a slot.
-//
-// Input: u8 bus_num - bus number of PCI device
-// u8 dev_num - device number of PCI device
-// u8 *slot - Pointer to u8 where slot number will
-// be returned
-//
-// Output: SUCCESS or FAILURE
-//=============================================================================
+/**
+ * get_slot_mapping - determine logical slot mapping for PCI device
+ * @bus: PCI bus used for the config space reads
+ * @bus_num: bus number of PCI device
+ * @dev_num: device number of PCI device
+ * @slot: pointer to u8 where slot number will be returned
+ *
+ * Won't work for more than one PCI-PCI bridge in a slot.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
static int
get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
u32 work;
long len;
long loop;
@@ -516,36 +343,25 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
bridgeSlot = 0xFF;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength);
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn >> 3;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if ((tbus == bus_num) && (tdevice == dev_num)) {
*slot = tslot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
} else {
/* Did not get a match on the target PCI device. Check
- * if the current IRQ table entry is a PCI-to-PCI bridge
- * device. If so, and it's secondary bus matches the
- * bus number for the target device, I need to save the
- * bridge's slot number. If I can not find an entry for
- * the target device, I will have to assume it's on the
- * other side of the bridge, and assign it the bridge's
- * slot. */
+ * if the current IRQ table entry is a PCI-to-PCI
+ * bridge device. If so, and its secondary bus
+ * matches the bus number for the target device, I need
+ * to save the bridge's slot number. If I can not find
+ * an entry for the target device, I will have to
+ * assume it's on the other side of the bridge, and
+ * assign it the bridge's slot.
+ */
bus->number = tbus;
pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
PCI_CLASS_REVISION, &work);
@@ -555,25 +371,23 @@ get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
PCI_DEVFN(tdevice, 0),
PCI_PRIMARY_BUS, &work);
// See if bridge's secondary bus matches target bus.
- if (((work >> 8) & 0x000000FF) == (long) bus_num) {
+ if (((work >> 8) & 0x000000FF) == (long) bus_num)
bridgeSlot = tslot;
- }
}
}
}
- // If we got here, we didn't find an entry in the IRQ mapping table
- // for the target PCI device. If we did determine that the target
- // device is on the other side of a PCI-to-PCI bridge, return the
- // slot number for the bridge.
+ /* If we got here, we didn't find an entry in the IRQ mapping table for
+ * the target PCI device. If we did determine that the target device
+ * is on the other side of a PCI-to-PCI bridge, return the slot number
+ * for the bridge.
+ */
if (bridgeSlot != 0xFF) {
*slot = bridgeSlot;
- kfree(PCIIRQRoutingInfoLength);
return 0;
}
- kfree(PCIIRQRoutingInfoLength);
- // Couldn't find an entry in the routing table for this PCI device
+ /* Couldn't find an entry in the routing table for this PCI device */
return -1;
}
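The bridge fallback above reads the dword at PCI_PRIMARY_BUS (config offset 0x18) and compares byte 1 against the target bus, which is what the "(work >> 8) & 0x000000FF" test expresses. On a PCI-to-PCI bridge that dword packs the primary, secondary and subordinate bus numbers in bytes 0, 1 and 2; a small stand-alone sketch of the decode:

	#include <stdint.h>

	/* Decode of the bridge bus-number dword at config offset 0x18. */
	static uint8_t primary_bus(uint32_t dword)     { return dword & 0xff; }
	static uint8_t secondary_bus(uint32_t dword)   { return (dword >> 8) & 0xff; }
	static uint8_t subordinate_bus(uint32_t dword) { return (dword >> 16) & 0xff; }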
@@ -591,32 +405,32 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
u8 hp_slot;
if (func == NULL)
- return(1);
+ return 1;
hp_slot = func->device - ctrl->slot_device_offset;
- // Wait for exclusive access to hardware
+ /* Wait for exclusive access to hardware */
mutex_lock(&ctrl->crit_sect);
- if (status == 1) {
+ if (status == 1)
amber_LED_on (ctrl, hp_slot);
- } else if (status == 0) {
+ else if (status == 0)
amber_LED_off (ctrl, hp_slot);
- } else {
- // Done with exclusive hardware access
+ else {
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(1);
+ return 1;
}
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
- return(0);
+ return 0;
}
@@ -719,7 +533,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
- return cpqhp_hardware_test(ctrl, value);
+ return cpqhp_hardware_test(ctrl, value);
}
@@ -738,7 +552,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
struct slot *slot = hotplug_slot->private;
struct controller *ctrl = slot->ctrl;
-
+
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
*value = cpq_get_attention_status(ctrl, slot);
@@ -793,6 +607,230 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
return 0;
}
+static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+ .set_attention_status = set_attention_status,
+ .enable_slot = process_SI,
+ .disable_slot = process_SS,
+ .hardware_test = hardware_test,
+ .get_power_status = get_power_status,
+ .get_attention_status = get_attention_status,
+ .get_latch_status = get_latch_status,
+ .get_adapter_status = get_adapter_status,
+ .get_max_bus_speed = get_max_bus_speed,
+ .get_cur_bus_speed = get_cur_bus_speed,
+};
+
+#define SLOT_NAME_SIZE 10
+
+static int ctrl_slot_setup(struct controller *ctrl,
+ void __iomem *smbios_start,
+ void __iomem *smbios_table)
+{
+ struct slot *slot;
+ struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot_info *hotplug_slot_info;
+ u8 number_of_slots;
+ u8 slot_device;
+ u8 slot_number;
+ u8 ctrl_slot;
+ u32 tempdword;
+ char name[SLOT_NAME_SIZE];
+ void __iomem *slot_entry= NULL;
+ int result = -ENOMEM;
+
+ dbg("%s\n", __func__);
+
+ tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
+
+ number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
+ slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
+ slot_number = ctrl->first_slot;
+
+ while (number_of_slots) {
+ slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ if (!slot)
+ goto error;
+
+ slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
+ GFP_KERNEL);
+ if (!slot->hotplug_slot)
+ goto error_slot;
+ hotplug_slot = slot->hotplug_slot;
+
+ hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
+ GFP_KERNEL);
+ if (!hotplug_slot->info)
+ goto error_hpslot;
+ hotplug_slot_info = hotplug_slot->info;
+
+ slot->ctrl = ctrl;
+ slot->bus = ctrl->bus;
+ slot->device = slot_device;
+ slot->number = slot_number;
+ dbg("slot->number = %u\n", slot->number);
+
+ slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
+ slot_entry);
+
+ while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
+ slot->number)) {
+ slot_entry = get_SMBIOS_entry(smbios_start,
+ smbios_table, 9, slot_entry);
+ }
+
+ slot->p_sm_slot = slot_entry;
+
+ init_timer(&slot->task_event);
+ slot->task_event.expires = jiffies + 5 * HZ;
+ slot->task_event.function = cpqhp_pushbutton_thread;
+
+ /* FIXME: these capabilities aren't used, but if they are
+ * they need to be correctly implemented
+ */
+ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
+ slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;
+
+ if (is_slot64bit(slot))
+ slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
+ if (is_slot66mhz(slot))
+ slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
+ if (ctrl->speed == PCI_SPEED_66MHz)
+ slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
+
+ ctrl_slot =
+ slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);
+
+ /* Check presence */
+ slot->capabilities |=
+ ((((~tempdword) >> 23) |
+ ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
+ /* Check the switch state */
+ slot->capabilities |=
+ ((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
+ /* Check the slot enable */
+ slot->capabilities |=
+ ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
+
+ /* register this slot with the hotplug pci core */
+ hotplug_slot->release = &release_slot;
+ hotplug_slot->private = slot;
+ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
+ hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
+
+ hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
+ hotplug_slot_info->attention_status =
+ cpq_get_attention_status(ctrl, slot);
+ hotplug_slot_info->latch_status =
+ cpq_get_latch_status(ctrl, slot);
+ hotplug_slot_info->adapter_status =
+ get_presence_status(ctrl, slot);
+
+ dbg("registering bus %d, dev %d, number %d, "
+ "ctrl->slot_device_offset %d, slot %d\n",
+ slot->bus, slot->device,
+ slot->number, ctrl->slot_device_offset,
+ slot_number);
+ result = pci_hp_register(hotplug_slot,
+ ctrl->pci_dev->bus,
+ slot->device,
+ name);
+ if (result) {
+ err("pci_hp_register failed with error %d\n", result);
+ goto error_info;
+ }
+
+ slot->next = ctrl->slot;
+ ctrl->slot = slot;
+
+ number_of_slots--;
+ slot_device++;
+ slot_number++;
+ }
+
+ return 0;
+error_info:
+ kfree(hotplug_slot_info);
+error_hpslot:
+ kfree(hotplug_slot);
+error_slot:
+ kfree(slot);
+error:
+ return result;
+}
+
+static int one_time_init(void)
+{
+ int loop;
+ int retval = 0;
+
+ if (initialized)
+ return 0;
+
+ power_mode = 0;
+
+ retval = init_cpqhp_routing_table();
+ if (retval)
+ goto error;
+
+ if (cpqhp_debug)
+ pci_print_IRQ_route();
+
+ dbg("Initialize + Start the notification mechanism \n");
+
+ retval = cpqhp_event_start_thread();
+ if (retval)
+ goto error;
+
+ dbg("Initialize slot lists\n");
+ for (loop = 0; loop < 256; loop++)
+ cpqhp_slot_list[loop] = NULL;
+
+ /* FIXME: We also need to hook the NMI handler eventually.
+ * this also needs to be worked with Christoph
+ * register_NMI_handler();
+ */
+ /* Map rom address */
+ cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
+ if (!cpqhp_rom_start) {
+ err ("Could not ioremap memory region for ROM\n");
+ retval = -EIO;
+ goto error;
+ }
+
+ /* Now, map the int15 entry point if we are on Compaq-specific
+ * hardware
+ */
+ compaq_nvram_init(cpqhp_rom_start);
+
+ /* Map smbios table entry point structure */
+ smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
+ cpqhp_rom_start + ROM_PHY_LEN);
+ if (!smbios_table) {
+ err ("Could not find the SMBIOS pointer in memory\n");
+ retval = -EIO;
+ goto error_rom_start;
+ }
+
+ smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
+ readw(smbios_table + ST_LENGTH));
+ if (!smbios_start) {
+ err ("Could not ioremap memory region taken from SMBIOS values\n");
+ retval = -EIO;
+ goto error_smbios_start;
+ }
+
+ initialized = 1;
+
+ return retval;
+
+error_smbios_start:
+ iounmap(smbios_start);
+error_rom_start:
+ iounmap(cpqhp_rom_start);
+error:
+ return retval;
+}
+
static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 num_of_slots = 0;
@@ -815,7 +853,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
- // Need to read VID early b/c it's used to differentiate CPQ and INTC discovery
+ /* Need to read VID early b/c it's used to differentiate CPQ and INTC
+ * discovery
+ */
rc = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor_id);
if (rc || ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL))) {
err(msg_HPC_non_compaq_or_intel);
@@ -832,217 +872,209 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Check for the proper subsystem IDs
- * Intel uses a different SSID programming model than Compaq.
+ * Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPC's may have RID=0.
*/
- if ((pdev->revision > 2) || (vendor_id == PCI_VENDOR_ID_INTEL)) {
- // TODO: This code can be made to support non-Compaq or Intel subsystem IDs
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_disable_device;
- }
- dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
- if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
- err(msg_HPC_non_compaq_or_intel);
- rc = -ENODEV;
- goto err_disable_device;
- }
+ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_not_supported);
+ return -ENODEV;
+ }
- ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
- if (!ctrl) {
- err("%s : out of memory\n", __func__);
- rc = -ENOMEM;
- goto err_disable_device;
- }
+ /* TODO: This code can be made to support non-Compaq or Intel
+ * subsystem IDs
+ */
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &subsystem_vid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_disable_device;
+ }
+ dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
+ if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
+ err(msg_HPC_non_compaq_or_intel);
+ rc = -ENODEV;
+ goto err_disable_device;
+ }
- rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
- if (rc) {
- err("%s : pci_read_config_word failed\n", __func__);
- goto err_free_ctrl;
- }
+ ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
+ if (!ctrl) {
+ err("%s : out of memory\n", __func__);
+ rc = -ENOMEM;
+ goto err_disable_device;
+ }
- info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
-
- /* Set Vendor ID, so it can be accessed later from other functions */
- ctrl->vendor_id = vendor_id;
-
- switch (subsystem_vid) {
- case PCI_VENDOR_ID_COMPAQ:
- if (pdev->revision >= 0x13) { /* CIOBX */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 1;
- pci_read_config_byte(pdev, 0x41, &bus_cap);
- if (bus_cap & 0x80) {
- dbg("bus max supports 133MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
- break;
- }
- if (bus_cap & 0x40) {
- dbg("bus max supports 100MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- break;
- }
- if (bus_cap & 20) {
- dbg("bus max supports 66MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
- break;
- }
- if (bus_cap & 10) {
- dbg("bus max supports 66MHz PCI\n");
- ctrl->speed_capability = PCI_SPEED_66MHz;
- break;
- }
-
- break;
- }
-
- switch (subsystem_deviceid) {
- case PCI_SUB_HPC_ID:
- /* Original 6500/7000 implementation */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID2:
- /* First Pushbutton implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID_INTC:
- /* Third party (6500/7000) */
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
- ctrl->push_button = 0;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID3:
- /* First 66 Mhz implementation */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_66MHz;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- break;
- case PCI_SUB_HPC_ID4:
- /* First PCI-X implementation, 100MHz */
- ctrl->push_flag = 1;
- ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
- ctrl->push_button = 1;
- ctrl->pci_config_space = 1;
- ctrl->defeature_PHP = 1;
- ctrl->pcix_support = 1;
- ctrl->pcix_speed_capability = 0;
- break;
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
- }
- break;
+ rc = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsystem_deviceid);
+ if (rc) {
+ err("%s : pci_read_config_word failed\n", __func__);
+ goto err_free_ctrl;
+ }
- case PCI_VENDOR_ID_INTEL:
- /* Check for speed capability (0=33, 1=66) */
- if (subsystem_deviceid & 0x0001) {
- ctrl->speed_capability = PCI_SPEED_66MHz;
- } else {
- ctrl->speed_capability = PCI_SPEED_33MHz;
- }
-
- /* Check for push button */
- if (subsystem_deviceid & 0x0002) {
- /* no push button */
- ctrl->push_button = 0;
- } else {
- /* push button supported */
- ctrl->push_button = 1;
- }
-
- /* Check for slot switch type (0=mechanical, 1=not mechanical) */
- if (subsystem_deviceid & 0x0004) {
- /* no switch */
- ctrl->slot_switch_type = 0;
- } else {
- /* switch */
- ctrl->slot_switch_type = 1;
- }
-
- /* PHP Status (0=De-feature PHP, 1=Normal operation) */
- if (subsystem_deviceid & 0x0008) {
- ctrl->defeature_PHP = 1; // PHP supported
- } else {
- ctrl->defeature_PHP = 0; // PHP not supported
- }
-
- /* Alternate Base Address Register Interface (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0010) {
- ctrl->alternate_base_address = 1; // supported
- } else {
- ctrl->alternate_base_address = 0; // not supported
- }
-
- /* PCI Config Space Index (0=not supported, 1=supported) */
- if (subsystem_deviceid & 0x0020) {
- ctrl->pci_config_space = 1; // supported
- } else {
- ctrl->pci_config_space = 0; // not supported
- }
-
- /* PCI-X support */
- if (subsystem_deviceid & 0x0080) {
- /* PCI-X capable */
- ctrl->pcix_support = 1;
- /* Frequency of operation in PCI-X mode */
- if (subsystem_deviceid & 0x0040) {
- /* 133MHz PCI-X if bit 7 is 1 */
- ctrl->pcix_speed_capability = 1;
- } else {
- /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
- /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
- ctrl->pcix_speed_capability = 0;
- }
- } else {
- /* Conventional PCI */
- ctrl->pcix_support = 0;
- ctrl->pcix_speed_capability = 0;
- }
+ info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
+
+ /* Set Vendor ID, so it can be accessed later from other
+ * functions
+ */
+ ctrl->vendor_id = vendor_id;
+
+ switch (subsystem_vid) {
+ case PCI_VENDOR_ID_COMPAQ:
+ if (pdev->revision >= 0x13) { /* CIOBX */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 1;
+ pci_read_config_byte(pdev, 0x41, &bus_cap);
+ if (bus_cap & 0x80) {
+ dbg("bus max supports 133MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
break;
+ }
+ if (bus_cap & 0x40) {
+ dbg("bus max supports 100MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 20) {
+ dbg("bus max supports 66MHz PCI-X\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
+ break;
+ }
+ if (bus_cap & 10) {
+ dbg("bus max supports 66MHz PCI\n");
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ break;
+ }
+
+ break;
+ }
- default:
- err(msg_HPC_not_supported);
- rc = -ENODEV;
- goto err_free_ctrl;
+ switch (subsystem_deviceid) {
+ case PCI_SUB_HPC_ID:
+ /* Original 6500/7000 implementation */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID2:
+ /* First Pushbutton implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID_INTC:
+ /* Third party (6500/7000) */
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+ ctrl->push_button = 0;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID3:
+ /* First 66 MHz implementation */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ case PCI_SUB_HPC_ID4:
+ /* First PCI-X implementation, 100MHz */
+ ctrl->push_flag = 1;
+ ctrl->slot_switch_type = 1;
+ ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ ctrl->push_button = 1;
+ ctrl->pci_config_space = 1;
+ ctrl->defeature_PHP = 1;
+ ctrl->pcix_support = 1;
+ ctrl->pcix_speed_capability = 0;
+ break;
+ default:
+ err(msg_HPC_not_supported);
+ rc = -ENODEV;
+ goto err_free_ctrl;
}
+ break;
+
+ case PCI_VENDOR_ID_INTEL:
+ /* Check for speed capability (0=33, 1=66) */
+ if (subsystem_deviceid & 0x0001)
+ ctrl->speed_capability = PCI_SPEED_66MHz;
+ else
+ ctrl->speed_capability = PCI_SPEED_33MHz;
+
+ /* Check for push button */
+ if (subsystem_deviceid & 0x0002)
+ ctrl->push_button = 0;
+ else
+ ctrl->push_button = 1;
+
+ /* Check for slot switch type (0=mechanical, 1=not mechanical) */
+ if (subsystem_deviceid & 0x0004)
+ ctrl->slot_switch_type = 0;
+ else
+ ctrl->slot_switch_type = 1;
+
+ /* PHP Status (0=De-feature PHP, 1=Normal operation) */
+ if (subsystem_deviceid & 0x0008)
+ ctrl->defeature_PHP = 1; /* PHP supported */
+ else
+ ctrl->defeature_PHP = 0; /* PHP not supported */
+
+ /* Alternate Base Address Register Interface
+ * (0=not supported, 1=supported)
+ */
+ if (subsystem_deviceid & 0x0010)
+ ctrl->alternate_base_address = 1;
+ else
+ ctrl->alternate_base_address = 0;
+
+ /* PCI Config Space Index (0=not supported, 1=supported) */
+ if (subsystem_deviceid & 0x0020)
+ ctrl->pci_config_space = 1;
+ else
+ ctrl->pci_config_space = 0;
+
+ /* PCI-X support */
+ if (subsystem_deviceid & 0x0080) {
+ ctrl->pcix_support = 1;
+ if (subsystem_deviceid & 0x0040)
+ /* 133MHz PCI-X if bit 7 is 1 */
+ ctrl->pcix_speed_capability = 1;
+ else
+ /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
+ /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
+ ctrl->pcix_speed_capability = 0;
+ } else {
+ /* Conventional PCI */
+ ctrl->pcix_support = 0;
+ ctrl->pcix_speed_capability = 0;
+ }
+ break;
- } else {
+ default:
err(msg_HPC_not_supported);
- return -ENODEV;
+ rc = -ENODEV;
+ goto err_free_ctrl;
}
- // Tell the user that we found one.
+ /* Tell the user that we found one. */
info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
pdev->bus->number);
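For the Intel case above, each bit of the subsystem device ID advertises one hotplug capability. A reference sketch of the masks decoded by that branch; the enumerator names are illustrative, while the bit meanings come from the comments in the hunk itself:

	enum intel_php_ssid_bits {
		SSID_66MHZ_CAPABLE	= 0x0001,	/* 0 = 33 MHz, 1 = 66 MHz */
		SSID_NO_PUSH_BUTTON	= 0x0002,	/* set = no push button */
		SSID_NO_SLOT_SWITCH	= 0x0004,	/* set = switch not mechanical */
		SSID_PHP_NORMAL		= 0x0008,	/* set = normal PHP operation */
		SSID_ALT_BASE_ADDR	= 0x0010,	/* alternate BAR interface */
		SSID_PCI_CFG_INDEX	= 0x0020,	/* PCI config space index */
		SSID_PCIX_133MHZ	= 0x0040,	/* with bit 7 set: 133 MHz PCI-X */
		SSID_PCIX_CAPABLE	= 0x0080,	/* PCI-X capable */
	};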
@@ -1087,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
goto err_free_bus;
}
-
+
dbg("pdev = %p\n", pdev);
dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
@@ -1109,7 +1141,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_mem_region;
}
- // Check for 66Mhz operation
+ /* Check for 66Mhz operation */
ctrl->speed = get_controller_speed(ctrl);
@@ -1120,7 +1152,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*
********************************************************/
- // find the physical slot number of the first hot plug slot
+ /* find the physical slot number of the first hot plug slot */
/* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
@@ -1137,7 +1169,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_iounmap;
}
- // Store PCI Config Space for all devices on this bus
+ /* Store PCI Config Space for all devices on this bus */
rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
if (rc) {
err("%s: unable to save PCI configuration data, error %d\n",
@@ -1148,7 +1180,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*
* Get IO, memory, and IRQ resources for new devices
*/
- // The next line is required for cpqhp_find_available_resources
+ /* The next line is required for cpqhp_find_available_resources */
ctrl->interrupt = pdev->irq;
if (ctrl->interrupt < 0x10) {
cpqhp_legacy_mode = 1;
@@ -1182,7 +1214,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
__func__, rc);
goto err_iounmap;
}
-
+
/* Mask all general input interrupts */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
@@ -1196,12 +1228,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_iounmap;
}
- /* Enable Shift Out interrupt and clear it, also enable SERR on power fault */
+ /* Enable Shift Out interrupt and clear it, also enable SERR on power
+ * fault
+ */
temp_word = readw(ctrl->hpc_reg + MISC);
temp_word |= 0x4006;
writew(temp_word, ctrl->hpc_reg + MISC);
- // Changed 05/05/97 to clear all interrupts at start
+ /* Changed 05/05/97 to clear all interrupts at start */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
@@ -1216,13 +1250,14 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cpqhp_ctrl_list = ctrl;
}
- // turn off empty slots here unless command line option "ON" set
- // Wait for exclusive access to hardware
+ /* turn off empty slots here unless command line option "ON" set
+ * Wait for exclusive access to hardware
+ */
mutex_lock(&ctrl->crit_sect);
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
- // find first device number for the ctrl
+ /* find first device number for the ctrl */
device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
while (num_of_slots) {
@@ -1234,23 +1269,21 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hp_slot = func->device - ctrl->slot_device_offset;
dbg("hp_slot: %d\n", hp_slot);
- // We have to save the presence info for these slots
+ /* We have to save the presence info for these slots */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
- if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
+ if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
func->switch_save = 0;
- } else {
+ else
func->switch_save = 0x10;
- }
- if (!power_mode) {
+ if (!power_mode)
if (!func->is_a_board) {
green_LED_off(ctrl, hp_slot);
slot_disable(ctrl, hp_slot);
}
- }
device++;
num_of_slots--;
@@ -1258,7 +1291,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!power_mode) {
set_SOGO(ctrl);
- // Wait for SOBS to be unset
+ /* Wait for SOBS to be unset */
wait_for_ctrl_irq(ctrl);
}
@@ -1269,7 +1302,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_free_irq;
}
- // Done with exclusive hardware access
+ /* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
cpqhp_create_debugfs_files(ctrl);
@@ -1291,77 +1324,6 @@ err_disable_device:
return rc;
}
-
-static int one_time_init(void)
-{
- int loop;
- int retval = 0;
-
- if (initialized)
- return 0;
-
- power_mode = 0;
-
- retval = pci_print_IRQ_route();
- if (retval)
- goto error;
-
- dbg("Initialize + Start the notification mechanism \n");
-
- retval = cpqhp_event_start_thread();
- if (retval)
- goto error;
-
- dbg("Initialize slot lists\n");
- for (loop = 0; loop < 256; loop++) {
- cpqhp_slot_list[loop] = NULL;
- }
-
- // FIXME: We also need to hook the NMI handler eventually.
- // this also needs to be worked with Christoph
- // register_NMI_handler();
-
- // Map rom address
- cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
- if (!cpqhp_rom_start) {
- err ("Could not ioremap memory region for ROM\n");
- retval = -EIO;
- goto error;
- }
-
- /* Now, map the int15 entry point if we are on compaq specific hardware */
- compaq_nvram_init(cpqhp_rom_start);
-
- /* Map smbios table entry point structure */
- smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
- cpqhp_rom_start + ROM_PHY_LEN);
- if (!smbios_table) {
- err ("Could not find the SMBIOS pointer in memory\n");
- retval = -EIO;
- goto error_rom_start;
- }
-
- smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
- readw(smbios_table + ST_LENGTH));
- if (!smbios_start) {
- err ("Could not ioremap memory region taken from SMBIOS values\n");
- retval = -EIO;
- goto error_smbios_start;
- }
-
- initialized = 1;
-
- return retval;
-
-error_smbios_start:
- iounmap(smbios_start);
-error_rom_start:
- iounmap(cpqhp_rom_start);
-error:
- return retval;
-}
-
-
static void __exit unload_cpqphpd(void)
{
struct pci_func *next;
@@ -1381,10 +1343,10 @@ static void __exit unload_cpqphpd(void)
if (ctrl->hpc_reg) {
u16 misc;
rc = read_slot_enable (ctrl);
-
+
writeb(0, ctrl->hpc_reg + SLOT_SERR);
writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
-
+
misc = readw(ctrl->hpc_reg + MISC);
misc &= 0xFFFD;
writew(misc, ctrl->hpc_reg + MISC);
@@ -1464,38 +1426,34 @@ static void __exit unload_cpqphpd(void)
}
}
- // Stop the notification mechanism
+ /* Stop the notification mechanism */
if (initialized)
cpqhp_event_stop_thread();
- //unmap the rom address
+ /* unmap the rom address */
if (cpqhp_rom_start)
iounmap(cpqhp_rom_start);
if (smbios_start)
iounmap(smbios_start);
}
-
-
static struct pci_device_id hpcd_pci_tbl[] = {
{
/* handle any PCI Hotplug controller */
.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
.class_mask = ~0,
-
+
/* no matter who makes it */
.vendor = PCI_ANY_ID,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
-
+
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
-
-
static struct pci_driver cpqhpc_driver = {
.name = "compaq_pci_hotplug",
.id_table = hpcd_pci_tbl,
@@ -1503,8 +1461,6 @@ static struct pci_driver cpqhpc_driver = {
/* remove: cpqhpc_remove_one, */
};
-
-
static int __init cpqhpc_init(void)
{
int result;
@@ -1518,7 +1474,6 @@ static int __init cpqhpc_init(void)
return result;
}
-
static void __exit cpqhpc_cleanup(void)
{
dbg("unload_cpqphpd()\n");
@@ -1529,8 +1484,5 @@ static void __exit cpqhpc_cleanup(void)
cpqhp_shutdown_debugfs();
}
-
module_init(cpqhpc_init);
module_exit(cpqhpc_cleanup);
-
-
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index cc227a8c4b11..2fa47af992a8 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -81,14 +81,15 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x1L << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
/* this is the structure that tells the worker thread
- *what to do */
+ * what to do
+ */
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
ctrl->next_event = (ctrl->next_event + 1) % 10;
taskInfo->hp_slot = hp_slot;
@@ -100,17 +101,17 @@ static u8 handle_switch_change(u8 change, struct controller * ctrl)
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) {
- /**********************************
+ /*
* Switch opened
- **********************************/
+ */
func->switch_save = 0;
taskInfo->event_type = INT_SWITCH_OPEN;
} else {
- /**********************************
+ /*
* Switch closed
- **********************************/
+ */
func->switch_save = 0x10;
@@ -131,9 +132,8 @@ static struct slot *cpqhp_find_slot(struct controller *ctrl, u8 device)
{
struct slot *slot = ctrl->slot;
- while (slot && (slot->device != device)) {
+ while (slot && (slot->device != device))
slot = slot->next;
- }
return slot;
}
@@ -152,17 +152,17 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
if (!change)
return 0;
- /**********************************
+ /*
* Presence Change
- **********************************/
+ */
dbg("cpqsbd: Presence/Notify input change.\n");
dbg(" Changed bits are 0x%4.4x\n", change );
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x0101 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -177,22 +177,23 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
return 0;
/* If the switch closed, must be a button
- * If not in button mode, nevermind */
+ * If not in button mode, nevermind
+ */
if (func->switch_save && (ctrl->push_button == 1)) {
temp_word = ctrl->ctrl_int_comp >> 16;
temp_byte = (temp_word >> hp_slot) & 0x01;
temp_byte |= (temp_word >> (hp_slot + 7)) & 0x02;
if (temp_byte != func->presence_save) {
- /**************************************
+ /*
* button Pressed (doesn't do anything)
- **************************************/
+ */
dbg("hp_slot %d button pressed\n", hp_slot);
taskInfo->event_type = INT_BUTTON_PRESS;
} else {
- /**********************************
+ /*
* button Released - TAKE ACTION!!!!
- **********************************/
+ */
dbg("hp_slot %d button released\n", hp_slot);
taskInfo->event_type = INT_BUTTON_RELEASE;
@@ -210,7 +211,8 @@ static u8 handle_presence_change(u16 change, struct controller * ctrl)
}
} else {
/* Switch is open, assume a presence change
- * Save the presence state */
+ * Save the presence state
+ */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
@@ -241,17 +243,17 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
if (!change)
return 0;
- /**********************************
+ /*
* power fault
- **********************************/
+ */
info("power fault interrupt\n");
for (hp_slot = 0; hp_slot < 6; hp_slot++) {
if (change & (0x01 << hp_slot)) {
- /**********************************
+ /*
* this one changed.
- **********************************/
+ */
func = cpqhp_slot_find(ctrl->bus,
(hp_slot + ctrl->slot_device_offset), 0);
@@ -262,16 +264,16 @@ static u8 handle_power_fault(u8 change, struct controller * ctrl)
rc++;
if (ctrl->ctrl_int_comp & (0x00000100 << hp_slot)) {
- /**********************************
+ /*
* power fault Cleared
- **********************************/
+ */
func->status = 0x00;
taskInfo->event_type = INT_POWER_FAULT_CLEAR;
} else {
- /**********************************
+ /*
* power fault
- **********************************/
+ */
taskInfo->event_type = INT_POWER_FAULT;
if (ctrl->rev < 4) {
@@ -432,13 +434,15 @@ static struct pci_resource *do_pre_bridge_resource_split(struct pci_resource **h
/* If we got here, there the bridge requires some of the resource, but
- * we may be able to split some off of the front */
+ * we may be able to split some off of the front
+ */
node = *head;
if (node->length & (alignment -1)) {
/* this one isn't an aligned length, so we'll make a new entry
- * and split it up. */
+ * and split it up.
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -544,10 +548,10 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
if (!(*head))
return NULL;
- if ( cpqhp_resource_sort_and_combine(head) )
+ if (cpqhp_resource_sort_and_combine(head))
return NULL;
- if ( sort_by_size(head) )
+ if (sort_by_size(head))
return NULL;
for (node = *head; node; node = node->next) {
@@ -556,7 +560,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
if (node->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -581,7 +586,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
/* Don't need to check if too small since we already did */
if (node->length > size) {
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -601,7 +607,8 @@ static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size
continue;
/* If we got here, then it is the right size
- * Now take it out of the list and break */
+ * Now take it out of the list and break
+ */
if (*head == node) {
*head = node->next;
} else {
@@ -642,14 +649,16 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
return NULL;
for (max = *head; max; max = max->next) {
- /* If not big enough we could probably just bail,
- * instead we'll continue to the next. */
+ /* If not big enough we could probably just bail,
+ * instead we'll continue to the next.
+ */
if (max->length < size)
continue;
if (max->base & (size - 1)) {
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (max->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -672,7 +681,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz
if ((max->base + max->length) & (size - 1)) {
/* this one isn't end aligned properly at the top
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -744,7 +754,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
if (node->base & (size - 1)) {
dbg("%s: not aligned\n", __func__);
/* this one isn't base aligned properly
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
temp_dword = (node->base | (size-1)) + 1;
/* Short circuit if adjusted size is too small */
@@ -769,7 +780,8 @@ static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
if (node->length > size) {
dbg("%s: too big\n", __func__);
/* this one is longer than we need
- * so we'll make a new entry and split it up */
+ * so we'll make a new entry and split it up
+ */
split_node = kmalloc(sizeof(*split_node), GFP_KERNEL);
if (!split_node)
@@ -886,19 +898,19 @@ irqreturn_t cpqhp_ctrl_intr(int IRQ, void *data)
u32 Diff;
u32 temp_dword;
-
+
misc = readw(ctrl->hpc_reg + MISC);
- /***************************************
+ /*
* Check to see if it was our interrupt
- ***************************************/
+ */
if (!(misc & 0x000C)) {
return IRQ_NONE;
}
if (misc & 0x0004) {
- /**********************************
+ /*
* Serial Output interrupt Pending
- **********************************/
+ */
/* Clear the interrupt */
misc |= 0x0004;
@@ -961,11 +973,8 @@ struct pci_func *cpqhp_slot_create(u8 busnumber)
struct pci_func *next;
new_slot = kzalloc(sizeof(*new_slot), GFP_KERNEL);
- if (new_slot == NULL) {
- /* I'm not dead yet!
- * You will be. */
+ if (new_slot == NULL)
return new_slot;
- }
new_slot->next = NULL;
new_slot->configured = 1;
@@ -996,10 +1005,8 @@ static int slot_remove(struct pci_func * old_slot)
return 1;
next = cpqhp_slot_list[old_slot->bus];
-
- if (next == NULL) {
+ if (next == NULL)
return 1;
- }
if (next == old_slot) {
cpqhp_slot_list[old_slot->bus] = old_slot->next;
@@ -1008,9 +1015,8 @@ static int slot_remove(struct pci_func * old_slot)
return 0;
}
- while ((next->next != old_slot) && (next->next != NULL)) {
+ while ((next->next != old_slot) && (next->next != NULL))
next = next->next;
- }
if (next->next == old_slot) {
next->next = old_slot->next;
@@ -1040,9 +1046,8 @@ static int bridge_slot_remove(struct pci_func *bridge)
for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
next = cpqhp_slot_list[tempBus];
- while (!slot_remove(next)) {
+ while (!slot_remove(next))
next = cpqhp_slot_list[tempBus];
- }
}
next = cpqhp_slot_list[bridge->bus];
@@ -1130,39 +1135,43 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
u16 reg16;
u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
-
+
if (ctrl->speed == adapter_speed)
return 0;
-
+
/* We don't allow freq/mode changes if we find another adapter running
- * in another slot on this controller */
+ * in another slot on this controller
+ */
for(slot = ctrl->slot; slot; slot = slot->next) {
- if (slot->device == (hp_slot + ctrl->slot_device_offset))
+ if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
if (!slot->hotplug_slot || !slot->hotplug_slot->info)
continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (slot->hotplug_slot->info->adapter_status == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
- * this rate if supported */
- if (ctrl->speed < adapter_speed)
+ * this rate if supported
+ */
+ if (ctrl->speed < adapter_speed)
return 0;
return 1;
}
-
+
/* If the controller doesn't support freq/mode changes and the
- * controller is running at a higher mode, we bail */
+ * controller is running at a higher mode, we bail
+ */
if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
return 1;
-
+
/* But we allow the adapter to run at a lower rate if possible */
if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
return 0;
/* We try to set the max speed supported by both the adapter and
- * controller */
+ * controller
+ */
if (ctrl->speed_capability < adapter_speed) {
if (ctrl->speed == ctrl->speed_capability)
return 0;
@@ -1171,22 +1180,22 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
writeb(0x00, ctrl->hpc_reg + SLOT_ENABLE);
-
- set_SOGO(ctrl);
+
+ set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
-
+
if (adapter_speed != PCI_SPEED_133MHz_PCIX)
reg = 0xF5;
else
- reg = 0xF4;
+ reg = 0xF4;
pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+
reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
reg16 &= ~0x000F;
switch(adapter_speed) {
- case(PCI_SPEED_133MHz_PCIX):
+ case(PCI_SPEED_133MHz_PCIX):
reg = 0x75;
- reg16 |= 0xB;
+ reg16 |= 0xB;
break;
case(PCI_SPEED_100MHz_PCIX):
reg = 0x74;
@@ -1203,48 +1212,48 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
default: /* 33MHz PCI 2.2 */
reg = 0x71;
break;
-
+
}
reg16 |= 0xB << 12;
writew(reg16, ctrl->hpc_reg + NEXT_CURR_FREQ);
-
- mdelay(5);
-
+
+ mdelay(5);
+
/* Reenable interrupts */
writel(0, ctrl->hpc_reg + INT_MASK);
- pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
-
+ pci_write_config_byte(ctrl->pci_dev, 0x41, reg);
+
/* Restart state machine */
reg = ~0xF;
pci_read_config_byte(ctrl->pci_dev, 0x43, &reg);
pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
-
+
/* Only if mode change...*/
if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
-
+
wait_for_ctrl_irq(ctrl);
mdelay(1100);
-
+
/* Restore LED/Slot state */
writel(leds, ctrl->hpc_reg + LED_CONTROL);
writeb(slot_power, ctrl->hpc_reg + SLOT_ENABLE);
-
+
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
ctrl->speed = adapter_speed;
slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- info("Successfully changed frequency/mode for adapter in slot %d\n",
+ info("Successfully changed frequency/mode for adapter in slot %d\n",
slot->number);
return 0;
}
-/* the following routines constitute the bulk of the
- hotplug controller logic
+/* the following routines constitute the bulk of the
+ * hotplug controller logic
*/
@@ -1268,17 +1277,17 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
hp_slot = func->device - ctrl->slot_device_offset;
- if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot)) {
- /**********************************
- * The switch is open.
- **********************************/
+ /*
+ * The switch is open.
+ */
+ if (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot))
rc = INTERLOCK_OPEN;
- } else if (is_slot_enabled (ctrl, hp_slot)) {
- /**********************************
- * The board is already on
- **********************************/
+ /*
+ * The board is already on
+ */
+ else if (is_slot_enabled (ctrl, hp_slot))
rc = CARD_FUNCTIONING;
- } else {
+ else {
mutex_lock(&ctrl->crit_sect);
/* turn on board without attaching to the bus */
@@ -1299,7 +1308,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
@@ -1352,7 +1361,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
* Get slot won't work for devices behind
* bridges, but in this case it will always be
* called for the "base" bus/dev/func of an
- * adapter. */
+ * adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1377,7 +1387,8 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
- * bus/dev/func of an adapter. */
+ * bus/dev/func of an adapter.
+ */
mutex_lock(&ctrl->crit_sect);
@@ -1434,7 +1445,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
wait_for_ctrl_irq (ctrl);
/* Change bits in slot power register to force another shift out
- * NOTE: this is to work around the timer bug */
+ * NOTE: this is to work around the timer bug
+ */
temp_byte = readb(ctrl->hpc_reg + SLOT_POWER);
writeb(0x00, ctrl->hpc_reg + SLOT_POWER);
writeb(temp_byte, ctrl->hpc_reg + SLOT_POWER);
@@ -1443,12 +1455,12 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
/* Wait for SOBS to be unset */
wait_for_ctrl_irq (ctrl);
-
+
adapter_speed = get_adapter_speed(ctrl, hp_slot);
if (ctrl->speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
-
+
/* turn off board without attaching to the bus */
disable_slot_power (ctrl, hp_slot);
@@ -1461,7 +1473,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
if (rc)
return rc;
-
+
p_slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
/* turn on board and blink green LED */
@@ -1521,7 +1533,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
}
/* All F's is an empty slot or an invalid board */
- if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */
+ if (temp_register != 0xFFFFFFFF) {
res_lists.io_head = ctrl->io_head;
res_lists.mem_head = ctrl->mem_head;
res_lists.p_mem_head = ctrl->p_mem_head;
@@ -1570,9 +1582,8 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
index = 0;
do {
new_slot = cpqhp_slot_find(ctrl->bus, func->device, index++);
- if (new_slot && !new_slot->pci_dev) {
+ if (new_slot && !new_slot->pci_dev)
cpqhp_configure_device(ctrl, new_slot);
- }
} while (new_slot);
mutex_lock(&ctrl->crit_sect);
@@ -1859,12 +1870,12 @@ static void interrupt_event_handler(struct controller *ctrl)
info(msg_button_on, p_slot->number);
}
mutex_lock(&ctrl->crit_sect);
-
+
dbg("blink green LED and turn off amber\n");
-
+
amber_LED_off (ctrl, hp_slot);
green_LED_blink (ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -1958,7 +1969,7 @@ void cpqhp_pushbutton_thread(unsigned long slot)
if (cpqhp_process_SI(ctrl, func) != 0) {
amber_LED_on(ctrl, hp_slot);
green_LED_off(ctrl, hp_slot);
-
+
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
@@ -2079,7 +2090,7 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
struct pci_bus *pci_bus = ctrl->pci_bus;
int physical_slot=0;
- device = func->device;
+ device = func->device;
func = cpqhp_slot_find(ctrl->bus, device, index++);
p_slot = cpqhp_find_slot(ctrl, device);
if (p_slot) {
@@ -2113,9 +2124,8 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
/* If the VGA Enable bit is set, remove isn't
* supported */
- if (BCR & PCI_BRIDGE_CTL_VGA) {
+ if (BCR & PCI_BRIDGE_CTL_VGA)
rc = REMOVE_NOT_SUPPORTED;
- }
}
}
@@ -2183,67 +2193,67 @@ int cpqhp_hardware_test(struct controller *ctrl, int test_num)
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0f;
switch (test_num) {
- case 1:
- /* Do stuff here! */
-
- /* Do that funky LED thing */
- /* so we can restore them later */
- save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
- work_LED = 0x01010101;
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
-
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
- work_LED = 0x00000101;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- switch_leds(ctrl, num_of_slots, &work_LED, 0);
- switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ case 1:
+ /* Do stuff here! */
+
+ /* Do that funky LED thing */
+ /* so we can restore them later */
+ save_LED = readl(ctrl->hpc_reg + LED_CONTROL);
+ work_LED = 0x01010101;
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+ work_LED = 0x00000101;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ switch_leds(ctrl, num_of_slots, &work_LED, 0);
+ switch_leds(ctrl, num_of_slots, &work_LED, 1);
+
+ work_LED = 0x01010000;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ for (loop = 0; loop < num_of_slots; loop++) {
+ set_SOGO(ctrl);
- work_LED = 0x01010000;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- for (loop = 0; loop < num_of_slots; loop++) {
- set_SOGO(ctrl);
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED >> 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED >> 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
-
- set_SOGO(ctrl);
+ set_SOGO(ctrl);
- /* Wait for SOGO interrupt */
- wait_for_ctrl_irq (ctrl);
+ /* Wait for SOGO interrupt */
+ wait_for_ctrl_irq (ctrl);
- /* Get ready for next iteration */
- long_delay((3*HZ)/10);
- work_LED = work_LED << 16;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- work_LED = work_LED << 1;
- writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
- }
+ /* Get ready for next iteration */
+ long_delay((3*HZ)/10);
+ work_LED = work_LED << 16;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ work_LED = work_LED << 1;
+ writel(work_LED, ctrl->hpc_reg + LED_CONTROL);
+ }
- /* put it back the way it was */
- writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
+ /* put it back the way it was */
+ writel(save_LED, ctrl->hpc_reg + LED_CONTROL);
- set_SOGO(ctrl);
+ set_SOGO(ctrl);
- /* Wait for SOBS to be unset */
- wait_for_ctrl_irq (ctrl);
- break;
- case 2:
- /* Do other stuff here! */
- break;
- case 3:
- /* and more... */
- break;
+ /* Wait for SOBS to be unset */
+ wait_for_ctrl_irq (ctrl);
+ break;
+ case 2:
+ /* Do other stuff here! */
+ break;
+ case 3:
+ /* and more... */
+ break;
}
return 0;
}
@@ -2312,9 +2322,9 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
while ((function < max_functions) && (!stop_it)) {
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
- if (ID == 0xFFFFFFFF) { /* There's nothing there. */
+ if (ID == 0xFFFFFFFF) {
function++;
- } else { /* There's something there */
+ } else {
/* Setup slot structure. */
new_slot = cpqhp_slot_create(func->bus);
@@ -2339,8 +2349,8 @@ static u32 configure_new_device(struct controller * ctrl, struct pci_func * func
/*
- Configuration logic that involves the hotplug data structures and
- their bookkeeping
+ * Configuration logic that involves the hotplug data structures and
+ * their bookkeeping
*/
@@ -2393,7 +2403,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
if (rc)
return rc;
- if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
+ if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
/* set Primary bus */
dbg("set Primary bus = %d\n", func->bus);
rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
@@ -2484,7 +2494,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
temp_resources.irqs = &irqs;
/* Make copies of the nodes we are going to pass down so that
- * if there is a problem,we can just use these to free resources */
+ * if there is a problem,we can just use these to free resources
+ */
hold_bus_node = kmalloc(sizeof(*hold_bus_node), GFP_KERNEL);
hold_IO_node = kmalloc(sizeof(*hold_IO_node), GFP_KERNEL);
hold_mem_node = kmalloc(sizeof(*hold_mem_node), GFP_KERNEL);
@@ -2556,7 +2567,8 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
temp_word = (p_mem_node->base + p_mem_node->length - 1) >> 16;
rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
- /* Adjust this to compensate for extra adjustment in first loop */
+ /* Adjust this to compensate for extra adjustment in first loop
+ */
irqs.barber_pole--;
rc = 0;
@@ -2917,27 +2929,26 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
} /* End of base register loop */
if (cpqhp_legacy_mode) {
/* Figure out which interrupt pin this function uses */
- rc = pci_bus_read_config_byte (pci_bus, devfn,
+ rc = pci_bus_read_config_byte (pci_bus, devfn,
PCI_INTERRUPT_PIN, &temp_byte);
/* If this function needs an interrupt and we are behind
* a bridge and the pin is tied to something that's
	 * already mapped, set this one the same */
- if (temp_byte && resources->irqs &&
- (resources->irqs->valid_INT &
+ if (temp_byte && resources->irqs &&
+ (resources->irqs->valid_INT &
(0x01 << ((temp_byte + resources->irqs->barber_pole - 1) & 0x03)))) {
/* We have to share with something already set up */
- IRQ = resources->irqs->interrupt[(temp_byte +
+ IRQ = resources->irqs->interrupt[(temp_byte +
resources->irqs->barber_pole - 1) & 0x03];
} else {
/* Program IRQ based on card type */
rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
- if (class_code == PCI_BASE_CLASS_STORAGE) {
+ if (class_code == PCI_BASE_CLASS_STORAGE)
IRQ = cpqhp_disk_irq;
- } else {
+ else
IRQ = cpqhp_nic_irq;
- }
}
/* IRQ Line */
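
The handle_switch_change(), handle_presence_change() and handle_power_fault() hunks above all queue work through the controller's fixed-size event ring (taskInfo = &ctrl->event_queue[ctrl->next_event], with next_event advancing modulo 10). As a rough sketch of that producer pattern only, using simplified stand-in names rather than the driver's real definitions:

	#define EVENT_QUEUE_DEPTH 10	/* matches the "% 10" wrap used above */

	struct hp_event {		/* stand-in for the driver's event entry */
		unsigned int event_type;
		unsigned char hp_slot;
	};

	struct hp_ctrl {		/* only the queue fields, for illustration */
		struct hp_event event_queue[EVENT_QUEUE_DEPTH];
		int next_event;
	};

	/* Interrupt-side producer: claim the next entry and advance the index.
	 * The real handlers fill in more fields and later wake the event thread.
	 */
	static struct hp_event *queue_hp_event(struct hp_ctrl *ctrl,
					       unsigned char hp_slot,
					       unsigned int type)
	{
		struct hp_event *ev = &ctrl->event_queue[ctrl->next_event];

		ctrl->next_event = (ctrl->next_event + 1) % EVENT_QUEUE_DEPTH;
		ev->hp_slot = hp_slot;
		ev->event_type = type;
		return ev;
	}
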
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index cb174888002b..76ba8a1c774d 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -94,12 +94,13 @@ static u8 evbuffer[1024];
static void __iomem *compaq_int15_entry_point;
-static spinlock_t int15_lock; /* lock for ordering int15_bios_call() */
+/* lock for ordering int15_bios_call() */
+static spinlock_t int15_lock;
 /* This is a series of functions that deal with
- setting & getting the hotplug resource table in some environment variable.
-*/
+ * setting & getting the hotplug resource table in some environment variable.
+ */
/*
* We really shouldn't be doing this unless there is a _very_ good reason to!!!
@@ -113,7 +114,7 @@ static u32 add_byte( u32 **p_buffer, u8 value, u32 *used, u32 *avail)
if ((*used + 1) > *avail)
return(1);
-
+
*((u8*)*p_buffer) = value;
tByte = (u8**)p_buffer;
(*tByte)++;
@@ -170,10 +171,10 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
unsigned long flags;
int op = operation;
int ret_val;
-
+
if (!compaq_int15_entry_point)
return -ENODEV;
-
+
spin_lock_irqsave(&int15_lock, flags);
__asm__ (
"xorl %%ebx,%%ebx\n" \
@@ -187,7 +188,7 @@ static u32 access_EV (u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size)
"D" (buffer), "m" (compaq_int15_entry_point)
: "%ebx", "%edx");
spin_unlock_irqrestore(&int15_lock, flags);
-
+
return((ret_val & 0xFF00) >> 8);
}
@@ -210,14 +211,16 @@ static int load_HRT (void __iomem *rom_start)
available = 1024;
- // Now load the EV
+ /* Now load the EV */
temp_dword = available;
rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword);
evbuffer_length = temp_dword;
- // We're maintaining the resource lists so write FF to invalidate old info
+ /* We're maintaining the resource lists so write FF to invalidate old
+ * info
+ */
temp_dword = 1;
rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword);
@@ -263,13 +266,13 @@ static u32 store_HRT (void __iomem *rom_start)
p_EV_header = (struct ev_hrt_header *) pFill;
ctrl = cpqhp_ctrl_list;
-
- // The revision of this structure
+
+ /* The revision of this structure */
rc = add_byte( &pFill, 1 + ctrl->push_flag, &usedbytes, &available);
if (rc)
return(rc);
- // The number of controllers
+ /* The number of controllers */
rc = add_byte( &pFill, 1, &usedbytes, &available);
if (rc)
return(rc);
@@ -279,27 +282,27 @@ static u32 store_HRT (void __iomem *rom_start)
numCtrl++;
- // The bus number
+ /* The bus number */
rc = add_byte( &pFill, ctrl->bus, &usedbytes, &available);
if (rc)
return(rc);
- // The device Number
+ /* The device Number */
rc = add_byte( &pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // The function Number
+ /* The function Number */
rc = add_byte( &pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available);
if (rc)
return(rc);
- // Skip the number of available entries
+ /* Skip the number of available entries */
rc = add_dword( &pFill, 0, &usedbytes, &available);
if (rc)
return(rc);
- // Figure out memory Available
+ /* Figure out memory Available */
resNode = ctrl->mem_head;
@@ -308,12 +311,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -321,10 +324,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->mem_avail = loop;
- // Figure out prefetchable memory Available
+ /* Figure out prefetchable memory Available */
resNode = ctrl->p_mem_head;
@@ -333,12 +336,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -346,10 +349,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->p_mem_avail = loop;
- // Figure out IO Available
+ /* Figure out IO Available */
resNode = ctrl->io_head;
@@ -358,12 +361,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -371,10 +374,10 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->io_avail = loop;
- // Figure out bus Available
+ /* Figure out bus Available */
resNode = ctrl->bus_head;
@@ -383,12 +386,12 @@ static u32 store_HRT (void __iomem *rom_start)
while (resNode) {
loop ++;
- // base
+ /* base */
rc = add_dword( &pFill, resNode->base, &usedbytes, &available);
if (rc)
return(rc);
- // length
+ /* length */
rc = add_dword( &pFill, resNode->length, &usedbytes, &available);
if (rc)
return(rc);
@@ -396,15 +399,15 @@ static u32 store_HRT (void __iomem *rom_start)
resNode = resNode->next;
}
- // Fill in the number of entries
+ /* Fill in the number of entries */
p_ev_ctrl->bus_avail = loop;
ctrl = ctrl->next;
}
-
+
p_EV_header->num_of_ctrl = numCtrl;
- // Now store the EV
+ /* Now store the EV */
temp_dword = usedbytes;
@@ -449,20 +452,21 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
struct ev_hrt_header *p_EV_header;
if (!evbuffer_init) {
- // Read the resource list information in from NVRAM
+ /* Read the resource list information in from NVRAM */
if (load_HRT(rom_start))
memset (evbuffer, 0, 1024);
evbuffer_init = 1;
}
- // If we saved information in NVRAM, use it now
+ /* If we saved information in NVRAM, use it now */
p_EV_header = (struct ev_hrt_header *) evbuffer;
- // The following code is for systems where version 1.0 of this
- // driver has been loaded, but doesn't support the hardware.
- // In that case, the driver would incorrectly store something
- // in NVRAM.
+ /* The following code is for systems where version 1.0 of this
+ * driver has been loaded, but doesn't support the hardware.
+ * In that case, the driver would incorrectly store something
+ * in NVRAM.
+ */
if ((p_EV_header->Version == 2) ||
((p_EV_header->Version == 1) && !ctrl->push_flag)) {
p_byte = &(p_EV_header->next);
@@ -479,7 +483,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
function = p_ev_ctrl->function;
while ((bus != ctrl->bus) ||
- (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
+ (device != PCI_SLOT(ctrl->pci_dev->devfn)) ||
(function != PCI_FUNC(ctrl->pci_dev->devfn))) {
nummem = p_ev_ctrl->mem_avail;
numpmem = p_ev_ctrl->p_mem_avail;
@@ -491,7 +495,7 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
return 2;
- // Skip forward to the next entry
+ /* Skip forward to the next entry */
p_byte += (nummem + numpmem + numio + numbus) * 8;
if (p_byte > ((u8*)p_EV_header + evbuffer_length))
@@ -629,8 +633,9 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
ctrl->bus_head = bus_node;
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
@@ -640,14 +645,14 @@ int compaq_nvram_load (void __iomem *rom_start, struct controller *ctrl)
if (rc)
return(rc);
} else {
- if ((evbuffer[0] != 0) && (!ctrl->push_flag))
+ if ((evbuffer[0] != 0) && (!ctrl->push_flag))
return 1;
}
return 0;
}
-
+
int compaq_nvram_store (void __iomem *rom_start)
{
int rc = 1;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 6c0ed0fcb8ee..6173b9a4544e 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -37,7 +37,6 @@
#include "../pci.h"
#include "cpqphp.h"
#include "cpqphp_nvram.h"
-#include <asm/pci_x86.h>
u8 cpqhp_nic_irq;
@@ -82,14 +81,14 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
}
-int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
+int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
{
unsigned char bus;
struct pci_bus *child;
int num;
if (func->pci_dev == NULL)
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus,PCI_DEVFN(func->device, func->function));
/* No pci device, we need to create it then */
if (func->pci_dev == NULL) {
@@ -99,7 +98,7 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
if (num)
pci_bus_add_devices(ctrl->pci_dev->bus);
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
+ func->pci_dev = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, func->function));
if (func->pci_dev == NULL) {
dbg("ERROR: pci_dev still null\n");
return 0;
@@ -112,20 +111,24 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
pci_do_scan_bus(child);
}
+ pci_dev_put(func->pci_dev);
+
return 0;
}
-int cpqhp_unconfigure_device(struct pci_func* func)
+int cpqhp_unconfigure_device(struct pci_func* func)
{
int j;
-
+
dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function);
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_find_slot(func->bus, PCI_DEVFN(func->device, j));
- if (temp)
+ struct pci_dev* temp = pci_get_bus_and_slot(func->bus, PCI_DEVFN(func->device, j));
+ if (temp) {
+ pci_dev_put(temp);
pci_remove_bus_device(temp);
+ }
}
return 0;
}
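
The two hunks above replace the deprecated pci_find_slot() lookups with pci_get_bus_and_slot(), which returns a reference-counted struct pci_dev, so each successful lookup now needs a matching pci_dev_put(), as the added calls show. A minimal sketch of that get/put pattern, assuming only the stock PCI core API of this kernel generation (the helper name and bus/device numbers are illustrative placeholders):

	#include <linux/errno.h>
	#include <linux/pci.h>

	/* Look up bus 0, device 3, function 0 and drop the reference when done.
	 * pci_get_bus_and_slot() takes a reference on the returned device, so
	 * the caller must balance it with pci_dev_put() once it is finished.
	 */
	static int example_lookup(void)
	{
		struct pci_dev *dev;

		dev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
		if (!dev)
			return -ENODEV;

		/* ... read config space or otherwise use dev here ... */

		pci_dev_put(dev);
		return 0;
	}
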
@@ -178,32 +181,22 @@ int cpqhp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
if (!rc)
return !rc;
- // set the Edge Level Control Register (ELCR)
+ /* set the Edge Level Control Register (ELCR) */
temp_word = inb(0x4d0);
temp_word |= inb(0x4d1) << 8;
temp_word |= 0x01 << irq_num;
- // This should only be for x86 as it sets the Edge Level Control Register
- outb((u8) (temp_word & 0xFF), 0x4d0);
- outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
- rc = 0;
- }
+ /* This should only be for x86 as it sets the Edge Level
+ * Control Register
+ */
+		outb((u8) (temp_word & 0xFF), 0x4d0);
+		outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
+		rc = 0;
+	}
return rc;
}
-/*
- * WTF??? This function isn't in the code, yet a function calls it, but the
- * compiler optimizes it away? strange. Here as a placeholder to keep the
- * compiler happy.
- */
-static int PCI_ScanBusNonBridge (u8 bus, u8 device)
-{
- return 0;
-}
-
static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev_num)
{
u16 tdevice;
@@ -213,11 +206,11 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
ctrl->pci_bus->number = bus_num;
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. Not a bridge ?
+ /* Yep we got one. Not a bridge ? */
if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) {
*dev_num = tdevice;
dbg("found it !\n");
@@ -225,16 +218,16 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
}
}
for (tdevice = 0; tdevice < 0xFF; tdevice++) {
- //Scan for access first
+ /* Scan for access first */
if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1)
continue;
dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice);
- //Yep we got one. bridge ?
+ /* Yep we got one. bridge ? */
if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus);
+ /* XXX: no recursion, wtf? */
dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice);
- if (PCI_ScanBusNonBridge(tbus, tdevice) == 0)
- return 0;
+ return 0;
}
}
@@ -244,39 +237,23 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 * dev
static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge)
{
- struct irq_routing_table *PCIIRQRoutingInfoLength;
- long len;
- long loop;
+ int loop, len;
u32 work;
-
u8 tbus, tdevice, tslot;
- PCIIRQRoutingInfoLength = pcibios_get_irq_routing_table();
- if (!PCIIRQRoutingInfoLength)
- return -1;
-
- len = (PCIIRQRoutingInfoLength->size -
- sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
- // Make sure I got at least one entry
- if (len == 0) {
- kfree(PCIIRQRoutingInfoLength );
- return -1;
- }
-
+ len = cpqhp_routing_table_length();
for (loop = 0; loop < len; ++loop) {
- tbus = PCIIRQRoutingInfoLength->slots[loop].bus;
- tdevice = PCIIRQRoutingInfoLength->slots[loop].devfn;
- tslot = PCIIRQRoutingInfoLength->slots[loop].slot;
+ tbus = cpqhp_routing_table->slots[loop].bus;
+ tdevice = cpqhp_routing_table->slots[loop].devfn;
+ tslot = cpqhp_routing_table->slots[loop].slot;
if (tslot == slot) {
*bus_num = tbus;
*dev_num = tdevice;
ctrl->pci_bus->number = tbus;
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work);
- if (!nobridge || (work == 0xffffffff)) {
- kfree(PCIIRQRoutingInfoLength );
+ if (!nobridge || (work == 0xffffffff))
return 0;
- }
dbg("bus_num %d devfn %d\n", *bus_num, *dev_num);
pci_bus_read_config_dword (ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work);
@@ -287,28 +264,26 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num
dbg("Scan bus for Non Bridge: bus %d\n", tbus);
if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) {
*bus_num = tbus;
- kfree(PCIIRQRoutingInfoLength );
return 0;
}
- } else {
- kfree(PCIIRQRoutingInfoLength );
+ } else
return 0;
- }
-
}
}
- kfree(PCIIRQRoutingInfoLength );
return -1;
}
int cpqhp_get_bus_dev (struct controller *ctrl, u8 * bus_num, u8 * dev_num, u8 slot)
{
- return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); //plain (bridges allowed)
+ /* plain (bridges allowed) */
+ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0);
}
-/* More PCI configuration routines; this time centered around hotplug controller */
+/* More PCI configuration routines; this time centered around hotplug
+ * controller
+ */
/*
@@ -339,12 +314,12 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
int stop_it;
int index;
- // Decide which slots are supported
+ /* Decide which slots are supported */
if (is_hot_plug) {
- //*********************************
- // is_hot_plug is the slot mask
- //*********************************
+ /*
+ * is_hot_plug is the slot mask
+ */
FirstSupported = is_hot_plug >> 4;
LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1;
} else {
@@ -352,123 +327,127 @@ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug)
LastSupported = 0x1F;
}
- // Save PCI configuration space for all devices in supported slots
+ /* Save PCI configuration space for all devices in supported slots */
ctrl->pci_bus->number = busnumber;
for (device = FirstSupported; device <= LastSupported; device++) {
ID = 0xFFFFFFFF;
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
+
+ if (ID == 0xFFFFFFFF) {
+ if (is_hot_plug) {
+ /* Setup slot structure with entry for empty
+ * slot
+ */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
- if (ID != 0xFFFFFFFF) { // device in slot
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
- if (rc)
- return rc;
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = 0;
+ new_slot->is_a_board = 0;
+ new_slot->presence_save = 0;
+ new_slot->switch_save = 0;
+ }
+ continue;
+ }
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code);
+ if (rc)
+ return rc;
- // If multi-function device, set max_functions to 8
- if (header_type & 0x80)
- max_functions = 8;
- else
- max_functions = 1;
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
- function = 0;
+ /* If multi-function device, set max_functions to 8 */
+ if (header_type & 0x80)
+ max_functions = 8;
+ else
+ max_functions = 1;
- do {
- DevError = 0;
+ function = 0;
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // P-P Bridge
- // Recurse the subordinate bus
- // get the subordinate bus number
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
- if (rc) {
+ do {
+ DevError = 0;
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus
+ * get the subordinate bus number
+ */
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (rc) {
+ return rc;
+ } else {
+ sub_bus = (int) secondary_bus;
+
+ /* Save secondary bus cfg spc
+ * with this recursive call.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
return rc;
- } else {
- sub_bus = (int) secondary_bus;
-
- // Save secondary bus cfg spc
- // with this recursive call.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return rc;
- ctrl->pci_bus->number = busnumber;
- }
+ ctrl->pci_bus->number = busnumber;
}
+ }
- index = 0;
+ index = 0;
+ new_slot = cpqhp_slot_find(busnumber, device, index++);
+ while (new_slot &&
+ (new_slot->function != (u8) function))
new_slot = cpqhp_slot_find(busnumber, device, index++);
- while (new_slot &&
- (new_slot->function != (u8) function))
- new_slot = cpqhp_slot_find(busnumber, device, index++);
- if (!new_slot) {
- // Setup slot structure.
- new_slot = cpqhp_slot_create(busnumber);
-
- if (new_slot == NULL)
- return(1);
- }
-
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = (u8) function;
- new_slot->is_a_board = 1;
- new_slot->switch_save = 0x10;
- // In case of unsupported board
- new_slot->status = DevError;
- new_slot->pci_dev = pci_find_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
-
- for (cloop = 0; cloop < 0x20; cloop++) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- if (rc)
- return rc;
- }
+ if (!new_slot) {
+ /* Setup slot structure. */
+ new_slot = cpqhp_slot_create(busnumber);
+ if (new_slot == NULL)
+ return 1;
+ }
- function++;
+ new_slot->bus = (u8) busnumber;
+ new_slot->device = (u8) device;
+ new_slot->function = (u8) function;
+ new_slot->is_a_board = 1;
+ new_slot->switch_save = 0x10;
+ /* In case of unsupported board */
+ new_slot->status = DevError;
+ new_slot->pci_dev = pci_get_bus_and_slot(new_slot->bus, (new_slot->device << 3) | new_slot->function);
- stop_it = 0;
+ for (cloop = 0; cloop < 0x20; cloop++) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
+ if (rc)
+ return rc;
+ }
- // this loop skips to the next present function
- // reading in Class Code and Header type.
+ pci_dev_put(new_slot->pci_dev);
- while ((function < max_functions)&&(!stop_it)) {
- rc = pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
- if (rc)
- return rc;
+ function++;
- rc = pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
+ stop_it = 0;
- stop_it++;
- }
+ /* this loop skips to the next present function
+ * reading in Class Code and Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID);
+ if (ID == 0xFFFFFFFF) {
+ function++;
+ continue;
}
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code);
+ if (rc)
+ return rc;
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else if (is_hot_plug) {
- // Setup slot structure with entry for empty slot
- new_slot = cpqhp_slot_create(busnumber);
+ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type);
+ if (rc)
+ return rc;
- if (new_slot == NULL) {
- return(1);
+ stop_it++;
}
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = 0;
- new_slot->is_a_board = 0;
- new_slot->presence_save = 0;
- new_slot->switch_save = 0;
- }
- } // End of FOR loop
+ } while (function < max_functions);
+ } /* End of FOR loop */
- return(0);
+ return 0;
}
@@ -489,7 +468,7 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
u8 secondary_bus;
int sub_bus;
int max_functions;
- int function;
+ int function = 0;
int cloop = 0;
int stop_it;
@@ -498,63 +477,58 @@ int cpqhp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot)
ctrl->pci_bus->number = new_slot->bus;
pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID);
- if (ID != 0xFFFFFFFF) { // device in slot
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
-
- if (header_type & 0x80) // Multi-function device
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Recurse the subordinate bus
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
+ if (ID == 0xFFFFFFFF)
+ return 2;
- sub_bus = (int) secondary_bus;
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type);
- // Save the config headers for the secondary bus.
- rc = cpqhp_save_config(ctrl, sub_bus, 0);
- if (rc)
- return(rc);
- ctrl->pci_bus->number = new_slot->bus;
+ if (header_type & 0x80) /* Multi-function device */
+ max_functions = 8;
+ else
+ max_functions = 1;
- } // End of IF
+ while (function < max_functions) {
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Recurse the subordinate bus */
+ pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus);
- new_slot->status = 0;
+ sub_bus = (int) secondary_bus;
- for (cloop = 0; cloop < 0x20; cloop++) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- }
+ /* Save the config headers for the secondary
+ * bus.
+ */
+ rc = cpqhp_save_config(ctrl, sub_bus, 0);
+ if (rc)
+ return(rc);
+ ctrl->pci_bus->number = new_slot->bus;
- function++;
+ }
- stop_it = 0;
+ new_slot->status = 0;
- // this loop skips to the next present function
- // reading in the Class Code and the Header type.
+ for (cloop = 0; cloop < 0x20; cloop++)
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) & (new_slot-> config_space [cloop]));
- while ((function < max_functions) && (!stop_it)) {
- pci_bus_read_config_dword (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
+ function++;
- if (ID == 0xFFFFFFFF) { // nothing there.
- function++;
- } else { // Something there
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ stop_it = 0;
- pci_bus_read_config_byte (ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ /* this loop skips to the next present function
+ * reading in the Class Code and the Header type.
+ */
+ while ((function < max_functions) && (!stop_it)) {
+ pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID);
- stop_it++;
- }
+ if (ID == 0xFFFFFFFF)
+ function++;
+ else {
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code);
+ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type);
+ stop_it++;
}
+ }
- } while (function < max_functions);
- } // End of IF (device in slot?)
- else {
- return 2;
}
return 0;
@@ -590,11 +564,10 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- // PCI-PCI Bridge
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -610,23 +583,27 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
}
pci_bus->number = func->bus;
- //FIXME: this loop is duplicated in the non-bridge case. The two could be rolled together
- // Figure out IO and memory base lengths
+ /* FIXME: this loop is duplicated in the non-bridge
+ * case. The two could be rolled together Figure out
+ * IO and memory base lengths
+ */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
-
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -637,32 +614,36 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] =
base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
-
- } else if ((header_type & 0x7F) == 0x00) { // PCI-PCI Bridge
- // Figure out IO and memory base lengths
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // base = amount of IO space requested
+ /* IO base
+ * base = amount of IO space
+ * requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
- // base = amount of memory space requested
+ /* memory base
+ * base = amount of memory
+ * space requested
+ */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -673,16 +654,16 @@ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Save information in slot structure
+ /* Save information in slot structure */
func->base_length[(cloop - 0x10) >> 2] = base;
func->base_type[(cloop - 0x10) >> 2] = type;
- } // End of base register loop
+ } /* End of base register loop */
- } else { // Some other unknown header type
+ } else { /* Some other unknown header type */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -728,18 +709,18 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Save the command register
+ /* Save the command register */
pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
- // disable card
+ /* disable card */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // Clear Bridge Control Register
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* Clear Bridge Control Register */
command = 0x00;
pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
@@ -755,7 +736,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
bus_node->next = func->bus_head;
func->bus_head = bus_node;
- // Save IO base and Limit registers
+ /* Save IO base and Limit registers */
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base);
pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length);
@@ -771,7 +752,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
func->io_head = io_node;
}
- // Save memory base and Limit registers
+ /* Save memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
@@ -787,7 +768,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
func->mem_head = mem_node;
}
- // Save prefetchable memory base and Limit registers
+ /* Save prefetchable memory base and Limit registers */
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
@@ -802,7 +783,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
p_mem_node->next = func->p_mem_head;
func->p_mem_head = p_mem_node;
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x14; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &save_base);
@@ -812,11 +793,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -834,7 +818,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -851,7 +835,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -868,9 +852,10 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
return(1);
}
- } // End of base register loop
- } else if ((header_type & 0x7F) == 0x00) { // Standard header
- // Figure out IO and memory base lengths
+ } /* End of base register loop */
+ /* Standard header */
+ } else if ((header_type & 0x7F) == 0x00) {
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
@@ -880,11 +865,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
temp_register = base;
- if (base) { // If this register is implemented
+ /* If this register is implemented */
+ if (base) {
if (((base & 0x03L) == 0x01)
&& (save_command & 0x01)) {
- // IO base
- // set temp_register = amount of IO space requested
+ /* IO base
+ * set temp_register = amount
+ * of IO space requested
+ */
temp_register = base & 0xFFFFFFFE;
temp_register = (~temp_register) + 1;
@@ -901,7 +889,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x08)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -918,7 +906,7 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
if (((base & 0x0BL) == 0x00)
&& (save_command & 0x02)) {
- // prefetchable memory base
+ /* prefetchable memory base */
temp_register = base & 0xFFFFFFF0;
temp_register = (~temp_register) + 1;
@@ -935,15 +923,14 @@ int cpqhp_save_used_resources (struct controller *ctrl, struct pci_func * func)
} else
return(1);
}
- } // End of base register loop
- } else { // Some other unknown header type
+ } /* End of base register loop */
}
- // find the next device in this slot
+ /* find the next device in this slot */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
- return(0);
+ return 0;
}
@@ -975,16 +962,16 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
pci_bus->number = func->bus;
devfn = PCI_DEVFN(func->device, func->function);
- // Start at the top of config space so that the control
- // registers are programmed last
- for (cloop = 0x3C; cloop > 0; cloop -= 4) {
+ /* Start at the top of config space so that the control
+ * registers are programmed last
+ */
+ for (cloop = 0x3C; cloop > 0; cloop -= 4)
pci_bus_write_config_dword (pci_bus, devfn, cloop, func->config_space[cloop >> 2]);
- }
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- // If this is a bridge device, restore subordinate devices
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
+ /* If this is a bridge device, restore subordinate devices */
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
pci_bus_read_config_byte (pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
sub_bus = (int) secondary_bus;
@@ -1000,8 +987,9 @@ int cpqhp_configure_board(struct controller *ctrl, struct pci_func * func)
}
} else {
- // Check all the base Address Registers to make sure
- // they are the same. If not, the board is different.
+ /* Check all the base Address Registers to make sure
+ * they are the same. If not, the board is different.
+ */
for (cloop = 16; cloop < 40; cloop += 4) {
pci_bus_read_config_dword (pci_bus, devfn, cloop, &temp);
@@ -1058,27 +1046,28 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
pci_bus_read_config_dword (pci_bus, devfn, PCI_VENDOR_ID, &temp_register);
- // No adapter present
+ /* No adapter present */
if (temp_register == 0xFFFFFFFF)
return(NO_ADAPTER_PRESENT);
if (temp_register != func->config_space[0])
return(ADAPTER_NOT_SAME);
- // Check for same revision number and class code
+ /* Check for same revision number and class code */
pci_bus_read_config_dword (pci_bus, devfn, PCI_CLASS_REVISION, &temp_register);
- // Adapter not the same
+ /* Adapter not the same */
if (temp_register != func->config_space[0x08 >> 2])
return(ADAPTER_NOT_SAME);
- // Check for Bridge
+ /* Check for Bridge */
pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { // PCI-PCI Bridge
- // In order to continue checking, we must program the
- // bus registers in the bridge to respond to accesses
- // for it's subordinate bus(es)
+ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
+ /* In order to continue checking, we must program the
+ * bus registers in the bridge to respond to accesses
+ * for its subordinate bus(es)
+ */
temp_register = func->config_space[0x18 >> 2];
pci_bus_write_config_dword (pci_bus, devfn, PCI_PRIMARY_BUS, temp_register);
@@ -1096,35 +1085,39 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
}
}
- // Check to see if it is a standard config header
+ /* Check to see if it is a standard config header */
else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
- // Check subsystem vendor and ID
+ /* Check subsystem vendor and ID */
pci_bus_read_config_dword (pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register);
if (temp_register != func->config_space[0x2C >> 2]) {
- // If it's a SMART-2 and the register isn't filled
- // in, ignore the difference because
- // they just have an old rev of the firmware
-
+ /* If it's a SMART-2 and the register isn't
+ * filled in, ignore the difference because
+ * they just have an old rev of the firmware
+ */
if (!((func->config_space[0] == 0xAE100E11)
&& (temp_register == 0x00L)))
return(ADAPTER_NOT_SAME);
}
- // Figure out IO and memory base lengths
+ /* Figure out IO and memory base lengths */
for (cloop = 0x10; cloop <= 0x24; cloop += 4) {
temp_register = 0xFFFFFFFF;
pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
pci_bus_read_config_dword (pci_bus, devfn, cloop, &base);
- if (base) { // If this register is implemented
+
+ /* If this register is implemented */
+ if (base) {
if (base & 0x01L) {
- // IO base
- // set base = amount of IO space requested
+ /* IO base
+ * set base = amount of IO
+ * space requested
+ */
base = base & 0xFFFFFFFE;
base = (~base) + 1;
type = 1;
} else {
- // memory base
+ /* memory base */
base = base & 0xFFFFFFF0;
base = (~base) + 1;
@@ -1135,23 +1128,24 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
type = 0;
}
- // Check information in slot structure
+ /* Check information in slot structure */
if (func->base_length[(cloop - 0x10) >> 2] != base)
return(ADAPTER_NOT_SAME);
if (func->base_type[(cloop - 0x10) >> 2] != type)
return(ADAPTER_NOT_SAME);
- } // End of base register loop
+ } /* End of base register loop */
- } // End of (type 0 config space) else
+ } /* End of (type 0 config space) else */
else {
- // this is not a type 0 or 1 config space header so
- // we don't know how to do it
+ /* this is not a type 0 or 1 config space header so
+ * we don't know how to do it
+ */
return(DEVICE_TYPE_NOT_SUPPORTED);
}
- // Get the next function
+ /* Get the next function */
func = cpqhp_slot_find(func->bus, func->device, index++);
}
@@ -1168,7 +1162,7 @@ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func * func)
* this function is for hot plug ADD!
*
* returns 0 if success
- */
+ */
int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start)
{
u8 temp;
@@ -1187,10 +1181,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff);
dbg("rom_resource_table = %p\n", rom_resource_table);
- if (rom_resource_table == NULL) {
+ if (rom_resource_table == NULL)
return -ENODEV;
- }
- // Sum all resources and setup resource maps
+
+ /* Sum all resources and setup resource maps */
unused_IRQ = readl(rom_resource_table + UNUSED_IRQ);
dbg("unused_IRQ = %x\n", unused_IRQ);
@@ -1222,13 +1216,11 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
temp = 0;
- if (!cpqhp_nic_irq) {
+ if (!cpqhp_nic_irq)
cpqhp_nic_irq = ctrl->cfgspc_irq;
- }
- if (!cpqhp_disk_irq) {
+ if (!cpqhp_disk_irq)
cpqhp_disk_irq = ctrl->cfgspc_irq;
- }
dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq);
@@ -1262,13 +1254,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length,
primary_bus, secondary_bus, max_bus);
- // If this entry isn't for our controller's bus, ignore it
+ /* If this entry isn't for our controller's bus, ignore it */
if (primary_bus != ctrl->bus) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // find out if this entry is for an occupied slot
+ /* find out if this entry is for an occupied slot */
ctrl->pci_bus->number = primary_bus;
pci_bus_read_config_dword (ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword);
dbg("temp_D_word = %x\n", temp_dword);
@@ -1282,13 +1274,13 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++);
}
- // If we can't find a match, skip this table entry
+ /* If we can't find a match, skip this table entry */
if (!func) {
i--;
one_slot += sizeof (struct slot_rt);
continue;
}
- // this may not work and shouldn't be used
+ /* this may not work and shouldn't be used */
if (secondary_bus != primary_bus)
bridged_slot = 1;
else
@@ -1301,7 +1293,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
- // If we've got a valid IO base, use it
+ /* If we've got a valid IO base, use it */
temp_dword = io_base + io_length;
@@ -1325,7 +1317,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid memory base, use it
+ /* If we've got a valid memory base, use it */
temp_dword = mem_base + mem_length;
if ((mem_base) && (temp_dword < 0x10000)) {
mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL);
@@ -1348,8 +1340,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid prefetchable memory base, and
- // the base + length isn't greater than 0xFFFF
+ /* If we've got a valid prefetchable memory base, and
+ * the base + length isn't greater than 0xFFFF
+ */
temp_dword = pre_mem_base + pre_mem_length;
if ((pre_mem_base) && (temp_dword < 0x10000)) {
p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL);
@@ -1372,9 +1365,10 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
}
}
- // If we've got a valid bus number, use it
- // The second condition is to ignore bus numbers on
- // populated slots that don't have PCI-PCI bridges
+ /* If we've got a valid bus number, use it
+ * The second condition is to ignore bus numbers on
+ * populated slots that don't have PCI-PCI bridges
+ */
if (secondary_bus && (secondary_bus != primary_bus)) {
bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL);
if (!bus_node)
@@ -1398,8 +1392,9 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
one_slot += sizeof (struct slot_rt);
}
- // If all of the following fail, we don't have any resources for
- // hot plug add
+ /* If all of the following fail, we don't have any resources for
+ * hot plug add
+ */
rc = 1;
rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head));
rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head));
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 42e4260c3b12..7485ffda950c 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1318,7 +1318,6 @@ error:
}
struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
@@ -1421,3 +1420,4 @@ static void __exit ibmphp_exit(void)
}
module_init(ibmphp_init);
+module_exit(ibmphp_exit);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 535fce0f07f9..844580489d4d 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -347,125 +347,129 @@ static struct pci_slot_attribute hotplug_slot_attr_test = {
.store = test_write_file
};
-static int has_power_file(struct pci_slot *pci_slot)
+static bool has_power_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->enable_slot) ||
(slot->ops->disable_slot) ||
(slot->ops->get_power_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_attention_file(struct pci_slot *pci_slot)
+static bool has_attention_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if ((slot->ops->set_attention_status) ||
(slot->ops->get_attention_status))
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_latch_file(struct pci_slot *pci_slot)
+static bool has_latch_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_latch_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_adapter_file(struct pci_slot *pci_slot)
+static bool has_adapter_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_adapter_status)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_max_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_max_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
+static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->get_cur_bus_speed)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
-static int has_test_file(struct pci_slot *pci_slot)
+static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops))
- return -ENODEV;
+ return false;
if (slot->ops->hardware_test)
- return 0;
- return -ENOENT;
+ return true;
+ return false;
}
static int fs_add_slot(struct pci_slot *slot)
{
int retval = 0;
- if (has_power_file(slot) == 0) {
- retval = sysfs_create_file(&slot->kobj, &hotplug_slot_attr_power.attr);
+ /* Create symbolic link to the hotplug driver module */
+ pci_hp_create_module_link(slot);
+
+ if (has_power_file(slot)) {
+ retval = sysfs_create_file(&slot->kobj,
+ &hotplug_slot_attr_power.attr);
if (retval)
goto exit_power;
}
- if (has_attention_file(slot) == 0) {
+ if (has_attention_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_attention.attr);
if (retval)
goto exit_attention;
}
- if (has_latch_file(slot) == 0) {
+ if (has_latch_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_latch.attr);
if (retval)
goto exit_latch;
}
- if (has_adapter_file(slot) == 0) {
+ if (has_adapter_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
if (retval)
goto exit_adapter;
}
- if (has_max_bus_speed_file(slot) == 0) {
+ if (has_max_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
+ &hotplug_slot_attr_max_bus_speed.attr);
if (retval)
goto exit_max_speed;
}
- if (has_cur_bus_speed_file(slot) == 0) {
+ if (has_cur_bus_speed_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
+ &hotplug_slot_attr_cur_bus_speed.attr);
if (retval)
goto exit_cur_speed;
}
- if (has_test_file(slot) == 0) {
+ if (has_test_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_test.attr);
if (retval)
@@ -475,55 +479,61 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit;
exit_test:
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
-
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
exit_cur_speed:
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
-
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
exit_max_speed:
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
-
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
exit_adapter:
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
-
exit_latch:
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
-
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
exit_attention:
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
exit_power:
+ pci_hp_remove_module_link(slot);
exit:
return retval;
}
static void fs_remove_slot(struct pci_slot *slot)
{
- if (has_power_file(slot) == 0)
+ if (has_power_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
- if (has_attention_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_attention.attr);
+ if (has_attention_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_attention.attr);
- if (has_latch_file(slot) == 0)
+ if (has_latch_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_latch.attr);
- if (has_adapter_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
+ if (has_adapter_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_presence.attr);
- if (has_max_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
+ if (has_max_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_max_bus_speed.attr);
- if (has_cur_bus_speed_file(slot) == 0)
- sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_cur_bus_speed.attr);
+ if (has_cur_bus_speed_file(slot))
+ sysfs_remove_file(&slot->kobj,
+ &hotplug_slot_attr_cur_bus_speed.attr);
- if (has_test_file(slot) == 0)
+ if (has_test_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
+
+ pci_hp_remove_module_link(slot);
}
static struct hotplug_slot *get_slot_from_name (const char *name)
@@ -540,10 +550,10 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
}
/**
- * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
+ * __pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
* @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register
- * @slot_nr: slot number
+ * @devnr: device number
* @name: name registered with kobject core
*
* Registers a hotplug slot with the pci hotplug subsystem, which will allow
@@ -551,8 +561,9 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
*
* Returns 0 if successful, anything else for an error.
*/
-int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
- const char *name)
+int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ int devnr, const char *name,
+ struct module *owner, const char *mod_name)
{
int result;
struct pci_slot *pci_slot;
@@ -567,14 +578,16 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr,
return -EINVAL;
}
- mutex_lock(&pci_hp_mutex);
+ slot->ops->owner = owner;
+ slot->ops->mod_name = mod_name;
+ mutex_lock(&pci_hp_mutex);
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
* driver and call it here again. If we've already created the
* pci_slot, the interface will simply bump the refcount.
*/
- pci_slot = pci_create_slot(bus, slot_nr, name, slot);
+ pci_slot = pci_create_slot(bus, devnr, name, slot);
if (IS_ERR(pci_slot)) {
result = PTR_ERR(pci_slot);
goto out;
@@ -684,6 +697,6 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-EXPORT_SYMBOL_GPL(pci_hp_register);
+EXPORT_SYMBOL_GPL(__pci_hp_register);
EXPORT_SYMBOL_GPL(pci_hp_deregister);
EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
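The .owner = THIS_MODULE initializers dropped from the various hotplug_slot_ops
tables in this diff are replaced by the extra owner/mod_name arguments that
__pci_hp_register() now takes above. A minimal sketch of how callers are
expected to keep working, assuming pci_hotplug.h wraps the new function in a
pci_hp_register() macro along these lines (example_register_slot() is an
invented name, shown only for illustration):

/* Sketch only: the wrapper supplies THIS_MODULE and KBUILD_MODNAME, so the
 * ops table no longer needs an .owner field.
 */
#define pci_hp_register(slot, pbus, devnr, name) \
	__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)

static int example_register_slot(struct hotplug_slot *hp_slot,
				 struct pci_bus *pbus, int devnr)
{
	return pci_hp_register(hp_slot, pbus, devnr, "example-slot");
}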
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 0a368547e633..e6cf096498be 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -81,7 +81,6 @@ struct slot {
struct hpc_ops *hpc_ops;
struct hotplug_slot *hotplug_slot;
struct list_head slot_list;
- unsigned long last_emi_toggle;
struct delayed_work work; /* work for button event */
struct mutex lock;
};
@@ -203,8 +202,6 @@ struct hpc_ops {
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_emi_status)(struct slot *slot, u8 *status);
- int (*toggle_emi)(struct slot *slot);
int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_max_lnk_width)(struct slot *slot, enum pcie_link_width *val);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index fb254b2454de..2317557fdee6 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -73,7 +73,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -85,99 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
.get_cur_bus_speed = get_cur_bus_speed,
};
-/*
- * Check the status of the Electro Mechanical Interlock (EMI)
- */
-static int get_lock_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
- return (slot->hpc_ops->get_emi_status(slot, value));
-}
-
-/*
- * sysfs interface for the Electro Mechanical Interlock (EMI)
- * 1 == locked, 0 == unlocked
- */
-static ssize_t lock_read_file(struct hotplug_slot *slot, char *buf)
-{
- int retval;
- u8 value;
-
- retval = get_lock_status(slot, &value);
- if (retval)
- goto lock_read_exit;
- retval = sprintf (buf, "%d\n", value);
-
-lock_read_exit:
- return retval;
-}
-
-/*
- * Change the status of the Electro Mechanical Interlock (EMI)
- * This is a toggle - in addition there must be at least 1 second
- * in between toggles.
- */
-static int set_lock_status(struct hotplug_slot *hotplug_slot, u8 status)
-{
- struct slot *slot = hotplug_slot->private;
- int retval;
- u8 value;
-
- mutex_lock(&slot->ctrl->crit_sect);
-
- /* has it been >1 sec since our last toggle? */
- if ((get_seconds() - slot->last_emi_toggle) < 1) {
- mutex_unlock(&slot->ctrl->crit_sect);
- return -EINVAL;
- }
-
- /* see what our current state is */
- retval = get_lock_status(hotplug_slot, &value);
- if (retval || (value == status))
- goto set_lock_exit;
-
- slot->hpc_ops->toggle_emi(slot);
-set_lock_exit:
- mutex_unlock(&slot->ctrl->crit_sect);
- return 0;
-}
-
-/*
- * sysfs interface which allows the user to toggle the Electro Mechanical
- * Interlock. Valid values are either 0 or 1. 0 == unlock, 1 == lock
- */
-static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot,
- const char *buf, size_t count)
-{
- struct slot *slot = hotplug_slot->private;
- unsigned long llock;
- u8 lock;
- int retval = 0;
-
- llock = simple_strtoul(buf, NULL, 10);
- lock = (u8)(llock & 0xff);
-
- switch (lock) {
- case 0:
- case 1:
- retval = set_lock_status(hotplug_slot, lock);
- break;
- default:
- ctrl_err(slot->ctrl, "%d is an invalid lock value\n",
- lock);
- retval = -EINVAL;
- }
- if (retval)
- return retval;
- return count;
-}
-
-static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
- .attr = {.name = "lock", .mode = S_IFREG | S_IRUGO | S_IWUSR},
- .show = lock_read_file,
- .store = lock_write_file
-};
-
/**
* release_slot - free up the memory used by a slot
* @hotplug_slot: slot to free
@@ -236,17 +142,6 @@ static int init_slots(struct controller *ctrl)
get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status);
- /* create additional sysfs entries */
- if (EMI(ctrl)) {
- retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
- if (retval) {
- pci_hp_deregister(hotplug_slot);
- ctrl_err(ctrl, "Cannot create additional sysfs "
- "entries\n");
- goto error_info;
- }
- }
}
return 0;
@@ -261,13 +156,8 @@ error:
static void cleanup_slots(struct controller *ctrl)
{
struct slot *slot;
-
- list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
- if (EMI(ctrl))
- sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
- &hotplug_slot_attr_lock.attr);
+ list_for_each_entry(slot, &ctrl->slot_list, slot_list)
pci_hp_deregister(slot->hotplug_slot);
- }
}
/*
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 07bd32151146..52813257e5bf 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -422,35 +422,6 @@ static int hpc_query_power_fault(struct slot *slot)
return !!(slot_status & PCI_EXP_SLTSTA_PFD);
}
-static int hpc_get_emi_status(struct slot *slot, u8 *status)
-{
- struct controller *ctrl = slot->ctrl;
- u16 slot_status;
- int retval;
-
- retval = pciehp_readw(ctrl, PCI_EXP_SLTSTA, &slot_status);
- if (retval) {
- ctrl_err(ctrl, "Cannot check EMI status\n");
- return retval;
- }
- *status = !!(slot_status & PCI_EXP_SLTSTA_EIS);
- return retval;
-}
-
-static int hpc_toggle_emi(struct slot *slot)
-{
- u16 slot_cmd;
- u16 cmd_mask;
- int rc;
-
- slot_cmd = PCI_EXP_SLTCTL_EIC;
- cmd_mask = PCI_EXP_SLTCTL_EIC;
- rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
- slot->last_emi_toggle = get_seconds();
-
- return rc;
-}
-
static int hpc_set_attention_status(struct slot *slot, u8 value)
{
struct controller *ctrl = slot->ctrl;
@@ -874,8 +845,6 @@ static struct hpc_ops pciehp_hpc_ops = {
.get_attention_status = hpc_get_attention_status,
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
- .get_emi_status = hpc_get_emi_status,
- .toggle_emi = hpc_toggle_emi,
.get_max_bus_speed = hpc_get_max_lnk_speed,
.get_cur_bus_speed = hpc_get_cur_lnk_speed,
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index e3dd6cf9e89f..5175d9b26f0b 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -82,7 +82,6 @@ static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops skel_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 95d02a08fdc7..c159223389ec 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -423,7 +423,6 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
}
struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index 2d6da78fddb6..a4494d78e7c2 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -83,7 +83,6 @@ static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
static struct hotplug_slot_ops sn_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index fe8d149c2293..8a520a3d0f59 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -69,7 +69,6 @@ static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *val
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
- .owner = THIS_MODULE,
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index cd389162735f..178853a07440 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -53,6 +53,8 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+#define MAX_AGAW_WIDTH 64
+
#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
@@ -131,8 +133,6 @@ static inline void context_set_fault_enable(struct context_entry *context)
context->lo &= (((u64)-1) << 2) | 1;
}
-#define CONTEXT_TT_MULTI_LEVEL 0
-
static inline void context_set_translation_type(struct context_entry *context,
unsigned long value)
{
@@ -256,6 +256,7 @@ struct device_domain_info {
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
struct dmar_domain *domain; /* pointer to domain */
};
@@ -401,17 +402,13 @@ void free_iova_mem(struct iova *iova)
static inline int width_to_agaw(int width);
-/* calculate agaw for each iommu.
- * "SAGAW" may be different across iommus, use a default agaw, and
- * get a supported less agaw for iommus that don't support the default agaw.
- */
-int iommu_calculate_agaw(struct intel_iommu *iommu)
+static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw = -1;
sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ for (agaw = width_to_agaw(max_gaw);
agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
@@ -420,6 +417,24 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return agaw;
}
+/*
+ * Calculate max SAGAW for each iommu.
+ */
+int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
+}
+
+/*
+ * calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus; use a default agaw, and
+ * fall back to a smaller supported agaw for iommus that don't support the default agaw.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+ return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+}
+
/* in native case, each domain is related to only one iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
@@ -809,7 +824,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
static void iommu_set_root_entry(struct intel_iommu *iommu)
{
void *addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flag;
addr = iommu->root_entry;
@@ -817,12 +832,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
spin_lock_irqsave(&iommu->register_lock, flag);
dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr));
- cmd = iommu->gcmd | DMA_GCMD_SRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_RTPS), sts);
+ readl, (sts & DMA_GSTS_RTPS), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
@@ -834,39 +848,25 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
- val = iommu->gcmd | DMA_GCMD_WBF;
spin_lock_irqsave(&iommu->register_lock, flag);
- writel(val, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(val & DMA_GSTS_WBFS)), val);
+ readl, (!(val & DMA_GSTS_WBFS)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+ u16 did, u16 source_id, u8 function_mask,
+ u64 type)
{
u64 val = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_CCMD_GLOBAL_INVL:
val = DMA_CCMD_GLOBAL_INVL;
@@ -891,33 +891,16 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
dmar_readq, (!(val & DMA_CCMD_ICC)), val);
spin_unlock_irqrestore(&iommu->register_lock, flag);
-
- /* flush context entry will implicitly flush write buffer */
- return 0;
}
/* return value determine if we need a write buffer flush */
-static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int size_order, u64 type,
- int non_present_entry_flush)
+static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int size_order, u64 type)
{
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
u64 val = 0, val_iva = 0;
unsigned long flag;
- /*
- * In the non-present entry flush case, if hardware doesn't cache
- * non-present entry we do nothing and if hardware cache non-present
- * entry, we flush entries of domain 0 (the domain id is used to cache
- * any non-present entries)
- */
- if (non_present_entry_flush) {
- if (!cap_caching_mode(iommu->cap))
- return 1;
- else
- did = 0;
- }
-
switch (type) {
case DMA_TLB_GLOBAL_FLUSH:
/* global flush doesn't need set IVA_REG */
@@ -965,37 +948,101 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n",
(unsigned long long)DMA_TLB_IIRG(type),
(unsigned long long)DMA_TLB_IAIG(val));
- /* flush iotlb entry will implicitly flush write buffer */
- return 0;
}
-static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
- u64 addr, unsigned int pages, int non_present_entry_flush)
+static struct device_domain_info *iommu_support_dev_iotlb(
+ struct dmar_domain *domain, int segment, u8 bus, u8 devfn)
+{
+ int found = 0;
+ unsigned long flags;
+ struct device_domain_info *info;
+ struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn);
+
+ if (!ecap_dev_iotlb_support(iommu->ecap))
+ return NULL;
+
+ if (!iommu->qi)
+ return NULL;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->bus == bus && info->devfn == devfn) {
+ found = 1;
+ break;
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (!found || !info->dev)
+ return NULL;
+
+ if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS))
+ return NULL;
+
+ if (!dmar_find_matched_atsr_unit(info->dev))
+ return NULL;
+
+ info->iommu = iommu;
+
+ return info;
+}
+
+static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
- unsigned int mask;
+ if (!info)
+ return;
+
+ pci_enable_ats(info->dev, VTD_PAGE_SHIFT);
+}
+
+static void iommu_disable_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ return;
+
+ pci_disable_ats(info->dev);
+}
+
+static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+ u64 addr, unsigned mask)
+{
+ u16 sid, qdep;
+ unsigned long flags;
+ struct device_domain_info *info;
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_for_each_entry(info, &domain->devices, link) {
+ if (!info->dev || !pci_ats_enabled(info->dev))
+ continue;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = pci_ats_queue_depth(info->dev);
+ qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ }
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+}
+
+static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
+ u64 addr, unsigned int pages)
+{
+ unsigned int mask = ilog2(__roundup_pow_of_two(pages));
BUG_ON(addr & (~VTD_PAGE_MASK));
BUG_ON(pages == 0);
- /* Fallback to domain selective flush if no PSI support */
- if (!cap_pgsel_inv(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH,
- non_present_entry_flush);
-
/*
+ * Fallback to domain selective flush if no PSI support or the size is
+ * too big.
* PSI requires page size to be 2 ^ x, and the base address is naturally
* aligned to the size
*/
- mask = ilog2(__roundup_pow_of_two(pages));
- /* Fallback to domain selective flush if size is too big */
- if (mask > cap_max_amask_val(iommu->cap))
- return iommu->flush.flush_iotlb(iommu, did, 0, 0,
- DMA_TLB_DSI_FLUSH, non_present_entry_flush);
-
- return iommu->flush.flush_iotlb(iommu, did, addr, mask,
- DMA_TLB_PSI_FLUSH,
- non_present_entry_flush);
+ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
+ iommu->flush.flush_iotlb(iommu, did, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ else
+ iommu->flush.flush_iotlb(iommu, did, addr, mask,
+ DMA_TLB_PSI_FLUSH);
+ if (did)
+ iommu_flush_dev_iotlb(iommu->domains[did], addr, mask);
}
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
@@ -1021,13 +1068,13 @@ static int iommu_enable_translation(struct intel_iommu *iommu)
unsigned long flags;
spin_lock_irqsave(&iommu->register_lock, flags);
- writel(iommu->gcmd|DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
+ iommu->gcmd |= DMA_GCMD_TE;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (sts & DMA_GSTS_TES), sts);
+ readl, (sts & DMA_GSTS_TES), sts);
- iommu->gcmd |= DMA_GCMD_TE;
spin_unlock_irqrestore(&iommu->register_lock, flags);
return 0;
}
@@ -1043,7 +1090,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu)
/* Make sure hardware complete it */
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
- readl, (!(sts & DMA_GSTS_TES)), sts);
+ readl, (!(sts & DMA_GSTS_TES)), sts);
spin_unlock_irqrestore(&iommu->register_lock, flag);
return 0;
@@ -1325,8 +1372,8 @@ static void domain_exit(struct dmar_domain *domain)
free_domain_mem(domain);
}
-static int domain_context_mapping_one(struct dmar_domain *domain,
- int segment, u8 bus, u8 devfn)
+static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
+ u8 bus, u8 devfn, int translation)
{
struct context_entry *context;
unsigned long flags;
@@ -1336,10 +1383,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
unsigned long ndomains;
int id;
int agaw;
+ struct device_domain_info *info = NULL;
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
BUG_ON(!domain->pgd);
+ BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
+ translation != CONTEXT_TT_MULTI_LEVEL);
iommu = device_to_iommu(segment, bus, devfn);
if (!iommu)
@@ -1399,21 +1450,44 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
context_set_domain_id(context, id);
- context_set_address_width(context, iommu->agaw);
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+
+ if (translation != CONTEXT_TT_PASS_THROUGH) {
+ info = iommu_support_dev_iotlb(domain, segment, bus, devfn);
+ translation = info ? CONTEXT_TT_DEV_IOTLB :
+ CONTEXT_TT_MULTI_LEVEL;
+ }
+ /*
+ * In pass through mode, AW must be programmed to indicate the largest
+ * AGAW value supported by hardware. And ASR is ignored by hardware.
+ */
+ if (unlikely(translation == CONTEXT_TT_PASS_THROUGH))
+ context_set_address_width(context, iommu->msagaw);
+ else {
+ context_set_address_root(context, virt_to_phys(pgd));
+ context_set_address_width(context, iommu->agaw);
+ }
+
+ context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
domain_flush_cache(domain, context, sizeof(*context));
- /* it's a non-present to present mapping */
- if (iommu->flush.flush_context(iommu, domain->id,
- (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL, 1))
+ /*
+ * It's a non-present to present mapping. If hardware doesn't cache
+ * non-present entries we only need to flush the write-buffer. If it
+ * _does_ cache non-present entries, then it does so in the special
+ * domain #0, which we have to flush:
+ */
+ if (cap_caching_mode(iommu->cap)) {
+ iommu->flush.flush_context(iommu, 0,
+ (((u16)bus) << 8) | devfn,
+ DMA_CCMD_MASK_NOBIT,
+ DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
+ } else {
iommu_flush_write_buffer(iommu);
- else
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
-
+ }
+ iommu_enable_dev_iotlb(info);
spin_unlock_irqrestore(&iommu->lock, flags);
spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1426,13 +1500,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
}
static int
-domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
+domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
+ int translation)
{
int ret;
struct pci_dev *tmp, *parent;
ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus),
- pdev->bus->number, pdev->devfn);
+ pdev->bus->number, pdev->devfn,
+ translation);
if (ret)
return ret;
@@ -1446,7 +1522,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
ret = domain_context_mapping_one(domain,
pci_domain_nr(parent->bus),
parent->bus->number,
- parent->devfn);
+ parent->devfn, translation);
if (ret)
return ret;
parent = parent->bus->self;
@@ -1454,12 +1530,14 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->subordinate),
- tmp->subordinate->number, 0);
+ tmp->subordinate->number, 0,
+ translation);
else /* this is a legacy PCI bridge */
return domain_context_mapping_one(domain,
pci_domain_nr(tmp->bus),
tmp->bus->number,
- tmp->devfn);
+ tmp->devfn,
+ translation);
}
static int domain_context_mapped(struct pci_dev *pdev)
@@ -1540,9 +1618,8 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
clear_context_table(iommu, bus, devfn);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
}
static void domain_remove_dev_info(struct dmar_domain *domain)
@@ -1561,6 +1638,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
free_devinfo_mem(info);
@@ -1756,7 +1834,7 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
goto error;
/* context entry init */
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL);
if (!ret)
return 0;
error:
@@ -1857,6 +1935,23 @@ static inline void iommu_prepare_isa(void)
}
#endif /* !CONFIG_DMAR_FLPY_WA */
+/* Initialize each context entry as pass through. */
+static int __init init_context_pass_through(void)
+{
+ struct pci_dev *pdev = NULL;
+ struct dmar_domain *domain;
+ int ret;
+
+ for_each_pci_dev(pdev) {
+ domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_PASS_THROUGH);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
@@ -1864,6 +1959,7 @@ static int __init init_dmars(void)
struct pci_dev *pdev;
struct intel_iommu *iommu;
int i, ret;
+ int pass_through = 1;
/*
* for each drhd
@@ -1917,7 +2013,15 @@ static int __init init_dmars(void)
printk(KERN_ERR "IOMMU: allocate root entry failed\n");
goto error;
}
+ if (!ecap_pass_through(iommu->ecap))
+ pass_through = 0;
}
+ if (iommu_pass_through)
+ if (!pass_through) {
+ printk(KERN_INFO
+ "Pass Through is not supported by hardware.\n");
+ iommu_pass_through = 0;
+ }
/*
* Start from the sane iommu hardware state.
@@ -1973,35 +2077,56 @@ static int __init init_dmars(void)
}
/*
- * For each rmrr
- * for each dev attached to rmrr
- * do
- * locate drhd for dev, alloc domain for dev
- * allocate free domain
- * allocate page table entries for rmrr
- * if context not allocated for bus
- * allocate and init context
- * set present in root table for this bus
- * init context with domain, translation etc
- * endfor
- * endfor
+ * If pass through is set and enabled, context entries of all PCI
+ * devices are initialized with the pass through translation type.
*/
- for_each_rmrr_units(rmrr) {
- for (i = 0; i < rmrr->devices_cnt; i++) {
- pdev = rmrr->devices[i];
- /* some BIOS lists non-exist devices in DMAR table */
- if (!pdev)
- continue;
- ret = iommu_prepare_rmrr_dev(rmrr, pdev);
- if (ret)
- printk(KERN_ERR
- "IOMMU: mapping reserved region failed\n");
+ if (iommu_pass_through) {
+ ret = init_context_pass_through();
+ if (ret) {
+ printk(KERN_ERR "IOMMU: Pass through init failed.\n");
+ iommu_pass_through = 0;
}
}
- iommu_prepare_gfx_mapping();
+ /*
+ * If pass through is not set or not enabled, set up context entries for
+ * identity mappings for rmrr, gfx, and isa.
+ */
+ if (!iommu_pass_through) {
+ /*
+ * For each rmrr
+ * for each dev attached to rmrr
+ * do
+ * locate drhd for dev, alloc domain for dev
+ * allocate free domain
+ * allocate page table entries for rmrr
+ * if context not allocated for bus
+ * allocate and init context
+ * set present in root table for this bus
+ * init context with domain, translation etc
+ * endfor
+ * endfor
+ */
+ for_each_rmrr_units(rmrr) {
+ for (i = 0; i < rmrr->devices_cnt; i++) {
+ pdev = rmrr->devices[i];
+ /*
+ * some BIOSes list non-existent devices in the
+ * DMAR table.
+ */
+ if (!pdev)
+ continue;
+ ret = iommu_prepare_rmrr_dev(rmrr, pdev);
+ if (ret)
+ printk(KERN_ERR
+ "IOMMU: mapping reserved region failed\n");
+ }
+ }
+
+ iommu_prepare_gfx_mapping();
- iommu_prepare_isa();
+ iommu_prepare_isa();
+ }
/*
* for each drhd
@@ -2023,10 +2148,8 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
- 0);
- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- 0);
+ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
ret = iommu_enable_translation(iommu);
@@ -2112,7 +2235,8 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
/* make sure context mapping is ok */
if (unlikely(!domain_context_mapped(pdev))) {
- ret = domain_context_mapping(domain, pdev);
+ ret = domain_context_mapping(domain, pdev,
+ CONTEXT_TT_MULTI_LEVEL);
if (ret) {
printk(KERN_ERR
"Domain context map for %s failed",
@@ -2173,10 +2297,11 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
if (ret)
goto error;
- /* it's a non-present to present mapping */
- ret = iommu_flush_iotlb_psi(iommu, domain->id,
- start_paddr, size >> VTD_PAGE_SHIFT, 1);
- if (ret)
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, start_paddr,
+ size >> VTD_PAGE_SHIFT);
+ else
iommu_flush_write_buffer(iommu);
return start_paddr + ((u64)paddr & (~PAGE_MASK));
@@ -2210,15 +2335,22 @@ static void flush_unmaps(void)
if (!iommu)
continue;
- if (deferred_flush[i].next) {
- iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
- for (j = 0; j < deferred_flush[i].next; j++) {
- __free_iova(&deferred_flush[i].domain[j]->iovad,
- deferred_flush[i].iova[j]);
- }
- deferred_flush[i].next = 0;
+ if (!deferred_flush[i].next)
+ continue;
+
+ iommu->flush.flush_iotlb(iommu, 0, 0, 0,
+ DMA_TLB_GLOBAL_FLUSH);
+ for (j = 0; j < deferred_flush[i].next; j++) {
+ unsigned long mask;
+ struct iova *iova = deferred_flush[i].iova[j];
+
+ mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
+ mask = ilog2(mask >> VTD_PAGE_SHIFT);
+ iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
+ iova->pfn_lo << PAGE_SHIFT, mask);
+ __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
}
+ deferred_flush[i].next = 0;
}
list_size = 0;
@@ -2291,9 +2423,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
if (intel_iommu_strict) {
- if (iommu_flush_iotlb_psi(iommu,
- domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+ size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
} else {
@@ -2384,9 +2515,8 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
/* free page tables */
dma_pte_free_pagetable(domain, start_addr, start_addr + size);
- if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
- size >> VTD_PAGE_SHIFT, 0))
- iommu_flush_write_buffer(iommu);
+ iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
+ size >> VTD_PAGE_SHIFT);
/* free iova */
__free_iova(&domain->iovad, iova);
@@ -2478,10 +2608,13 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
offset += size;
}
- /* it's a non-present to present mapping */
- if (iommu_flush_iotlb_psi(iommu, domain->id,
- start_addr, offset >> VTD_PAGE_SHIFT, 1))
+ /* it's a non-present to present mapping. Only flush if caching mode */
+ if (cap_caching_mode(iommu->cap))
+ iommu_flush_iotlb_psi(iommu, 0, start_addr,
+ offset >> VTD_PAGE_SHIFT);
+ else
iommu_flush_write_buffer(iommu);
+
return nelems;
}
@@ -2640,9 +2773,9 @@ static int init_iommu_hw(void)
iommu_set_root_entry(iommu);
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
iommu_disable_protect_mem_regions(iommu);
iommu_enable_translation(iommu);
}
@@ -2657,9 +2790,9 @@ static void iommu_flush_all(void)
for_each_active_iommu(iommu, drhd) {
iommu->flush.flush_context(iommu, 0, 0, 0,
- DMA_CCMD_GLOBAL_INVL, 0);
+ DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
- DMA_TLB_GLOBAL_FLUSH, 0);
+ DMA_TLB_GLOBAL_FLUSH);
}
}
@@ -2782,7 +2915,7 @@ int __init intel_iommu_init(void)
* Check the need for DMA-remapping initialization now.
* Above initialization will also be used by Interrupt-remapping.
*/
- if (no_iommu || swiotlb || dmar_disabled)
+ if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
return -ENODEV;
iommu_init_mempool();
@@ -2802,7 +2935,15 @@ int __init intel_iommu_init(void)
init_timer(&unmap_timer);
force_iommu = 1;
- dma_ops = &intel_dma_ops;
+
+ if (!iommu_pass_through) {
+ printk(KERN_INFO
+ "Multi-level page-table translation for DMAR.\n");
+ dma_ops = &intel_dma_ops;
+ } else
+ printk(KERN_INFO
+ "DMAR: Pass through translation for DMAR.\n");
+
init_iommu_sysfs();
register_iommu(&intel_iommu_ops);
@@ -2888,6 +3029,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
info->dev->dev.archdata.iommu = NULL;
spin_unlock_irqrestore(&device_domain_lock, flags);
+ iommu_disable_dev_iotlb(info);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, pdev);
free_devinfo_mem(info);
@@ -2938,6 +3080,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags1);
+ iommu_disable_dev_iotlb(info);
iommu = device_to_iommu(info->segment, info->bus, info->devfn);
iommu_detach_dev(iommu, info->bus, info->devfn);
iommu_detach_dependent_devices(iommu, info->dev);
@@ -3142,11 +3285,11 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EFAULT;
}
- ret = domain_context_mapping(dmar_domain, pdev);
+ ret = vm_domain_add_dev_info(dmar_domain, pdev);
if (ret)
return ret;
- ret = vm_domain_add_dev_info(dmar_domain, pdev);
+ ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
return ret;
}
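The iommu_pass_through flag tested throughout init_dmars() above is a boot-time
switch; in this series it is expected to be set from the kernel command line
(the "iommu=pt" option on x86). A rough sketch of the kind of early-param
handler that sets it; the handler name is illustrative, and the real arch code
parses a comma-separated option list:

/* Sketch only, assuming iommu_pass_through is the global this diff tests. */
static int __init example_iommu_setup(char *str)
{
	if (str && !strncmp(str, "pt", 2))
		iommu_pass_through = 1;
	return 0;
}
early_param("iommu", example_iommu_setup);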
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3a0cb0bb0593..1e83c8c5f985 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -409,7 +409,7 @@ int free_irte(int irq)
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
u64 addr;
- u32 cmd, sts;
+ u32 sts;
unsigned long flags;
addr = virt_to_phys((void *)iommu->ir_table->base);
@@ -420,9 +420,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
(addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);
/* Set interrupt-remapping table pointer */
- cmd = iommu->gcmd | DMA_GCMD_SIRTP;
iommu->gcmd |= DMA_GCMD_SIRTP;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRTPS), sts);
@@ -437,9 +436,8 @@ static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
spin_lock_irqsave(&iommu->register_lock, flags);
/* Enable interrupt-remapping */
- cmd = iommu->gcmd | DMA_GCMD_IRE;
iommu->gcmd |= DMA_GCMD_IRE;
- writel(cmd, iommu->reg + DMAR_GCMD_REG);
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b497daab3d4a..e3a87210e947 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -5,6 +5,7 @@
*
* PCI Express I/O Virtualization (IOV) support.
* Single Root IOV 1.0
+ * Address Translation Service 1.0
*/
#include <linux/pci.h>
@@ -110,7 +111,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
}
if (reset)
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
pci_device_add(virtfn, virtfn->bus);
mutex_unlock(&iov->dev->sriov->lock);
@@ -164,7 +165,7 @@ static void virtfn_remove(struct pci_dev *dev, int id, int reset)
if (reset) {
device_release_driver(&virtfn->dev);
- pci_execute_reset_function(virtfn);
+ __pci_reset_function(virtfn);
}
sprintf(buf, "virtfn%u", id);
@@ -487,13 +488,15 @@ found:
iov->self = dev;
pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+ if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
+ iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);
if (pdev)
iov->dev = pci_dev_get(pdev);
- else {
+ else
iov->dev = dev;
- mutex_init(&iov->lock);
- }
+
+ mutex_init(&iov->lock);
dev->sriov = iov;
dev->is_physfn = 1;
@@ -513,11 +516,11 @@ static void sriov_release(struct pci_dev *dev)
{
BUG_ON(dev->sriov->nr_virtfn);
- if (dev == dev->sriov->dev)
- mutex_destroy(&dev->sriov->lock);
- else
+ if (dev != dev->sriov->dev)
pci_dev_put(dev->sriov->dev);
+ mutex_destroy(&dev->sriov->lock);
+
kfree(dev->sriov);
dev->sriov = NULL;
}
@@ -679,3 +682,145 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
+
+static int ats_alloc_one(struct pci_dev *dev, int ps)
+{
+ int pos;
+ u16 cap;
+ struct pci_ats *ats;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ ats = kzalloc(sizeof(*ats), GFP_KERNEL);
+ if (!ats)
+ return -ENOMEM;
+
+ ats->pos = pos;
+ ats->stu = ps;
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+ ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+ dev->ats = ats;
+
+ return 0;
+}
+
+static void ats_free_one(struct pci_dev *dev)
+{
+ kfree(dev->ats);
+ dev->ats = NULL;
+}
+
+/**
+ * pci_enable_ats - enable the ATS capability
+ * @dev: the PCI device
+ * @ps: the IOMMU page shift
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ int rc;
+ u16 ctrl;
+
+ BUG_ON(dev->ats && dev->ats->is_enabled);
+
+ if (ps < PCI_ATS_MIN_STU)
+ return -EINVAL;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ if (pdev->ats)
+ rc = pdev->ats->stu == ps ? 0 : -EINVAL;
+ else
+ rc = ats_alloc_one(pdev, ps);
+
+ if (!rc)
+ pdev->ats->ref_cnt++;
+ mutex_unlock(&pdev->sriov->lock);
+ if (rc)
+ return rc;
+ }
+
+ if (!dev->is_physfn) {
+ rc = ats_alloc_one(dev, ps);
+ if (rc)
+ return rc;
+ }
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 1;
+
+ return 0;
+}
+
+/**
+ * pci_disable_ats - disable the ATS capability
+ * @dev: the PCI device
+ */
+void pci_disable_ats(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ BUG_ON(!dev->ats || !dev->ats->is_enabled);
+
+ pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
+ ctrl &= ~PCI_ATS_CTRL_ENABLE;
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+
+ dev->ats->is_enabled = 0;
+
+ if (dev->is_physfn || dev->is_virtfn) {
+ struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;
+
+ mutex_lock(&pdev->sriov->lock);
+ pdev->ats->ref_cnt--;
+ if (!pdev->ats->ref_cnt)
+ ats_free_one(pdev);
+ mutex_unlock(&pdev->sriov->lock);
+ }
+
+ if (!dev->is_physfn)
+ ats_free_one(dev);
+}
+
+/**
+ * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
+ * @dev: the PCI device
+ *
+ * Returns the queue depth on success, or negative on failure.
+ *
+ * The ATS spec uses 0 in the Invalidate Queue Depth field to
+ * indicate that the function can accept 32 Invalidate Requests.
+ * But here we use the `real' values (i.e. 1~32) for the Queue
+ * Depth; and 0 indicates the function shares the Queue with
+ * other functions (doesn't exclusively own a Queue).
+ */
+int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+
+ if (dev->is_virtfn)
+ return 0;
+
+ if (dev->ats)
+ return dev->ats->qdep;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
+
+ return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
+ PCI_ATS_MAX_QDEP;
+}
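
The three ATS helpers added above are what the Intel IOMMU portion of this patch builds its device-IOTLB handling on. The following is a minimal usage sketch, not part of the patch: it assumes the caller lives under drivers/pci (so it can include the private pci.h), and PAGE_SHIFT is a stand-in for whatever page shift the IOMMU actually uses (e.g. VTD_PAGE_SHIFT in intel-iommu).

/*
 * Illustrative sketch only: driving the new ATS interface.
 */
#include <linux/pci.h>
#include "pci.h"	/* drivers/pci internal: pci_enable_ats() and friends */

static void example_enable_dev_iotlb(struct pci_dev *pdev)
{
	int qdep;

	if (pci_enable_ats(pdev, PAGE_SHIFT))
		return;		/* no usable ATS capability on this function */

	qdep = pci_ats_queue_depth(pdev);
	dev_dbg(&pdev->dev, "ATS enabled, invalidate queue depth %d\n", qdep);
}

static void example_disable_dev_iotlb(struct pci_dev *pdev)
{
	if (pci_ats_enabled(pdev))
		pci_disable_ats(pdev);
}
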
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 362773247fbf..d9f06fbfa0bf 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -75,22 +75,17 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
}
#endif
-static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
+static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
- if (pos) {
- pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
- control &= ~PCI_MSI_FLAGS_ENABLE;
- if (enable)
- control |= PCI_MSI_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
- }
-}
+ BUG_ON(!pos);
-static void msi_set_enable(struct pci_dev *dev, int enable)
-{
- __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
}
static void msix_set_enable(struct pci_dev *dev, int enable)
@@ -131,9 +126,6 @@ static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
* mask all MSI interrupts by clearing the MSI enable bit does not work
* reliably as devices without an INTx disable bit will then generate a
* level IRQ which will never be cleared.
- *
- * Returns 1 if it succeeded in masking the interrupt and 0 if the device
- * doesn't support MSI masking.
*/
static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
@@ -303,7 +295,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pos = entry->msi_attrib.pos;
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 0);
+ msi_set_enable(dev, pos, 0);
write_msi_msg(dev->irq, &entry->msg);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -321,22 +313,22 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
+ BUG_ON(list_empty(&dev->msi_list));
+ entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ pos = entry->msi_attrib.pos;
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
/* route the table */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 0);
+ control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
write_msi_msg(entry->irq, &entry->msg);
msix_mask_irq(entry, entry->masked);
}
- BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- pos = entry->msi_attrib.pos;
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
control &= ~PCI_MSIX_FLAGS_MASKALL;
- control |= PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
}
@@ -365,9 +357,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
u16 control;
unsigned mask;
- msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
+
pci_read_config_word(dev, msi_control_reg(pos), &control);
/* MSI Entry Initialization */
entry = alloc_msi_entry(dev);
@@ -381,7 +373,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
entry->msi_attrib.pos = pos;
- entry->mask_pos = msi_mask_bits_reg(pos, entry->msi_attrib.is_64);
+ entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
/* All MSIs are unmasked by default, Mask them all */
if (entry->msi_attrib.maskbit)
pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
@@ -399,7 +391,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
- msi_set_enable(dev, 1);
+ msi_set_enable(dev, pos, 1);
dev->msi_enabled = 1;
dev->irq = entry->irq;
@@ -427,11 +419,14 @@ static int msix_capability_init(struct pci_dev *dev,
u8 bir;
void __iomem *base;
- msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */
-
pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
/* Request & Map MSI-X table region */
- pci_read_config_word(dev, msi_control_reg(pos), &control);
nr_entries = multi_msix_capable(control);
pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
@@ -442,7 +437,6 @@ static int msix_capability_init(struct pci_dev *dev,
if (base == NULL)
return -ENOMEM;
- /* MSI-X Table Initialization */
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
if (!entry)
@@ -455,7 +449,6 @@ static int msix_capability_init(struct pci_dev *dev,
entry->msi_attrib.default_irq = dev->irq;
entry->msi_attrib.pos = pos;
entry->mask_base = base;
- msix_mask_irq(entry, 1);
list_add_tail(&entry->list, &dev->msi_list);
}
@@ -480,22 +473,31 @@ static int msix_capability_init(struct pci_dev *dev,
return ret;
}
+ /*
+ * Some devices require MSI-X to be enabled before we can touch the
+ * MSI-X registers. We need to mask all the vectors to prevent
+ * interrupts coming in before they're fully set up.
+ */
+ control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
i = 0;
list_for_each_entry(entry, &dev->msi_list, list) {
entries[i].vector = entry->irq;
set_irq_msi(entry->irq, entry);
+ j = entries[i].entry;
+ entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
+ msix_mask_irq(entry, 1);
i++;
}
- /* Set MSI-X enabled bits */
+
+ /* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
- msix_set_enable(dev, 1);
dev->msix_enabled = 1;
- list_for_each_entry(entry, &dev->msi_list, list) {
- int vector = entry->msi_attrib.entry_nr;
- entry->masked = readl(base + vector * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
- }
+ control &= ~PCI_MSIX_FLAGS_MASKALL;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
return 0;
}
@@ -596,17 +598,20 @@ void pci_msi_shutdown(struct pci_dev *dev)
struct msi_desc *desc;
u32 mask;
u16 ctrl;
+ unsigned pos;
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- msi_set_enable(dev, 0);
+ BUG_ON(list_empty(&dev->msi_list));
+ desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ pos = desc->msi_attrib.pos;
+
+ msi_set_enable(dev, pos, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
- BUG_ON(list_empty(&dev->msi_list));
- desc = list_first_entry(&dev->msi_list, struct msi_desc, list);
- pci_read_config_word(dev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &ctrl);
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &ctrl);
mask = msi_capable_mask(ctrl);
msi_mask_irq(desc, mask, ~mask);
@@ -648,10 +653,7 @@ static int msi_free_irqs(struct pci_dev* dev)
list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
if (entry->msi_attrib.is_msix) {
- writel(1, entry->mask_base + entry->msi_attrib.entry_nr
- * PCI_MSIX_ENTRY_SIZE
- + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);
-
+ msix_mask_irq(entry, 1);
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
@@ -691,8 +693,8 @@ int pci_msix_table_size(struct pci_dev *dev)
* indicates the successful configuration of MSI-X capability structure
* with new allocated MSI-X irqs. A return of < 0 indicates a failure.
* Or a return of > 0 indicates that driver request is exceeding the number
- * of irqs available. Driver should use the returned value to re-send
- * its request.
+ * of irqs or MSI-X vectors available. The driver should use the returned
+ * value to re-send its request.
**/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
@@ -708,7 +710,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
nr_entries = pci_msix_table_size(dev);
if (nvec > nr_entries)
- return -EINVAL;
+ return nr_entries;
/* Check for any invalid entries */
for (i = 0; i < nvec; i++) {
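
The kernel-doc change and the -EINVAL-to-nr_entries change above mean a driver can now learn how many MSI-X vectors are actually available and retry with fewer. A minimal sketch of that retry pattern follows; the function name and the way nvec is chosen are illustrative, not taken from the patch. The caller is expected to have filled in the .entry fields of the msix_entry array beforehand.

/*
 * Illustration only: the retry loop described by the pci_enable_msix()
 * kernel-doc above.
 */
#include <linux/pci.h>

static int example_setup_msix(struct pci_dev *pdev,
			      struct msix_entry *entries, int nvec)
{
	int rc;

	while (nvec > 0) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* got everything we asked for */
		if (rc < 0)
			return rc;	/* hard failure */
		nvec = rc;		/* retry with the number available */
	}
	return -ENOSPC;
}
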
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index 71f4df2ef654..a0662842550b 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -16,21 +16,15 @@
#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
#define msi_upper_address_reg(base) (base + PCI_MSI_ADDRESS_HI)
#define msi_data_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_DATA_64 : base+PCI_MSI_DATA_32 )
-#define msi_mask_bits_reg(base, is64bit) \
- ( (is64bit == 1) ? base+PCI_MSI_MASK_BIT : base+PCI_MSI_MASK_BIT-4)
-#define msi_disable(control) control &= ~PCI_MSI_FLAGS_ENABLE
+ (base + ((is64bit == 1) ? PCI_MSI_DATA_64 : PCI_MSI_DATA_32))
+#define msi_mask_reg(base, is64bit) \
+ (base + ((is64bit == 1) ? PCI_MSI_MASK_64 : PCI_MSI_MASK_32))
#define is_64bit_address(control) (!!(control & PCI_MSI_FLAGS_64BIT))
#define is_mask_bit_support(control) (!!(control & PCI_MSI_FLAGS_MASKBIT))
#define msix_table_offset_reg(base) (base + 0x04)
#define msix_pba_offset_reg(base) (base + 0x08)
-#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
-#define msix_disable(control) control &= ~PCI_MSIX_FLAGS_ENABLE
#define msix_table_size(control) ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-#define multi_msix_capable msix_table_size
-#define msix_unmask(address) (address & ~PCI_MSIX_FLAGS_BITMASK)
-#define msix_mask(address) (address | PCI_MSIX_FLAGS_BITMASK)
-#define msix_is_pending(address) (address & PCI_MSIX_FLAGS_PENDMASK)
+#define multi_msix_capable(control) msix_table_size((control))
#endif /* MSI_H */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 07bbb9b3b93f..6c93af5ced18 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -485,6 +485,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
+ case PCI_D3hot:
+ case PCI_D3cold:
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
@@ -1208,7 +1210,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
{
int error = 0;
bool pme_done = false;
@@ -1287,15 +1289,14 @@ pci_power_t pci_target_state(struct pci_dev *dev)
default:
target_state = state;
}
+ } else if (!dev->pm_cap) {
+ target_state = PCI_D0;
} else if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
- if (!dev->pm_cap)
- return PCI_POWER_ERROR;
-
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
@@ -1532,7 +1533,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
if (!pin)
return -1;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -1552,7 +1553,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
u8 pin = *pinp;
- while (dev->bus->parent) {
+ while (!pci_is_root_bus(dev->bus)) {
pin = pci_swizzle_interrupt_pin(dev, pin);
dev = dev->bus->self;
}
@@ -2058,111 +2059,177 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
#endif
-static int __pcie_flr(struct pci_dev *dev, int probe)
+static int pcie_flr(struct pci_dev *dev, int probe)
{
- u16 status;
+ int i;
+ int pos;
u32 cap;
- int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ u16 status;
- if (!exppos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
return -ENOTTY;
- pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+
+ pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
- msleep(100);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto transaction_done;
-
- dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
- "sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
- if (status & PCI_EXP_DEVSTA_TRPND)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
-
-transaction_done:
- pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
+ if (!(status & PCI_EXP_DEVSTA_TRPND))
+ goto clear;
+ }
+
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_BCR_FLR);
- mdelay(100);
+ msleep(100);
- pci_unblock_user_cfg_access(dev);
return 0;
}
-static int __pci_af_flr(struct pci_dev *dev, int probe)
+static int pci_af_flr(struct pci_dev *dev, int probe)
{
- int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
- u8 status;
+ int i;
+ int pos;
u8 cap;
+ u8 status;
- if (!cappos)
+ pos = pci_find_capability(dev, PCI_CAP_ID_AF);
+ if (!pos)
return -ENOTTY;
- pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
+
+ pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
if (probe)
return 0;
- pci_block_user_cfg_access(dev);
-
/* Wait for Transaction Pending bit clean */
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
+ for (i = 0; i < 4; i++) {
+ if (i)
+ msleep((1 << (i - 1)) * 100);
+
+ pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
+ if (!(status & PCI_AF_STATUS_TP))
+ goto clear;
+ }
+ dev_err(&dev->dev, "transaction is not cleared; "
+ "proceeding with reset anyway\n");
+
+clear:
+ pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
msleep(100);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (!(status & PCI_AF_STATUS_TP))
- goto transaction_done;
-
- dev_info(&dev->dev, "Busy after 100ms while trying to"
- " reset; sleeping for 1 second\n");
- ssleep(1);
- pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
- if (status & PCI_AF_STATUS_TP)
- dev_info(&dev->dev, "Still busy after 1s; "
- "proceeding with reset anyway\n");
-
-transaction_done:
- pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
- mdelay(100);
-
- pci_unblock_user_cfg_access(dev);
+
return 0;
}
-static int __pci_reset_function(struct pci_dev *pdev, int probe)
+static int pci_pm_reset(struct pci_dev *dev, int probe)
{
- int res;
+ u16 csr;
+
+ if (!dev->pm_cap)
+ return -ENOTTY;
- res = __pcie_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
+ if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
+ return -ENOTTY;
- res = __pci_af_flr(pdev, probe);
- if (res != -ENOTTY)
- return res;
+ if (probe)
+ return 0;
- return res;
+ if (dev->current_state != PCI_D0)
+ return -EINVAL;
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
+ msleep(pci_pm_d3_delay);
+
+ return 0;
+}
+
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
+ u16 ctrl;
+ struct pci_dev *pdev;
+
+ if (dev->subordinate)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
+ msleep(100);
+
+ return 0;
+}
+
+static int pci_dev_reset(struct pci_dev *dev, int probe)
+{
+ int rc;
+
+ might_sleep();
+
+ if (!probe) {
+ pci_block_user_cfg_access(dev);
+ /* block PM suspend, driver probe, etc. */
+ down(&dev->dev.sem);
+ }
+
+ rc = pcie_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_af_flr(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_pm_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
+ rc = pci_parent_bus_reset(dev, probe);
+done:
+ if (!probe) {
+ up(&dev->dev.sem);
+ pci_unblock_user_cfg_access(dev);
+ }
+
+ return rc;
}
/**
- * pci_execute_reset_function() - Reset a PCI device function
- * @dev: Device function to reset
+ * __pci_reset_function - reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2174,18 +2241,18 @@ static int __pci_reset_function(struct pci_dev *pdev, int probe)
* device including MSI, bus mastering, BARs, decoding IO and memory spaces,
* etc.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
-int pci_execute_reset_function(struct pci_dev *dev)
+int __pci_reset_function(struct pci_dev *dev)
{
- return __pci_reset_function(dev, 0);
+ return pci_dev_reset(dev, 0);
}
-EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
- * pci_reset_function() - quiesce and reset a PCI device function
- * @dev: Device function to reset
+ * pci_reset_function - quiesce and reset a PCI device function
+ * @dev: PCI device to reset
*
* Some devices allow an individual function to be reset without affecting
* other functions in the same device. The PCI device must be responsive
@@ -2193,32 +2260,33 @@ EXPORT_SYMBOL_GPL(pci_execute_reset_function);
*
* This function does not just reset the PCI portion of a device, but
* clears all the state associated with the device. This function differs
- * from pci_execute_reset_function in that it saves and restores device state
+ * from __pci_reset_function in that it saves and restores device state
* over the reset.
*
- * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * Returns 0 if the device function was successfully reset or negative if the
* device doesn't support resetting a single function.
*/
int pci_reset_function(struct pci_dev *dev)
{
- int r = __pci_reset_function(dev, 1);
+ int rc;
- if (r < 0)
- return r;
+ rc = pci_dev_reset(dev, 1);
+ if (rc)
+ return rc;
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- disable_irq(dev->irq);
pci_save_state(dev);
+ /*
+ * both INTx and MSI are disabled after the Interrupt Disable bit
+ * is set and the Bus Master bit is cleared.
+ */
pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- r = pci_execute_reset_function(dev);
+ rc = pci_dev_reset(dev, 0);
pci_restore_state(dev);
- if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
- enable_irq(dev->irq);
- return r;
+ return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
@@ -2591,6 +2659,8 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "resource_alignment=", 19)) {
pci_set_resource_alignment_param(str + 19,
strlen(str + 19));
+ } else if (!strncmp(str, "ecrc=", 5)) {
+ pcie_ecrc_get_policy(str + 5);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
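
With pci_execute_reset_function() renamed and the new reset methods chained inside pci_dev_reset(), the driver-facing entry point is simply pci_reset_function(). The sketch below is illustrative only; the function name and error handling are made up for the example.

/*
 * Illustration only: resetting a function from a driver's recovery path.
 * pci_reset_function() saves and restores config space around whichever
 * reset method pci_dev_reset() finds first (FLR, AF FLR, PM reset,
 * parent bus reset).
 */
#include <linux/pci.h>

static int example_recover_device(struct pci_dev *pdev)
{
	int rc;

	rc = pci_reset_function(pdev);
	if (rc)
		dev_warn(&pdev->dev, "function reset failed: %d\n", rc);
	return rc;
}
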
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d03f6b99f292..f73bcbedf37c 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -229,6 +229,15 @@ struct pci_sriov {
u8 __iomem *mstate; /* VF Migration State Array */
};
+/* Address Translation Service */
+struct pci_ats {
+ int pos; /* capability position */
+ int stu; /* Smallest Translation Unit */
+ int qdep; /* Invalidate Queue Depth */
+ int ref_cnt; /* Physical Function reference count */
+ int is_enabled:1; /* Enable bit is set */
+};
+
#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
@@ -236,6 +245,20 @@ extern int pci_iov_resource_bar(struct pci_dev *dev, int resno,
enum pci_bar_type *type);
extern void pci_restore_iov_state(struct pci_dev *dev);
extern int pci_iov_bus_range(struct pci_bus *bus);
+
+extern int pci_enable_ats(struct pci_dev *dev, int ps);
+extern void pci_disable_ats(struct pci_dev *dev);
+extern int pci_ats_queue_depth(struct pci_dev *dev);
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return dev->ats && dev->ats->is_enabled;
+}
#else
static inline int pci_iov_init(struct pci_dev *dev)
{
@@ -257,6 +280,22 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
{
return 0;
}
+
+static inline int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+ return -ENODEV;
+}
+static inline void pci_disable_ats(struct pci_dev *dev)
+{
+}
+static inline int pci_ats_queue_depth(struct pci_dev *dev)
+{
+ return -ENODEV;
+}
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+ return 0;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig
index c3bde588aa13..50e94e02378a 100644
--- a/drivers/pci/pcie/aer/Kconfig
+++ b/drivers/pci/pcie/aer/Kconfig
@@ -10,3 +10,18 @@ config PCIEAER
This enables PCI Express Root Port Advanced Error Reporting
(AER) driver support. Error reporting messages sent to Root
Port will be handled by PCI Express AER driver.
+
+
+#
+# PCI Express ECRC
+#
+config PCIE_ECRC
+ bool "PCI Express ECRC settings control"
+ depends on PCIEAER
+ help
+ Used to override firmware/bios settings for PCI Express ECRC
+ (transaction layer end-to-end CRC checking).
+
+ When in doubt, say N.
+
+source "drivers/pci/pcie/aer/Kconfig.debug"
diff --git a/drivers/pci/pcie/aer/Kconfig.debug b/drivers/pci/pcie/aer/Kconfig.debug
new file mode 100644
index 000000000000..b8c925c1f6aa
--- /dev/null
+++ b/drivers/pci/pcie/aer/Kconfig.debug
@@ -0,0 +1,18 @@
+#
+# PCI Express Root Port Device AER Debug Configuration
+#
+
+config PCIEAER_INJECT
+ tristate "PCIE AER error injector support"
+ depends on PCIEAER
+ default n
+ help
+ This enables PCI Express Root Port Advanced Error Reporting
+ (AER) software error injector.
+
+ Debugging PCIE AER code is quite difficult because it is hard
+ to trigger various real hardware errors. Software-based
+ error injection can fake almost all kinds of errors with the
+ help of the user-space helper tool aer-inject, which can be
+ obtained from:
+ http://www.kernel.org/pub/linux/utils/pci/aer-inject/
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 8da3bd8455a8..2cba67510dc8 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -4,6 +4,9 @@
obj-$(CONFIG_PCIEAER) += aerdriver.o
+obj-$(CONFIG_PCIE_ECRC) += ecrc.o
+
aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
+obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
new file mode 100644
index 000000000000..d92ae21a59d8
--- /dev/null
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -0,0 +1,473 @@
+/*
+ * PCIE AER software error injection support.
+ *
+ * Debugging PCIE AER code is quite difficult because it is hard to
+ * trigger various real hardware errors. Software-based error
+ * injection can fake almost all kinds of errors with the help of a
+ * user-space helper tool aer-inject, which can be obtained from:
+ * http://www.kernel.org/pub/linux/utils/pci/aer-inject/
+ *
+ * Copyright 2009 Intel Corporation.
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/pci.h>
+#include <linux/fs.h>
+#include <asm/uaccess.h>
+#include "aerdrv.h"
+
+struct aer_error_inj
+{
+ u8 bus;
+ u8 dev;
+ u8 fn;
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+};
+
+struct aer_error
+{
+ struct list_head list;
+ unsigned int bus;
+ unsigned int devfn;
+ int pos_cap_err;
+
+ u32 uncor_status;
+ u32 cor_status;
+ u32 header_log0;
+ u32 header_log1;
+ u32 header_log2;
+ u32 header_log3;
+ u32 root_status;
+ u32 source_id;
+};
+
+struct pci_bus_ops
+{
+ struct list_head list;
+ struct pci_bus *bus;
+ struct pci_ops *ops;
+};
+
+static LIST_HEAD(einjected);
+
+static LIST_HEAD(pci_bus_ops_list);
+
+/* Protect einjected and pci_bus_ops_list */
+static DEFINE_SPINLOCK(inject_lock);
+
+static void aer_error_init(struct aer_error *err, unsigned int bus,
+ unsigned int devfn, int pos_cap_err)
+{
+ INIT_LIST_HEAD(&err->list);
+ err->bus = bus;
+ err->devfn = devfn;
+ err->pos_cap_err = pos_cap_err;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
+{
+ struct aer_error *err;
+
+ list_for_each_entry(err, &einjected, list) {
+ if (bus == err->bus && devfn == err->devfn)
+ return err;
+ }
+ return NULL;
+}
+
+/* inject_lock must be held before calling */
+static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
+{
+ return __find_aer_error(dev->bus->number, dev->devfn);
+}
+
+/* inject_lock must be held before calling */
+static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
+{
+ struct pci_bus_ops *bus_ops;
+
+ list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
+ if (bus_ops->bus == bus)
+ return bus_ops->ops;
+ }
+ return NULL;
+}
+
+static struct pci_bus_ops *pci_bus_ops_pop(void)
+{
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops = NULL;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (list_empty(&pci_bus_ops_list))
+ bus_ops = NULL;
+ else {
+ struct list_head *lh = pci_bus_ops_list.next;
+ list_del(lh);
+ bus_ops = list_entry(lh, struct pci_bus_ops, list);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return bus_ops;
+}
+
+static u32 *find_pci_config_dword(struct aer_error *err, int where,
+ int *prw1cs)
+{
+ int rw1cs = 0;
+ u32 *target = NULL;
+
+ if (err->pos_cap_err == -1)
+ return NULL;
+
+ switch (where - err->pos_cap_err) {
+ case PCI_ERR_UNCOR_STATUS:
+ target = &err->uncor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_COR_STATUS:
+ target = &err->cor_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_HEADER_LOG:
+ target = &err->header_log0;
+ break;
+ case PCI_ERR_HEADER_LOG+4:
+ target = &err->header_log1;
+ break;
+ case PCI_ERR_HEADER_LOG+8:
+ target = &err->header_log2;
+ break;
+ case PCI_ERR_HEADER_LOG+12:
+ target = &err->header_log3;
+ break;
+ case PCI_ERR_ROOT_STATUS:
+ target = &err->root_status;
+ rw1cs = 1;
+ break;
+ case PCI_ERR_ROOT_COR_SRC:
+ target = &err->source_id;
+ break;
+ }
+ if (prw1cs)
+ *prw1cs = rw1cs;
+ return target;
+}
+
+static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, NULL);
+ if (sim) {
+ *val = *sim;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->read(bus, devfn, where, size, val);
+}
+
+int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 val)
+{
+ u32 *sim;
+ struct aer_error *err;
+ unsigned long flags;
+ int rw1cs;
+ struct pci_ops *ops;
+
+ spin_lock_irqsave(&inject_lock, flags);
+ if (size != sizeof(u32))
+ goto out;
+ err = __find_aer_error(bus->number, devfn);
+ if (!err)
+ goto out;
+
+ sim = find_pci_config_dword(err, where, &rw1cs);
+ if (sim) {
+ if (rw1cs)
+ *sim ^= val;
+ else
+ *sim = val;
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return 0;
+ }
+out:
+ ops = __find_pci_bus_ops(bus);
+ spin_unlock_irqrestore(&inject_lock, flags);
+ return ops->write(bus, devfn, where, size, val);
+}
+
+static struct pci_ops pci_ops_aer = {
+ .read = pci_read_aer,
+ .write = pci_write_aer,
+};
+
+static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
+ struct pci_bus *bus,
+ struct pci_ops *ops)
+{
+ INIT_LIST_HEAD(&bus_ops->list);
+ bus_ops->bus = bus;
+ bus_ops->ops = ops;
+}
+
+static int pci_bus_set_aer_ops(struct pci_bus *bus)
+{
+ struct pci_ops *ops;
+ struct pci_bus_ops *bus_ops;
+ unsigned long flags;
+
+ bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+ ops = pci_bus_set_ops(bus, &pci_ops_aer);
+ spin_lock_irqsave(&inject_lock, flags);
+ if (ops == &pci_ops_aer)
+ goto out;
+ pci_bus_ops_init(bus_ops, bus, ops);
+ list_add(&bus_ops->list, &pci_bus_ops_list);
+ bus_ops = NULL;
+out:
+ spin_unlock_irqrestore(&inject_lock, flags);
+ if (bus_ops)
+ kfree(bus_ops);
+ return 0;
+}
+
+static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
+{
+ while (1) {
+ if (!dev->is_pcie)
+ break;
+ if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
+ return dev;
+ if (!dev->bus->self)
+ break;
+ dev = dev->bus->self;
+ }
+ return NULL;
+}
+
+static int find_aer_device_iter(struct device *device, void *data)
+{
+ struct pcie_device **result = data;
+ struct pcie_device *pcie_dev;
+
+ if (device->bus == &pcie_port_bus_type) {
+ pcie_dev = to_pcie_device(device);
+ if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
+ *result = pcie_dev;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
+{
+ return device_for_each_child(&dev->dev, result, find_aer_device_iter);
+}
+
+static int aer_inject(struct aer_error_inj *einj)
+{
+ struct aer_error *err, *rperr;
+ struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
+ struct pci_dev *dev, *rpdev;
+ struct pcie_device *edev;
+ unsigned long flags;
+ unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
+ int pos_cap_err, rp_pos_cap_err;
+ u32 sever;
+ int ret = 0;
+
+ dev = pci_get_bus_and_slot(einj->bus, devfn);
+ if (!dev)
+ return -EINVAL;
+ rpdev = pcie_find_root_port(dev);
+ if (!rpdev) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
+ pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
+
+ rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
+ if (!rp_pos_cap_err) {
+ ret = -EIO;
+ goto out_put;
+ }
+
+ err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!err_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+ rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
+ if (!rperr_alloc) {
+ ret = -ENOMEM;
+ goto out_put;
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+
+ err = __find_aer_error_by_dev(dev);
+ if (!err) {
+ err = err_alloc;
+ err_alloc = NULL;
+ aer_error_init(err, einj->bus, devfn, pos_cap_err);
+ list_add(&err->list, &einjected);
+ }
+ err->uncor_status |= einj->uncor_status;
+ err->cor_status |= einj->cor_status;
+ err->header_log0 = einj->header_log0;
+ err->header_log1 = einj->header_log1;
+ err->header_log2 = einj->header_log2;
+ err->header_log3 = einj->header_log3;
+
+ rperr = __find_aer_error_by_dev(rpdev);
+ if (!rperr) {
+ rperr = rperr_alloc;
+ rperr_alloc = NULL;
+ aer_error_init(rperr, rpdev->bus->number, rpdev->devfn,
+ rp_pos_cap_err);
+ list_add(&rperr->list, &einjected);
+ }
+ if (einj->cor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
+ else
+ rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
+ rperr->source_id &= 0xffff0000;
+ rperr->source_id |= (einj->bus << 8) | devfn;
+ }
+ if (einj->uncor_status) {
+ if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
+ rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
+ if (sever & einj->uncor_status) {
+ rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
+ if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
+ rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
+ } else
+ rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
+ rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
+ rperr->source_id &= 0x0000ffff;
+ rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+
+ ret = pci_bus_set_aer_ops(dev->bus);
+ if (ret)
+ goto out_put;
+ ret = pci_bus_set_aer_ops(rpdev->bus);
+ if (ret)
+ goto out_put;
+
+ if (find_aer_device(rpdev, &edev))
+ aer_irq(-1, edev);
+ else
+ ret = -EINVAL;
+out_put:
+ if (err_alloc)
+ kfree(err_alloc);
+ if (rperr_alloc)
+ kfree(rperr_alloc);
+ pci_dev_put(dev);
+ return ret;
+}
+
+static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ struct aer_error_inj einj;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (usize != sizeof(struct aer_error_inj))
+ return -EINVAL;
+
+ if (copy_from_user(&einj, ubuf, usize))
+ return -EFAULT;
+
+ ret = aer_inject(&einj);
+ return ret ? ret : usize;
+}
+
+static const struct file_operations aer_inject_fops = {
+ .write = aer_inject_write,
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice aer_inject_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "aer_inject",
+ .fops = &aer_inject_fops,
+};
+
+static int __init aer_inject_init(void)
+{
+ return misc_register(&aer_inject_device);
+}
+
+static void __exit aer_inject_exit(void)
+{
+ struct aer_error *err, *err_next;
+ unsigned long flags;
+ struct pci_bus_ops *bus_ops;
+
+ misc_deregister(&aer_inject_device);
+
+ while ((bus_ops = pci_bus_ops_pop())) {
+ pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
+ kfree(bus_ops);
+ }
+
+ spin_lock_irqsave(&inject_lock, flags);
+ list_for_each_entry_safe(err, err_next,
+ &einjected, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ spin_unlock_irqrestore(&inject_lock, flags);
+}
+
+module_init(aer_inject_init);
+module_exit(aer_inject_exit);
+
+MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_LICENSE("GPL");
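
The injector is driven from user space by writing a struct aer_error_inj to the misc device registered above. The real aer-inject tool referenced in the header does exactly this; the stand-alone sketch below only illustrates the mechanism. The device path follows from the miscdevice name, the struct is re-declared to mirror the kernel definition, and the target device and Receiver Error bit are invented for the example.

/*
 * Illustration only: roughly what the user-space helper does.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

struct aer_error_inj {
	uint8_t  bus, dev, fn;
	uint32_t uncor_status, cor_status;
	uint32_t header_log0, header_log1, header_log2, header_log3;
};

int main(void)
{
	struct aer_error_inj einj = {
		.bus = 0x01, .dev = 0x00, .fn = 0x00,
		.cor_status = 0x00000001,	/* correctable Receiver Error */
	};
	int fd = open("/dev/aer_inject", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, &einj, sizeof(einj)) != sizeof(einj)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
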
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 32ade5af927e..4770f13b3ca1 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -77,7 +77,7 @@ void pci_no_aer(void)
*
* Invoked when Root Port detects AER messages.
**/
-static irqreturn_t aer_irq(int irq, void *context)
+irqreturn_t aer_irq(int irq, void *context)
{
unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
@@ -126,6 +126,7 @@ static irqreturn_t aer_irq(int irq, void *context)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_GPL(aer_irq);
/**
* aer_alloc_rpc - allocate Root Port data structure
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index aa14482a4779..bbd7428ca2d0 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
+#include <linux/interrupt.h>
#define AER_NONFATAL 0
#define AER_FATAL 1
@@ -56,7 +57,11 @@ struct header_log_regs {
unsigned int dw3;
};
+#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
struct aer_err_info {
+ struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
+ int error_dev_num;
+ u16 id;
int severity; /* 0:NONFATAL | 1:FATAL | 2:COR */
int flags;
unsigned int status; /* COR/UNCOR Error Status */
@@ -120,6 +125,7 @@ extern void aer_delete_rootport(struct aer_rpc *rpc);
extern int aer_init(struct pcie_device *dev);
extern void aer_isr(struct work_struct *work);
extern void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
+extern irqreturn_t aer_irq(int irq, void *context);
#ifdef CONFIG_ACPI
extern int aer_osc_setup(struct pcie_device *pciedev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 307452f30035..3d8872704a58 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -26,7 +26,9 @@
#include "aerdrv.h"
static int forceload;
+static int nosourceid;
module_param(forceload, bool, 0);
+module_param(nosourceid, bool, 0);
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -109,19 +111,23 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
#endif /* 0 */
-static void set_device_error_reporting(struct pci_dev *dev, void *data)
+static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
bool enable = *((bool *)data);
- if (dev->pcie_type != PCIE_RC_PORT &&
- dev->pcie_type != PCIE_SW_UPSTREAM_PORT &&
- dev->pcie_type != PCIE_SW_DOWNSTREAM_PORT)
- return;
+ if (dev->pcie_type == PCIE_RC_PORT ||
+ dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
+ dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
+ if (enable)
+ pci_enable_pcie_error_reporting(dev);
+ else
+ pci_disable_pcie_error_reporting(dev);
+ }
if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
+ pcie_set_ecrc_checking(dev);
+
+ return 0;
}
/**
@@ -139,73 +145,148 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}
-static int find_device_iter(struct device *device, void *data)
+static inline int compare_device_id(struct pci_dev *dev,
+ struct aer_err_info *e_info)
{
- struct pci_dev *dev;
- u16 id = *(unsigned long *)data;
- u8 secondary, subordinate, d_bus = id >> 8;
+ if (e_info->id == ((dev->bus->number << 8) | dev->devfn)) {
+ /*
+ * Device ID match
+ */
+ return 1;
+ }
- if (device->bus == &pci_bus_type) {
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn)) {
- /*
- * Device ID match
- */
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
+ return 0;
+}
+
+static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
+{
+ if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
+ e_info->dev[e_info->error_dev_num] = dev;
+ e_info->error_dev_num++;
+ return 1;
+ } else
+ return 0;
+}
+
+
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
+static int find_device_iter(struct pci_dev *dev, void *data)
+{
+ int pos;
+ u32 status;
+ u32 mask;
+ u16 reg16;
+ int result;
+ struct aer_err_info *e_info = (struct aer_err_info *)data;
+
+ /*
+ * When the bus ID is equal to 0, it might be a bad ID
+ * reported by the root port.
+ */
+ if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
+ result = compare_device_id(dev, e_info);
+ if (result)
+ add_error_device(e_info, dev);
/*
- * If device is P2P, check if it is an upstream?
+ * If there is no multiple-error condition, we stop
+ * or continue based on the ID comparison.
*/
- if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(dev, PCI_SECONDARY_BUS,
- &secondary);
- pci_read_config_byte(dev, PCI_SUBORDINATE_BUS,
- &subordinate);
- if (d_bus >= secondary && d_bus <= subordinate) {
- *(unsigned long*)data = (unsigned long)device;
- return 1;
- }
+ if (!(e_info->flags & AER_MULTI_ERROR_VALID_FLAG))
+ return result;
+
+ /*
+ * If there are multiple errors and the ID does match,
+ * we need to continue searching for other devices under
+ * the root port. Returning 0 means that.
+ */
+ if (result)
+ return 0;
+ }
+
+ /*
+ * When either
+ * 1) nosourceid==y;
+ * 2) the bus ID is equal to 0 (some ports might lose the bus
+ * ID of the error source ID);
+ * 3) there are multiple errors and the prior ID comparison fails;
+ * we check the AER status registers to find the initial reporter.
+ */
+ if (atomic_read(&dev->enable_cnt) == 0)
+ return 0;
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return 0;
+ /* Check if AER is enabled */
+ pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
+ if (!(reg16 & (
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE)))
+ return 0;
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return 0;
+
+ status = 0;
+ mask = 0;
+ if (e_info->severity == AER_CORRECTABLE) {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_COR_MASK,
+ &mask);
+ if (status & ERR_CORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
+ }
+ } else {
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_STATUS,
+ &status);
+ pci_read_config_dword(dev,
+ pos + PCI_ERR_UNCOR_MASK,
+ &mask);
+ if (status & ERR_UNCORRECTABLE_ERROR_MASK & ~mask) {
+ add_error_device(e_info, dev);
+ goto added;
}
}
return 0;
+
+added:
+ if (e_info->flags & AER_MULTI_ERROR_VALID_FLAG)
+ return 0;
+ else
+ return 1;
}
/**
* find_source_device - search through device hierarchy for source device
* @parent: pointer to Root Port pci_dev data structure
- * @id: device ID of agent who sends an error message to this Root Port
+ * @e_info: detailed error information, including the error source ID
*
* Invoked when error is detected at the Root Port.
*/
-static struct device* find_source_device(struct pci_dev *parent, u16 id)
+static void find_source_device(struct pci_dev *parent,
+ struct aer_err_info *e_info)
{
struct pci_dev *dev = parent;
- struct device *device;
- unsigned long device_addr;
- int status;
+ int result;
/* Is Root Port an agent that sends error message? */
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return &dev->dev;
-
- do {
- device_addr = id;
- if ((status = device_for_each_child(&dev->dev,
- &device_addr, find_device_iter))) {
- device = (struct device*)device_addr;
- dev = to_pci_dev(device);
- if (id == ((dev->bus->number << 8) | dev->devfn))
- return device;
- }
- }while (status);
+ result = find_device_iter(dev, e_info);
+ if (result)
+ return;
- return NULL;
+ pci_walk_bus(parent->subordinate, find_device_iter, e_info);
}
-static void report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -230,16 +311,16 @@ static void report_error_detected(struct pci_dev *dev, void *data)
dev->driver ?
"no AER-aware driver" : "no driver");
}
- return;
+ return 0;
}
err_handler = dev->driver->err_handler;
vote = err_handler->error_detected(dev, result_data->state);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_mmio_enabled(struct pci_dev *dev, void *data)
+static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -249,15 +330,15 @@ static void report_mmio_enabled(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->mmio_enabled)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_slot_reset(struct pci_dev *dev, void *data)
+static int report_slot_reset(struct pci_dev *dev, void *data)
{
pci_ers_result_t vote;
struct pci_error_handlers *err_handler;
@@ -267,15 +348,15 @@ static void report_slot_reset(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->slot_reset)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
result_data->result = merge_result(result_data->result, vote);
- return;
+ return 0;
}
-static void report_resume(struct pci_dev *dev, void *data)
+static int report_resume(struct pci_dev *dev, void *data)
{
struct pci_error_handlers *err_handler;
@@ -284,11 +365,11 @@ static void report_resume(struct pci_dev *dev, void *data)
if (!dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
- return;
+ return 0;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- return;
+ return 0;
}
/**
@@ -305,7 +386,7 @@ static void report_resume(struct pci_dev *dev, void *data)
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
enum pci_channel_state state,
char *error_mesg,
- void (*cb)(struct pci_dev *, void *))
+ int (*cb)(struct pci_dev *, void *))
{
struct aer_broadcast_data result_data;
@@ -497,12 +578,12 @@ static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
*/
static void handle_error_source(struct pcie_device * aerdev,
struct pci_dev *dev,
- struct aer_err_info info)
+ struct aer_err_info *info)
{
pci_ers_result_t status = 0;
int pos;
- if (info.severity == AER_CORRECTABLE) {
+ if (info->severity == AER_CORRECTABLE) {
/*
* Correctable error does not need software intevention.
* No need to go through error recovery process.
@@ -510,9 +591,9 @@ static void handle_error_source(struct pcie_device * aerdev,
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
if (pos)
pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
- info.status);
+ info->status);
} else {
- status = do_recovery(aerdev, dev, info.severity);
+ status = do_recovery(aerdev, dev, info->severity);
if (status == PCI_ERS_RESULT_RECOVERED) {
dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
"successfully recovered\n");
@@ -661,6 +742,28 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
return AER_SUCCESS;
}
+static inline void aer_process_err_devices(struct pcie_device *p_device,
+ struct aer_err_info *e_info)
+{
+ int i;
+
+ if (!e_info->dev[0]) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "can't find device of ID%04x\n",
+ e_info->id);
+ }
+
+ for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
+ if (get_device_error_info(e_info->dev[i], e_info) ==
+ AER_SUCCESS) {
+ aer_print_error(e_info->dev[i], e_info);
+ handle_error_source(p_device,
+ e_info->dev[i],
+ e_info);
+ }
+ }
+}
+
/**
* aer_isr_one_error - consume an error detected by root port
* @p_device: pointer to error root port service device
@@ -669,10 +772,16 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
static void aer_isr_one_error(struct pcie_device *p_device,
struct aer_err_source *e_src)
{
- struct device *s_device;
- struct aer_err_info e_info = {0, 0, 0,};
+ struct aer_err_info *e_info;
int i;
- u16 id;
+
+ /* struct aer_err_info might be big, so we allocate it with slab */
+ e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
+ if (e_info == NULL) {
+ dev_printk(KERN_DEBUG, &p_device->port->dev,
+ "Can't allocate mem when processing AER errors\n");
+ return;
+ }
/*
* There is a possibility that both correctable error and
@@ -684,31 +793,26 @@ static void aer_isr_one_error(struct pcie_device *p_device,
if (!(e_src->status & i))
continue;
+ memset(e_info, 0, sizeof(struct aer_err_info));
+
/* Init comprehensive error information */
if (i & PCI_ERR_ROOT_COR_RCV) {
- id = ERR_COR_ID(e_src->id);
- e_info.severity = AER_CORRECTABLE;
+ e_info->id = ERR_COR_ID(e_src->id);
+ e_info->severity = AER_CORRECTABLE;
} else {
- id = ERR_UNCOR_ID(e_src->id);
- e_info.severity = ((e_src->status >> 6) & 1);
+ e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info->severity = ((e_src->status >> 6) & 1);
}
if (e_src->status &
(PCI_ERR_ROOT_MULTI_COR_RCV |
PCI_ERR_ROOT_MULTI_UNCOR_RCV))
- e_info.flags |= AER_MULTI_ERROR_VALID_FLAG;
- if (!(s_device = find_source_device(p_device->port, id))) {
- printk(KERN_DEBUG "%s->can't find device of ID%04x\n",
- __func__, id);
- continue;
- }
- if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
- AER_SUCCESS) {
- aer_print_error(to_pci_dev(s_device), &e_info);
- handle_error_source(p_device,
- to_pci_dev(s_device),
- e_info);
- }
+ e_info->flags |= AER_MULTI_ERROR_VALID_FLAG;
+
+ find_source_device(p_device->port, e_info);
+ aer_process_err_devices(p_device, e_info);
}
+
+ kfree(e_info);
}
/**
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
new file mode 100644
index 000000000000..ece97df4df6d
--- /dev/null
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -0,0 +1,131 @@
+/*
+ * Enables/disables PCIe ECRC checking.
+ *
+ * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ * Andrew Patterson <andrew.patterson@hp.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/errno.h>
+#include "../../pci.h"
+
+#define ECRC_POLICY_DEFAULT 0 /* ECRC set by BIOS */
+#define ECRC_POLICY_OFF 1 /* ECRC off for performance */
+#define ECRC_POLICY_ON 2 /* ECRC on for data integrity */
+
+static int ecrc_policy = ECRC_POLICY_DEFAULT;
+
+static const char *ecrc_policy_str[] = {
+ [ECRC_POLICY_DEFAULT] = "bios",
+ [ECRC_POLICY_OFF] = "off",
+ [ECRC_POLICY_ON] = "on"
+};
+
+/**
+ * enable_ercr_checking - enable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int enable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ if (reg32 & PCI_ERR_CAP_ECRC_GENC)
+ reg32 |= PCI_ERR_CAP_ECRC_GENE;
+ if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
+ reg32 |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * disable_ecrc_checking - disable PCIe ECRC checking for a device
+ * @dev: the PCI device
+ *
+ * Returns 0 on success, or negative on failure.
+ */
+static int disable_ecrc_checking(struct pci_dev *dev)
+{
+ int pos;
+ u32 reg32;
+
+ if (!dev->is_pcie)
+ return -ENODEV;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (!pos)
+ return -ENODEV;
+
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+ reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+ return 0;
+}
+
+/**
+ * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
+ * @dev: the PCI device
+ */
+void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ switch (ecrc_policy) {
+ case ECRC_POLICY_DEFAULT:
+ return;
+ case ECRC_POLICY_OFF:
+ disable_ecrc_checking(dev);
+ break;
+ case ECRC_POLICY_ON:
+ enable_ecrc_checking(dev);
+ break;
+ default:
+ return;
+ }
+}
+
+/**
+ * pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ */
+void pcie_ecrc_get_policy(char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
+ if (!strncmp(str, ecrc_policy_str[i],
+ strlen(ecrc_policy_str[i])))
+ break;
+ if (i >= ARRAY_SIZE(ecrc_policy_str))
+ return;
+
+ ecrc_policy = i;
+}
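
pcie_ecrc_get_policy() is wired to the new "ecrc=" branch added to pci_setup() earlier in this diff, so the policy is chosen from the pci= boot option and then applied per device by set_device_error_reporting(). A usage example, assuming CONFIG_PCIE_ECRC=y:

	pci=ecrc=on	# force ECRC generation/checking where the device is capable
	pci=ecrc=off	# turn ECRC off for performance
	pci=ecrc=bios	# keep whatever the firmware configured (the default)
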
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index b0367f168af4..3d27c97e0486 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,40 +26,36 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
-struct endpoint_state {
- unsigned int l0s_acceptable_latency;
- unsigned int l1_acceptable_latency;
+struct aspm_latency {
+ u32 l0s; /* L0s latency (nsec) */
+ u32 l1; /* L1 latency (nsec) */
};
struct pcie_link_state {
- struct list_head sibiling;
- struct pci_dev *pdev;
- bool downstream_has_switch;
-
- struct pcie_link_state *parent;
- struct list_head children;
- struct list_head link;
+ struct pci_dev *pdev; /* Upstream component of the Link */
+ struct pcie_link_state *root; /* pointer to the root port link */
+ struct pcie_link_state *parent; /* pointer to the parent Link state */
+ struct list_head sibling; /* node in link_list */
+ struct list_head children; /* list of child link states */
+ struct list_head link; /* node in parent's children list */
/* ASPM state */
- unsigned int support_state;
- unsigned int enabled_state;
- unsigned int bios_aspm_state;
- /* upstream component */
- unsigned int l0s_upper_latency;
- unsigned int l1_upper_latency;
- /* downstream component */
- unsigned int l0s_down_latency;
- unsigned int l1_down_latency;
- /* Clock PM state*/
- unsigned int clk_pm_capable;
- unsigned int clk_pm_enabled;
- unsigned int bios_clk_state;
+ u32 aspm_support:2; /* Supported ASPM state */
+ u32 aspm_enabled:2; /* Enabled ASPM state */
+ u32 aspm_default:2; /* Default ASPM state by BIOS */
+
+ /* Clock PM state */
+ u32 clkpm_capable:1; /* Clock PM capable? */
+ u32 clkpm_enabled:1; /* Current Clock PM state */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ /* Latencies */
+ struct aspm_latency latency; /* Exit latency */
/*
- * A pcie downstream port only has one slot under it, so at most there
- * are 8 functions
+ * Endpoint acceptable latencies. A pcie downstream port only
+ * has one slot under it, so at most there are 8 functions.
*/
- struct endpoint_state endpoints[8];
+ struct aspm_latency acceptable[8];
};
static int aspm_disabled, aspm_force;
@@ -78,27 +74,23 @@ static const char *policy_str[] = {
#define LINK_RETRAIN_TIMEOUT HZ
-static int policy_to_aspm_state(struct pci_dev *pdev)
+static int policy_to_aspm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+ return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
case POLICY_DEFAULT:
- return link_state->bios_aspm_state;
+ return link->aspm_default;
}
return 0;
}
-static int policy_to_clkpm_state(struct pci_dev *pdev)
+static int policy_to_clkpm_state(struct pcie_link_state *link)
{
- struct pcie_link_state *link_state = pdev->link_state;
-
switch (aspm_policy) {
case POLICY_PERFORMANCE:
/* Disable ASPM and Clock PM */
@@ -107,73 +99,78 @@ static int policy_to_clkpm_state(struct pci_dev *pdev)
/* Disable Clock PM */
return 1;
case POLICY_DEFAULT:
- return link_state->bios_clk_state;
+ return link->clkpm_default;
}
return 0;
}
-static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
+static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
- struct pci_dev *child_dev;
int pos;
u16 reg16;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (enable)
reg16 |= PCI_EXP_LNKCTL_CLKREQ_EN;
else
reg16 &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- pci_write_config_word(child_dev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(child, pos + PCI_EXP_LNKCTL, reg16);
}
- link_state->clk_pm_enabled = !!enable;
+ link->clkpm_enabled = !!enable;
}
-static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
+static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
{
- int pos;
+ /* Don't enable Clock PM if the link is not Clock PM capable */
+ if (!link->clkpm_capable && enable)
+ return;
+ /* Nothing to do if the requested state equals the current one */
+ if (link->clkpm_enabled == enable)
+ return;
+ pcie_set_clkpm_nocheck(link, enable);
+}
+
+static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
+{
+ int pos, capable = 1, enabled = 1;
u32 reg32;
u16 reg16;
- int capable = 1, enabled = 1;
- struct pci_dev *child_dev;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* All functions should have the same cap and state, take the worst */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
if (!pos)
return;
- pci_read_config_dword(child_dev, pos + PCI_EXP_LNKCAP, &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
capable = 0;
enabled = 0;
break;
}
- pci_read_config_word(child_dev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
enabled = 0;
}
- link_state->clk_pm_enabled = enabled;
- link_state->bios_clk_state = enabled;
- if (!blacklist) {
- link_state->clk_pm_capable = capable;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
- } else {
- link_state->clk_pm_capable = 0;
- pcie_set_clock_pm(pdev, 0);
- }
+ link->clkpm_enabled = enabled;
+ link->clkpm_default = enabled;
+ link->clkpm_capable = (blacklist) ? 0 : capable;
}
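The "take the worst" aggregation above amounts to AND-ing capability and enable state across every function on the link. A small standalone sketch with invented per-function data; clkpm_aggregate() and struct fn are illustrative stand-ins for pcie_clkpm_cap_init() and the per-function config reads:

#include <stdio.h>

struct fn { int clkpm_capable; int clkreq_en; };   /* per-function state */

/* Same aggregation as pcie_clkpm_cap_init(): any function without the
 * capability clears both flags; any function with CLKREQ off clears
 * "enabled"; a blacklisted link is never reported as capable. */
static void clkpm_aggregate(const struct fn *f, int n, int blacklist,
                            int *capable, int *enabled)
{
        *capable = 1;
        *enabled = 1;
        for (int i = 0; i < n; i++) {
                if (!f[i].clkpm_capable) {
                        *capable = 0;
                        *enabled = 0;
                        break;
                }
                if (!f[i].clkreq_en)
                        *enabled = 0;
        }
        if (blacklist)
                *capable = 0;
}

int main(void)
{
        struct fn slot[] = { { 1, 1 }, { 1, 0 } };  /* function 1 has CLKREQ off */
        int cap, en;

        clkpm_aggregate(slot, 2, 0, &cap, &en);
        printf("capable=%d enabled=%d\n", cap, en); /* capable=1 enabled=0 */
        return 0;
}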
-static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
{
- struct pci_dev *child_dev;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
return true;
}
return false;
@@ -184,289 +181,263 @@ static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
* could use common clock. If they are, configure them to use the
* common clock. That will reduce the ASPM state exit latency.
*/
-static void pcie_aspm_configure_common_clock(struct pci_dev *pdev)
+static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
- int pos, child_pos, i = 0;
- u16 reg16 = 0;
- struct pci_dev *child_dev;
- int same_clock = 1;
+ int ppos, cpos, same_clock = 1;
+ u16 reg16, parent_reg, child_reg[8];
unsigned long start_jiffies;
- u16 child_regs[8], parent_reg;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/*
- * all functions of a slot should have the same Slot Clock
+ * All functions of a slot should have the same Slot Clock
* Configuration, so just check one function
- * */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- BUG_ON(!child_dev->is_pcie);
+ */
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ BUG_ON(!child->is_pcie);
/* Check downstream component if bit Slot Clock Configuration is 1 */
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKSTA, &reg16);
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Check upstream component if bit Slot Clock Configuration is 1 */
- pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_SLC))
same_clock = 0;
/* Configure downstream component, all functions */
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- &reg16);
- child_regs[i] = reg16;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
+ child_reg[PCI_FUNC(child->devfn)] = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(child_dev, child_pos + PCI_EXP_LNKCTL,
- reg16);
- i++;
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, reg16);
}
/* Configure upstream component */
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKCTL, &reg16);
parent_reg = reg16;
if (same_clock)
reg16 |= PCI_EXP_LNKCTL_CCC;
else
reg16 &= ~PCI_EXP_LNKCTL_CCC;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* retrain link */
+ /* Retrain link */
reg16 |= PCI_EXP_LNKCTL_RL;
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, reg16);
- /* Wait for link training end */
- /* break out after waiting for timeout */
+ /* Wait for link training end. Break out after waiting for timeout */
start_jiffies = jiffies;
for (;;) {
- pci_read_config_word(pdev, pos + PCI_EXP_LNKSTA, &reg16);
+ pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
if (!(reg16 & PCI_EXP_LNKSTA_LT))
break;
if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
break;
msleep(1);
}
- /* training failed -> recover */
- if (reg16 & PCI_EXP_LNKSTA_LT) {
- dev_printk (KERN_ERR, &pdev->dev, "ASPM: Could not configure"
- " common clock\n");
- i = 0;
- list_for_each_entry(child_dev, &pdev->subordinate->devices,
- bus_list) {
- child_pos = pci_find_capability(child_dev,
- PCI_CAP_ID_EXP);
- pci_write_config_word(child_dev,
- child_pos + PCI_EXP_LNKCTL,
- child_regs[i]);
- i++;
- }
- pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, parent_reg);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ return;
+
+ /* Training failed. Restore common clock configurations */
+ dev_printk(KERN_ERR, &parent->dev,
+ "ASPM: Could not configure common clock\n");
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ cpos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
+ child_reg[PCI_FUNC(child->devfn)]);
}
+ pci_write_config_word(parent, ppos + PCI_EXP_LNKCTL, parent_reg);
}
-/*
- * calc_L0S_latency: Convert L0s latency encoding to ns
- */
-static unsigned int calc_L0S_latency(unsigned int latency_encoding, int ac)
+/* Convert L0s latency encoding to ns */
+static u32 calc_l0s_latency(u32 encoding)
{
- unsigned int ns = 64;
+ if (encoding == 0x7)
+ return (5 * 1000); /* > 4us */
+ return (64 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 5*1000; /* > 4us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L0s acceptable latency encoding to ns */
+static u32 calc_l0s_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (64 << encoding);
}
-/*
- * calc_L1_latency: Convert L1 latency encoding to ns
- */
-static unsigned int calc_L1_latency(unsigned int latency_encoding, int ac)
+/* Convert L1 latency encoding to ns */
+static u32 calc_l1_latency(u32 encoding)
{
- unsigned int ns = 1000;
+ if (encoding == 0x7)
+ return (65 * 1000); /* > 64us */
+ return (1000 << encoding);
+}
- if (latency_encoding == 0x7) {
- if (ac)
- ns = -1U;
- else
- ns = 65*1000; /* > 64us */
- } else
- ns *= (1 << latency_encoding);
- return ns;
+/* Convert L1 acceptable latency encoding to ns */
+static u32 calc_l1_acceptable(u32 encoding)
+{
+ if (encoding == 0x7)
+ return -1U;
+ return (1000 << encoding);
}
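All four helpers above decode the same 3-bit exit-latency field. A quick standalone check of the arithmetic, in nanoseconds; l0s_latency()/l1_latency() are local stand-ins for the kernel-side conversions just introduced:

#include <stdio.h>
#include <stdint.h>

/* Same conversions as calc_l0s_latency() / calc_l1_latency() above */
static uint32_t l0s_latency(uint32_t enc) { return enc == 0x7 ? 5000 : 64u << enc; }
static uint32_t l1_latency(uint32_t enc)  { return enc == 0x7 ? 65000 : 1000u << enc; }

int main(void)
{
        for (uint32_t enc = 0; enc <= 7; enc++)
                printf("encoding %u: L0s %5u ns  L1 %6u ns\n",
                       (unsigned)enc, (unsigned)l0s_latency(enc),
                       (unsigned)l1_latency(enc));
        /* e.g. encoding 2 -> 256 ns / 4000 ns; 7 is the ">4us / >64us" case */
        return 0;
}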
static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- unsigned int *l0s, unsigned int *l1, unsigned int *enabled)
+ u32 *l0s, u32 *l1, u32 *enabled)
{
int pos;
u16 reg16;
- u32 reg32;
- unsigned int latency;
+ u32 reg32, encoding;
+ *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
*state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1|PCIE_LINK_STATE_L0S))
+ *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
*state = 0;
if (*state == 0)
return;
- latency = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_L0S_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ *l0s = calc_l0s_latency(encoding);
if (*state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_L1_latency(latency, 0);
+ encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ *l1 = calc_l1_latency(encoding);
}
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1);
+ *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
-static void pcie_aspm_cap_init(struct pci_dev *pdev)
+static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- struct pci_dev *child_dev;
- u32 state, tmp;
- struct pcie_link_state *link_state = pdev->link_state;
+ u32 support, l0s, l1, enabled;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
+
+ if (blacklist) {
+ /* Set support state to 0, so we will disable ASPM later */
+ link->aspm_support = 0;
+ link->aspm_default = 0;
+ link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return;
+ }
+
+ /* Configure common clock before checking latencies */
+ pcie_aspm_configure_common_clock(link);
/* upstream component states */
- pcie_aspm_get_cap_device(pdev, &link_state->support_state,
- &link_state->l0s_upper_latency,
- &link_state->l1_upper_latency,
- &link_state->enabled_state);
+ pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
+ link->aspm_support = support;
+ link->latency.l0s = l0s;
+ link->latency.l1 = l1;
+ link->aspm_enabled = enabled;
+
/* downstream component states, all functions have the same setting */
- child_dev = list_entry(pdev->subordinate->devices.next, struct pci_dev,
- bus_list);
- pcie_aspm_get_cap_device(child_dev, &state,
- &link_state->l0s_down_latency,
- &link_state->l1_down_latency,
- &tmp);
- link_state->support_state &= state;
- if (!link_state->support_state)
+ child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
+ pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
+ link->aspm_support &= support;
+ link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
+ link->latency.l1 = max_t(u32, link->latency.l1, l1);
+
+ if (!link->aspm_support)
return;
- link_state->enabled_state &= link_state->support_state;
- link_state->bios_aspm_state = link_state->enabled_state;
+
+ link->aspm_enabled &= link->aspm_support;
+ link->aspm_default = link->aspm_enabled;
/* ENDPOINT states*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
- u32 reg32;
- unsigned int latency;
- struct endpoint_state *ep_state =
- &link_state->endpoints[PCI_FUNC(child_dev->devfn)];
+ u32 reg32, encoding;
+ struct aspm_latency *acceptable =
+ &link->acceptable[PCI_FUNC(child->devfn)];
- if (child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END)
+ if (child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END)
continue;
- pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- pci_read_config_dword(child_dev, pos + PCI_EXP_DEVCAP, &reg32);
- latency = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- latency = calc_L0S_latency(latency, 1);
- ep_state->l0s_acceptable_latency = latency;
- if (link_state->support_state & PCIE_LINK_STATE_L1) {
- latency = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- latency = calc_L1_latency(latency, 1);
- ep_state->l1_acceptable_latency = latency;
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable->l0s = calc_l0s_acceptable(encoding);
+ if (link->aspm_support & PCIE_LINK_STATE_L1) {
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
}
}
}
-static unsigned int __pcie_aspm_check_state_one(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pci_dev *parent_dev, *tmp_dev;
- unsigned int latency, l1_latency = 0;
- struct pcie_link_state *link_state;
- struct endpoint_state *ep_state;
-
- parent_dev = pdev->bus->self;
- link_state = parent_dev->link_state;
- state &= link_state->support_state;
- if (state == 0)
- return 0;
- ep_state = &link_state->endpoints[PCI_FUNC(pdev->devfn)];
-
- /*
- * Check latency for endpoint device.
- * TBD: The latency from the endpoint to root complex vary per
- * switch's upstream link state above the device. Here we just do a
- * simple check which assumes all links above the device can be in L1
- * state, that is we just consider the worst case. If switch's upstream
- * link can't be put into L0S/L1, then our check is too strictly.
- */
- tmp_dev = pdev;
- while (state & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1)) {
- parent_dev = tmp_dev->bus->self;
- link_state = parent_dev->link_state;
- if (state & PCIE_LINK_STATE_L0S) {
- latency = max_t(unsigned int,
- link_state->l0s_upper_latency,
- link_state->l0s_down_latency);
- if (latency > ep_state->l0s_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L0S;
- }
- if (state & PCIE_LINK_STATE_L1) {
- latency = max_t(unsigned int,
- link_state->l1_upper_latency,
- link_state->l1_down_latency);
- if (latency + l1_latency >
- ep_state->l1_acceptable_latency)
- state &= ~PCIE_LINK_STATE_L1;
- }
- if (!parent_dev->bus->self) /* parent_dev is a root port */
- break;
- else {
- /*
- * parent_dev is the downstream port of a switch, make
- * tmp_dev the upstream port of the switch
- */
- tmp_dev = parent_dev->bus->self;
- /*
- * every switch on the path to root complex need 1 more
- * microsecond for L1. Spec doesn't mention L0S.
- */
- if (state & PCIE_LINK_STATE_L1)
- l1_latency += 1000;
- }
+/**
+ * __pcie_aspm_check_state_one - check latency for endpoint device.
+ * @endpoint: pointer to the struct pci_dev of endpoint device
+ *
+ * TBD: The latency from the endpoint to the root complex varies with the
+ * upstream link state of each switch above the device. Here we just do a
+ * simple check which assumes all links above the device can be in L1,
+ * i.e. we only consider the worst case. If a switch's upstream link can't
+ * be put into L0S/L1, then our check is too strict.
+ */
+static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
+{
+ u32 l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
+
+ link = endpoint->bus->self->link_state;
+ state &= link->aspm_support;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ while (link && state) {
+ if ((state & PCIE_LINK_STATE_L0S) &&
+ (link->latency.l0s > acceptable->l0s))
+ state &= ~PCIE_LINK_STATE_L0S;
+ if ((state & PCIE_LINK_STATE_L1) &&
+ (link->latency.l1 + l1_switch_latency > acceptable->l1))
+ state &= ~PCIE_LINK_STATE_L1;
+ link = link->parent;
+ /*
+ * Every switch on the path to the root complex needs 1
+ * more microsecond for L1. Spec doesn't mention L0s.
+ */
+ l1_switch_latency += 1000;
}
return state;
}
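A toy model of that walk, with invented latencies, shows how the 1-microsecond-per-switch penalty accumulates for L1 only while L0s is checked per link. ST_L0S/ST_L1, check_state() and the numbers are made up for illustration; links[] stands in for the link->parent chain:

#include <stdio.h>
#include <stdint.h>

#define ST_L0S 1
#define ST_L1  2

struct lat { uint32_t l0s, l1; };       /* exit latencies in ns */

/* Same shape as __pcie_aspm_check_state_one(): links[] is the chain from
 * the endpoint's link up to the root port. */
static unsigned check_state(const struct lat *links, int n,
                            struct lat acceptable, unsigned state)
{
        uint32_t l1_switch_latency = 0;

        for (int i = 0; i < n && state; i++) {
                if ((state & ST_L0S) && links[i].l0s > acceptable.l0s)
                        state &= ~ST_L0S;
                if ((state & ST_L1) &&
                    links[i].l1 + l1_switch_latency > acceptable.l1)
                        state &= ~ST_L1;
                /* every switch on the path adds 1us to the L1 exit path */
                l1_switch_latency += 1000;
        }
        return state;
}

int main(void)
{
        struct lat path[] = { { 256, 8000 }, { 512, 16000 } }; /* leaf..root */
        struct lat acc = { 512, 18000 };    /* endpoint acceptable latencies */

        printf("allowed states: %#x\n",
               check_state(path, 2, acc, ST_L0S | ST_L1));
        /* prints 0x3: 512 <= 512 keeps L0s, 16000 + 1000 <= 18000 keeps L1 */
        return 0;
}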
-static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
- unsigned int state)
+static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
+ pci_power_t power_state;
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
/* If no child, ignore the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
return state;
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- * */
- state = 0;
- break;
- }
- if ((child_dev->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child_dev->pcie_type != PCI_EXP_TYPE_LEG_END))
+
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ /*
+ * If the downstream component of a link is a PCI bridge, disable
+ * ASPM for the link for now
+ */
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return 0;
+
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
+ child->pcie_type != PCI_EXP_TYPE_LEG_END))
continue;
/* Device not in D0 doesn't need check latency */
- if (child_dev->current_state == PCI_D1 ||
- child_dev->current_state == PCI_D2 ||
- child_dev->current_state == PCI_D3hot ||
- child_dev->current_state == PCI_D3cold)
+ power_state = child->current_state;
+ if (power_state == PCI_D1 || power_state == PCI_D2 ||
+ power_state == PCI_D3hot || power_state == PCI_D3cold)
continue;
- state = __pcie_aspm_check_state_one(child_dev, state);
+ state = __pcie_aspm_check_state_one(child, state);
}
return state;
}
@@ -482,90 +453,71 @@ static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
-static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
+static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
{
- struct pci_dev *child_dev;
- int valid = 1;
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pci_dev *child, *parent = link->pdev;
+ struct pci_bus *linkbus = parent->subordinate;
/* If no child, disable the link */
- if (list_empty(&pdev->subordinate->devices))
+ if (list_empty(&linkbus->devices))
state = 0;
/*
- * if the downstream component has pci bridge function, don't do ASPM
- * now
+ * If the downstream component has pci bridge function, don't
+ * do ASPM now.
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
- valid = 0;
- break;
- }
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
+ return;
}
- if (!valid)
- return;
-
/*
- * spec 2.0 suggests all functions should be configured the same
- * setting for ASPM. Enabling ASPM L1 should be done in upstream
- * component first and then downstream, and vice versa for disabling
- * ASPM L1. Spec doesn't mention L0S.
+ * Spec 2.0 suggests all functions should be configured with the
+ * same ASPM setting. Enabling ASPM L1 should be done in the
+ * upstream component first and then the downstream one, and vice
+ * versa for disabling ASPM L1. The spec doesn't mention L0S.
*/
if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list)
- __pcie_aspm_config_one_dev(child_dev, state);
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ __pcie_aspm_config_one_dev(child, state);
if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(pdev, state);
+ __pcie_aspm_config_one_dev(parent, state);
- link_state->enabled_state = state;
+ link->aspm_enabled = state;
}
-static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+/* Check the whole hierarchy, and configure each link in the hierarchy */
+static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
- struct pcie_link_state *root_port_link = link;
- while (root_port_link->parent)
- root_port_link = root_port_link->parent;
- return root_port_link;
-}
+ struct pcie_link_state *leaf, *root = link->root;
-/* check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
-{
- struct pcie_link_state *link_state = pdev->link_state;
- struct pcie_link_state *root_port_link = get_root_port_link(link_state);
- struct pcie_link_state *leaf;
+ state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
- state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
-
- /* check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (!list_empty(&leaf->children) ||
- get_root_port_link(leaf) != root_port_link)
+ /* Check all links who have specific root port link */
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (!list_empty(&leaf->children) || (leaf->root != root))
continue;
- state = pcie_aspm_check_state(leaf->pdev, state);
+ state = pcie_aspm_check_state(leaf, state);
}
- /* check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root_port_link->pdev, state);
-
- if (link_state->enabled_state == state)
+ /* Check root port link too in case it hasn't children */
+ state = pcie_aspm_check_state(root, state);
+ if (link->aspm_enabled == state)
return;
-
/*
- * we must change the hierarchy. See comments in
+ * We must change the hierarchy. See comments in
* __pcie_aspm_config_link for the order
**/
if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
} else {
- list_for_each_entry_reverse(leaf, &link_list, sibiling) {
- if (get_root_port_link(leaf) == root_port_link)
- __pcie_aspm_config_link(leaf->pdev, state);
+ list_for_each_entry_reverse(leaf, &link_list, sibling) {
+ if (leaf->root == root)
+ __pcie_aspm_config_link(leaf, state);
}
}
}
@@ -574,45 +526,42 @@ static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
* pcie_aspm_configure_link_state: enable/disable PCI express link state
* @pdev: the root port or switch downstream port
*/
-static void pcie_aspm_configure_link_state(struct pci_dev *pdev,
- unsigned int state)
+static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
+ u32 state)
{
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(pdev, state);
+ __pcie_aspm_configure_link_state(link, state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
-static void free_link_state(struct pci_dev *pdev)
+static void free_link_state(struct pcie_link_state *link)
{
- kfree(pdev->link_state);
- pdev->link_state = NULL;
+ link->pdev->link_state = NULL;
+ kfree(link);
}
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
- struct pci_dev *child_dev;
- int child_pos;
+ struct pci_dev *child;
+ int pos;
u32 reg32;
-
/*
- * Some functions in a slot might not all be PCIE functions, very
- * strange. Disable ASPM for the whole slot
+ * Some functions in a slot might not all be PCIE functions,
+ * very strange. Disable ASPM for the whole slot
*/
- list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
- child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP);
- if (!child_pos)
+ list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
+ pos = pci_find_capability(child, PCI_CAP_ID_EXP);
+ if (!pos)
return -EINVAL;
-
/*
* Disable ASPM for pre-1.1 PCIe device, we follow MS to use
* RBER bit to determine if a function is 1.1 version device
*/
- pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP,
- &reg32);
+ pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
- dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM"
+ dev_printk(KERN_INFO, &child->dev, "disabling ASPM"
" on pre-1.1 PCIe device. You can enable it"
" with 'pcie_aspm=force'\n");
return -EINVAL;
@@ -621,6 +570,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
return 0;
}
+static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+{
+ struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return NULL;
+ INIT_LIST_HEAD(&link->sibling);
+ INIT_LIST_HEAD(&link->children);
+ INIT_LIST_HEAD(&link->link);
+ link->pdev = pdev;
+ if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) {
+ struct pcie_link_state *parent;
+ parent = pdev->bus->parent->self->link_state;
+ if (!parent) {
+ kfree(link);
+ return NULL;
+ }
+ link->parent = parent;
+ list_add(&link->link, &parent->children);
+ }
+ /* Setup a pointer to the root port link */
+ if (!link->parent)
+ link->root = link;
+ else
+ link->root = link->parent->root;
+
+ list_add(&link->sibling, &link_list);
+
+ pdev->link_state = link;
+
+ /* Check ASPM capability */
+ pcie_aspm_cap_init(link, blacklist);
+
+ /* Check Clock PM capability */
+ pcie_clkpm_cap_init(link, blacklist);
+
+ return link;
+}
+
/*
* pcie_aspm_init_link_state: Initiate PCI express link state.
* It is called after the pcie and its children devices are scaned.
@@ -628,75 +618,47 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- unsigned int state;
- struct pcie_link_state *link_state;
- int error = 0;
- int blacklist;
+ u32 state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+
+ /* VIA has a strange chipset, root port is under a bridge */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
+ pdev->bus->self)
return;
+
down_read(&pci_bus_sem);
if (list_empty(&pdev->subordinate->devices))
goto out;
- blacklist = !!pcie_aspm_sanity_check(pdev);
-
mutex_lock(&aspm_lock);
-
- link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
- if (!link_state)
- goto unlock_out;
-
- link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
- INIT_LIST_HEAD(&link_state->children);
- INIT_LIST_HEAD(&link_state->link);
- if (pdev->bus->self) {/* this is a switch */
- struct pcie_link_state *parent_link_state;
-
- parent_link_state = pdev->bus->parent->self->link_state;
- if (!parent_link_state) {
- kfree(link_state);
- goto unlock_out;
- }
- list_add(&link_state->link, &parent_link_state->children);
- link_state->parent = parent_link_state;
- }
-
- pdev->link_state = link_state;
-
- if (!blacklist) {
- pcie_aspm_configure_common_clock(pdev);
- pcie_aspm_cap_init(pdev);
+ link = pcie_aspm_setup_link_state(pdev);
+ if (!link)
+ goto unlock;
+ /*
+ * Setup initial ASPM state
+ *
+ * If the link has a switch below it, delay the link configuration.
+ * The leaf link initialization will configure the whole hierarchy,
+ * but we must make sure the BIOS doesn't set an unsupported link state.
+ */
+ if (pcie_aspm_downstream_has_switch(link)) {
+ state = pcie_aspm_check_state(link, link->aspm_default);
+ __pcie_aspm_config_link(link, state);
} else {
- link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
- link_state->bios_aspm_state = 0;
- /* Set support state to 0, so we will disable ASPM later */
- link_state->support_state = 0;
+ state = policy_to_aspm_state(link);
+ __pcie_aspm_configure_link_state(link, state);
}
- link_state->pdev = pdev;
- list_add(&link_state->sibiling, &link_list);
-
- if (link_state->downstream_has_switch) {
- /*
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. but we must
- * make sure BIOS doesn't set unsupported link state
- **/
- state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
- __pcie_aspm_config_link(pdev, state);
- } else
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
-
- pcie_check_clock_pm(pdev, blacklist);
-
-unlock_out:
- if (error)
- free_link_state(pdev);
+ /* Setup initial Clock PM state */
+ state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
+ pcie_set_clkpm(link, state);
+unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
@@ -725,11 +687,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
/* All functions are removed, so just disable ASPM for the link */
__pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibiling);
+ list_del(&link_state->sibling);
list_del(&link_state->link);
/* Clock PM is for endpoint device */
- free_link_state(parent);
+ free_link_state(link_state);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -749,7 +711,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
* devices changed PM state, we should recheck if latency meets all
* functions' requirement
*/
- pcie_aspm_configure_link_state(pdev, link_state->enabled_state);
+ pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
}
/*
@@ -772,14 +734,12 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
link_state = parent->link_state;
- link_state->support_state &=
- ~(state & (PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1));
- if (state & PCIE_LINK_STATE_CLKPM)
- link_state->clk_pm_capable = 0;
-
- __pcie_aspm_configure_link_state(parent, link_state->enabled_state);
- if (!link_state->clk_pm_capable && link_state->clk_pm_enabled)
- pcie_set_clock_pm(parent, 0);
+ link_state->aspm_support &= ~state;
+ __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ if (state & PCIE_LINK_STATE_CLKPM) {
+ link_state->clkpm_capable = 0;
+ pcie_set_clkpm(link_state, 0);
+ }
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
}
@@ -788,7 +748,6 @@ EXPORT_SYMBOL(pci_disable_link_state);
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pci_dev *pdev;
struct pcie_link_state *link_state;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
@@ -802,14 +761,10 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibiling) {
- pdev = link_state->pdev;
- __pcie_aspm_configure_link_state(pdev,
- policy_to_aspm_state(pdev));
- if (link_state->clk_pm_capable &&
- link_state->clk_pm_enabled != policy_to_clkpm_state(pdev))
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
-
+ list_for_each_entry(link_state, &link_list, sibling) {
+ __pcie_aspm_configure_link_state(link_state,
+ policy_to_aspm_state(link_state));
+ pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -838,7 +793,7 @@ static ssize_t link_state_show(struct device *dev,
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->enabled_state);
+ return sprintf(buf, "%d\n", link_state->aspm_enabled);
}
static ssize_t link_state_store(struct device *dev,
@@ -846,7 +801,7 @@ static ssize_t link_state_store(struct device *dev,
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -854,7 +809,7 @@ static ssize_t link_state_store(struct device *dev,
state = buf[0]-'0';
if (state >= 0 && state <= 3) {
/* setup link aspm state */
- pcie_aspm_configure_link_state(pci_device, state);
+ pcie_aspm_configure_link_state(pdev->link_state, state);
return n;
}
@@ -868,7 +823,7 @@ static ssize_t clk_ctl_show(struct device *dev,
struct pci_dev *pci_device = to_pci_dev(dev);
struct pcie_link_state *link_state = pci_device->link_state;
- return sprintf(buf, "%d\n", link_state->clk_pm_enabled);
+ return sprintf(buf, "%d\n", link_state->clkpm_enabled);
}
static ssize_t clk_ctl_store(struct device *dev,
@@ -876,7 +831,7 @@ static ssize_t clk_ctl_store(struct device *dev,
const char *buf,
size_t n)
{
- struct pci_dev *pci_device = to_pci_dev(dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
int state;
if (n < 1)
@@ -885,7 +840,7 @@ static ssize_t clk_ctl_store(struct device *dev,
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- pcie_set_clock_pm(pci_device, !!state);
+ pcie_set_clkpm_nocheck(pdev->link_state, !!state);
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -904,10 +859,10 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_add_file_to_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
@@ -920,10 +875,10 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
return;
- if (link_state->support_state)
+ if (link_state->aspm_support)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_link_state.attr, power_group);
- if (link_state->clk_pm_capable)
+ if (link_state->clkpm_capable)
sysfs_remove_file_from_group(&pdev->dev.kobj,
&dev_attr_clk_ctl.attr, power_group);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f1ae2475ffff..40e75f6a5056 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -193,7 +193,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
if (type == pci_bar_io) {
l &= PCI_BASE_ADDRESS_IO_MASK;
- mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
+ mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
} else {
l &= PCI_BASE_ADDRESS_MEM_MASK;
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
@@ -237,6 +237,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
dev_printk(KERN_DEBUG, &dev->dev,
"reg %x 64bit mmio: %pR\n", pos, res);
}
+
+ res->flags |= IORESOURCE_MEM_64;
} else {
sz = pci_size(l, sz, mask);
@@ -287,7 +289,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
struct resource *res;
int i;
- if (!child->parent) /* It's a host bus, nothing to read */
+ if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
if (dev->transparent) {
@@ -362,7 +364,10 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
}
}
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
+ IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
res->start = base;
res->end = limit + 0xfffff;
dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bd4253f93d5a..56552d74abea 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1133,6 +1133,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
+ case 0x1897: /* A6L notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
@@ -1163,6 +1164,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
+ case 0x006a: /* HP Compaq nx9500 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
@@ -2016,6 +2018,28 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
+/* Originally in EDAC sources for i82875P:
+ * Intel tells BIOS developers to hide device 6 which
+ * configures the overflow device access containing
+ * the DRBs - this is where we expose device 6.
+ * http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
+ */
+static void __devinit quirk_unhide_mch_dev6(struct pci_dev *dev)
+{
+ u8 reg;
+
+ if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
+ dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
+ pci_write_config_byte(dev, 0xF4, reg | 0x02);
+ }
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
+ quirk_unhide_mch_dev6);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
+ quirk_unhide_mch_dev6);
+
+
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 86503c14ce7e..176615e7231f 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -32,8 +32,6 @@ static void pci_stop_dev(struct pci_dev *dev)
static void pci_destroy_dev(struct pci_dev *dev)
{
- pci_stop_dev(dev);
-
/* Remove the device from the device lists, and prevent any further
* list accesses from this device */
down_write(&pci_bus_sem);
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 710d4ea69568..e8cb5051c311 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -29,7 +29,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
if (pdev->is_pcie)
return NULL;
while (1) {
- if (!pdev->bus->parent)
+ if (pci_is_root_bus(pdev->bus))
break;
pdev = pdev->bus->self;
/* a p2p bridge */
@@ -115,36 +115,6 @@ pci_find_next_bus(const struct pci_bus *from)
#ifdef CONFIG_PCI_LEGACY
/**
- * pci_find_slot - locate PCI device from a given PCI slot
- * @bus: number of PCI bus on which desired PCI device resides
- * @devfn: encodes number of PCI slot in which the desired PCI
- * device resides and the logical device number within that slot
- * in case of multi-function devices.
- *
- * Given a PCI bus and slot/function number, the desired PCI device
- * is located in system global list of PCI devices. If the device
- * is found, a pointer to its data structure is returned. If no
- * device is found, %NULL is returned.
- *
- * NOTE: Do not use this function any more; use pci_get_slot() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn)
-{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- if (dev->bus->number == bus && dev->devfn == devfn) {
- pci_dev_put(dev);
- return dev;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(pci_find_slot);
-
-/**
* pci_find_device - begin or continue searching for a PCI device by vendor/device id
* @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
* @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index a00f85471b6e..b636e245445d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -58,7 +58,6 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
res = list->res;
idx = res - &list->dev->resource[0];
if (pci_assign_resource(list->dev, idx)) {
- /* FIXME: get rid of this */
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -143,6 +142,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
struct pci_dev *bridge = bus->self;
struct pci_bus_region region;
u32 l, bu, lu, io_upper16;
+ int pref_mem64;
if (pci_is_enabled(bridge))
return;
@@ -198,16 +198,22 @@ static void pci_setup_bridge(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
/* Set up PREF base/limit. */
+ pref_mem64 = 0;
bu = lu = 0;
pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
if (bus->resource[2]->flags & IORESOURCE_PREFETCH) {
+ int width = 8;
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
- bu = upper_32_bits(region.start);
- lu = upper_32_bits(region.end);
- dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
- (unsigned long long)region.start,
- (unsigned long long)region.end);
+ if (bus->resource[2]->flags & IORESOURCE_MEM_64) {
+ pref_mem64 = 1;
+ bu = upper_32_bits(region.start);
+ lu = upper_32_bits(region.end);
+ width = 16;
+ }
+ dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n",
+ width, (unsigned long long)region.start,
+ width, (unsigned long long)region.end);
}
else {
l = 0x0000fff0;
@@ -215,9 +221,11 @@ static void pci_setup_bridge(struct pci_bus *bus)
}
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
- /* Set the upper 32 bits of PREF base & limit. */
- pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
- pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ if (pref_mem64) {
+ /* Set the upper 32 bits of PREF base & limit. */
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
+ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
+ }
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -255,8 +263,25 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
}
- if (pmem)
+ if (pmem) {
b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64)
+ b_res[2].flags |= IORESOURCE_MEM_64;
+ }
+
+ /* double check if bridge does support 64 bit pref */
+ if (b_res[2].flags & IORESOURCE_MEM_64) {
+ u32 mem_base_hi, tmp;
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ &mem_base_hi);
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ 0xffffffff);
+ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
+ if (!tmp)
+ b_res[2].flags &= ~IORESOURCE_MEM_64;
+ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
+ mem_base_hi);
+ }
}
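The double check added above relies on the classic probe pattern: save the upper-32-bit prefetchable base register, write all-ones, and see whether any bits stick; a register hard-wired to zero means the bridge only implements a 32-bit window. A rough standalone sketch of that pattern, where cfg_read()/cfg_write() and bridge_has_64bit_pref() are invented stand-ins for the config-space accessors:

#include <stdio.h>
#include <stdint.h>

static uint32_t upper32_reg;            /* pretend PCI_PREF_BASE_UPPER32 */
static int upper32_implemented;         /* toggle to model a 32-bit-only bridge */

static uint32_t cfg_read(void)     { return upper32_reg; }
static void cfg_write(uint32_t v)  { upper32_reg = upper32_implemented ? v : 0; }

static int bridge_has_64bit_pref(void)
{
        uint32_t saved = cfg_read(), probed;

        cfg_write(0xffffffff);          /* write all-ones ...          */
        probed = cfg_read();            /* ... and see what sticks     */
        cfg_write(saved);               /* always restore the register */
        return probed != 0;
}

int main(void)
{
        printf("64-bit pref: %d\n", bridge_has_64bit_pref());  /* 0 */
        upper32_implemented = 1;
        printf("64-bit pref: %d\n", bridge_has_64bit_pref());  /* 1 */
        return 0;
}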
/* Helper function for sizing routines: find first available
@@ -336,6 +361,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
int order, max_order;
struct resource *b_res = find_free_bus_resource(bus, type);
+ unsigned int mem64_mask = 0;
if (!b_res)
return 0;
@@ -344,9 +370,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
max_order = 0;
size = 0;
+ mem64_mask = b_res->flags & IORESOURCE_MEM_64;
+ b_res->flags &= ~IORESOURCE_MEM_64;
+
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
-
+
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *r = &dev->resource[i];
resource_size_t r_size;
@@ -372,6 +401,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
aligns[order] += align;
if (order > max_order)
max_order = order;
+ mem64_mask &= r->flags & IORESOURCE_MEM_64;
}
}
@@ -396,6 +426,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
b_res->start = min_align;
b_res->end = size + min_align - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
+ b_res->flags |= mem64_mask;
return 1;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 12403516776a..b711fb7181e2 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -135,23 +135,16 @@ void pci_disable_bridge_window(struct pci_dev *dev)
}
#endif /* CONFIG_PCI_QUIRKS */
-int pci_assign_resource(struct pci_dev *dev, int resno)
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno)
{
- struct pci_bus *bus = dev->bus;
struct resource *res = dev->resource + resno;
resource_size_t size, min, align;
int ret;
size = resource_size(res);
min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-
align = resource_alignment(res);
- if (!align) {
- dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
- "alignment) %pR flags %#lx\n",
- resno, res, res->flags);
- return -EINVAL;
- }
/* First, try exact prefetching match.. */
ret = pci_bus_alloc_resource(bus, res, size, align, min,
@@ -169,10 +162,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
pcibios_align_resource, dev);
}
- if (ret) {
- dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
- resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
- } else {
+ if (!ret) {
res->flags &= ~IORESOURCE_STARTALIGN;
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -181,6 +171,39 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret;
}
+int pci_assign_resource(struct pci_dev *dev, int resno)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t align;
+ struct pci_bus *bus;
+ int ret;
+
+ align = resource_alignment(res);
+ if (!align) {
+ dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus "
+ "alignment) %pR flags %#lx\n",
+ resno, res, res->flags);
+ return -EINVAL;
+ }
+
+ bus = dev->bus;
+ while ((ret = __pci_assign_resource(bus, dev, resno))) {
+ if (bus->parent && bus->self->transparent)
+ bus = bus->parent;
+ else
+ bus = NULL;
+ if (bus)
+ continue;
+ break;
+ }
+
+ if (ret)
+ dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n",
+ resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res);
+
+ return ret;
+}
+
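The new pci_assign_resource() retries the allocation one level up whenever the current bus sits behind a transparent bridge. A toy sketch of just that loop shape; try_alloc() stands in for __pci_assign_resource(), and for simplicity the transparent flag lives on the bus struct here rather than on the bridge device (bus->self) as in the real code:

#include <stdio.h>
#include <stddef.h>

struct bus { struct bus *parent; int transparent; int has_space; };

static int try_alloc(struct bus *b) { return b->has_space ? 0 : -1; }

static int assign(struct bus *bus)
{
        int ret;

        while ((ret = try_alloc(bus))) {
                /* only climb past transparent bridges, stop at the root */
                if (bus->parent && bus->transparent)
                        bus = bus->parent;
                else
                        break;
        }
        return ret;
}

int main(void)
{
        struct bus root = { NULL, 0, 1 };   /* root bus has room */
        struct bus leaf = { &root, 1, 0 };  /* transparent bridge, no room */

        printf("%d\n", assign(&leaf));      /* 0: allocation fell back to root */
        return 0;
}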
#if 0
int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
{
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index fe95ce20bcbd..eddb0748b0ea 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -307,6 +307,45 @@ void pci_destroy_slot(struct pci_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+#include <linux/pci_hotplug.h>
+/**
+ * pci_hp_create_module_link - create symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to create symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_create_module_link(struct pci_slot *pci_slot)
+{
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ struct kobject *kobj = NULL;
+ int no_warn;
+
+ if (!slot || !slot->ops)
+ return;
+ kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ if (!kobj)
+ return;
+ no_warn = sysfs_create_link(&pci_slot->kobj, kobj, "module");
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(pci_hp_create_module_link);
+
+/**
+ * pci_hp_remove_module_link - remove symbolic link to the hotplug driver module.
+ * @pci_slot: struct pci_slot
+ *
+ * Helper function for pci_hotplug_core.c to remove symbolic link to
+ * the hotplug driver module.
+ */
+void pci_hp_remove_module_link(struct pci_slot *pci_slot)
+{
+ sysfs_remove_link(&pci_slot->kobj, "module");
+}
+EXPORT_SYMBOL_GPL(pci_hp_remove_module_link);
+#endif
+
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 09a503e5da6a..be2fd6f91639 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -958,12 +958,12 @@ static void acer_rfkill_update(struct work_struct *ignored)
status = get_u32(&state, ACER_CAP_WIRELESS);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(wireless_rfkill, !!state);
+ rfkill_set_sw_state(wireless_rfkill, !state);
if (has_cap(ACER_CAP_BLUETOOTH)) {
status = get_u32(&state, ACER_CAP_BLUETOOTH);
if (ACPI_SUCCESS(status))
- rfkill_set_sw_state(bluetooth_rfkill, !!state);
+ rfkill_set_sw_state(bluetooth_rfkill, !state);
}
schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 03bf522bd7ab..8153b3e59189 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -180,6 +180,7 @@ static struct key_entry eeepc_keymap[] = {
*/
static int eeepc_hotk_add(struct acpi_device *device);
static int eeepc_hotk_remove(struct acpi_device *device, int type);
+static int eeepc_hotk_resume(struct acpi_device *device);
static const struct acpi_device_id eeepc_device_ids[] = {
{EEEPC_HOTK_HID, 0},
@@ -194,6 +195,7 @@ static struct acpi_driver eeepc_hotk_driver = {
.ops = {
.add = eeepc_hotk_add,
.remove = eeepc_hotk_remove,
+ .resume = eeepc_hotk_resume,
},
};
@@ -512,15 +514,12 @@ static int notify_brn(void)
return -1;
}
-static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+static void eeepc_rfkill_hotplug(void)
{
struct pci_dev *dev;
struct pci_bus *bus = pci_find_bus(0, 1);
bool blocked;
- if (event != ACPI_NOTIFY_BUS_CHECK)
- return;
-
if (!bus) {
printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
return;
@@ -551,6 +550,14 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
}
+static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+{
+ if (event != ACPI_NOTIFY_BUS_CHECK)
+ return;
+
+ eeepc_rfkill_hotplug();
+}
+
static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
{
static struct key_entry *key;
@@ -675,8 +682,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
if (!ehotk->eeepc_wlan_rfkill)
goto wlan_fail;
- rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
- get_acpi(CM_ASL_WLAN) != 1);
+ rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill,
+ get_acpi(CM_ASL_WLAN) != 1);
result = rfkill_register(ehotk->eeepc_wlan_rfkill);
if (result)
goto wlan_fail;
@@ -693,8 +700,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
if (!ehotk->eeepc_bluetooth_rfkill)
goto bluetooth_fail;
- rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
- get_acpi(CM_ASL_BLUETOOTH) != 1);
+ rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill,
+ get_acpi(CM_ASL_BLUETOOTH) != 1);
result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
if (result)
goto bluetooth_fail;
@@ -734,6 +741,33 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type)
return 0;
}
+static int eeepc_hotk_resume(struct acpi_device *device)
+{
+ if (ehotk->eeepc_wlan_rfkill) {
+ bool wlan;
+
+ /* Workaround - it seems that _PTS disables the wireless
+ without notification or changing the value read by WLAN.
+ Normally this is fine because the correct value is restored
+ from the non-volatile storage on resume, but we need to do
+ it ourselves in case suspend is aborted, or we lose wireless.
+ */
+ wlan = get_acpi(CM_ASL_WLAN);
+ set_acpi(CM_ASL_WLAN, wlan);
+
+ rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
+ wlan != 1);
+
+ eeepc_rfkill_hotplug();
+ }
+
+ if (ehotk->eeepc_bluetooth_rfkill)
+ rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
+ get_acpi(CM_ASL_BLUETOOTH) != 1);
+
+ return 0;
+}
+
/*
* Hwmon
*/
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 86e958539f46..40d64c03278c 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1163,8 +1163,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
{
struct tpacpi_rfk *atp_rfk;
int res;
- bool initial_sw_state = false;
- int initial_sw_status;
+ bool sw_state = false;
+ int sw_status;
BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);
@@ -1185,17 +1185,17 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
atp_rfk->id = id;
atp_rfk->ops = tp_rfkops;
- initial_sw_status = (tp_rfkops->get_status)();
- if (initial_sw_status < 0) {
+ sw_status = (tp_rfkops->get_status)();
+ if (sw_status < 0) {
printk(TPACPI_ERR
"failed to read initial state for %s, error %d\n",
- name, initial_sw_status);
+ name, sw_status);
} else {
- initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF);
+ sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
if (set_default) {
/* try to keep the initial state, since we ask the
* firmware to preserve it across S5 in NVRAM */
- rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state);
+ rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
}
}
rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e5b84db0aa03..749836668655 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -470,7 +470,7 @@ static int dasd_decrease_state(struct dasd_device *device)
*/
static void dasd_change_state(struct dasd_device *device)
{
- int rc;
+ int rc;
if (device->state == device->target)
/* Already where we want to go today... */
@@ -479,8 +479,10 @@ static void dasd_change_state(struct dasd_device *device)
rc = dasd_increase_state(device);
else
rc = dasd_decrease_state(device);
- if (rc && rc != -EAGAIN)
- device->target = device->state;
+ if (rc == -EAGAIN)
+ return;
+ if (rc)
+ device->target = device->state;
if (device->state == device->target) {
wake_up(&dasd_init_waitq);
@@ -2503,15 +2505,25 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
if (IS_ERR(device))
return PTR_ERR(device);
+ /* allow new IO again */
+ device->stopped &= ~DASD_STOPPED_PM;
+ device->stopped &= ~DASD_UNRESUMED_PM;
+
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
if (device->discipline->restore)
rc = device->discipline->restore(device);
+ if (rc)
+ /*
+ * if the resume failed for the DASD we put it in
+ * an UNRESUMED stop state
+ */
+ device->stopped |= DASD_UNRESUMED_PM;
dasd_put_device(device);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1c28ec3e4ccb..f8b1f04f26b8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3243,9 +3243,6 @@ int dasd_eckd_restore_device(struct dasd_device *device)
int is_known, rc;
struct dasd_uid temp_uid;
- /* allow new IO again */
- device->stopped &= ~DASD_STOPPED_PM;
-
private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */
@@ -3295,12 +3292,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
return 0;
out_err:
- /*
- * if the resume failed for the DASD we put it in
- * an UNRESUMED stop state
- */
- device->stopped |= DASD_UNRESUMED_PM;
- return 0;
+ return -1;
}
static struct ccw_driver dasd_eckd_driver = {
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 04dc734805c6..21639d6c996f 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -20,10 +20,7 @@
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/reboot.h>
-
#include <linux/slab.h>
-#include <linux/bootmem.h>
-
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/io.h>
@@ -735,7 +732,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev)
unsigned long flags;
/* Empty the output buffer, then prevent new I/O. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
raw->flags |= RAW3215_FROZEN;
@@ -749,7 +746,7 @@ static int raw3215_pm_start(struct ccw_device *cdev)
unsigned long flags;
/* Allow I/O again and flush output buffer. */
- raw = cdev->dev.driver_data;
+ raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_FROZEN;
raw->flags |= RAW3215_FLUSHING;
@@ -883,7 +880,7 @@ static int __init con3215_init(void)
raw3215_freelist = NULL;
spin_lock_init(&raw3215_freelist_lock);
for (i = 0; i < NR_3215_REQ; i++) {
- req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req));
+ req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
req->next = raw3215_freelist;
raw3215_freelist = req;
}
@@ -893,10 +890,9 @@ static int __init con3215_init(void)
return -ENODEV;
raw3215[0] = raw = (struct raw3215_info *)
- alloc_bootmem_low(sizeof(struct raw3215_info));
- memset(raw, 0, sizeof(struct raw3215_info));
- raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE);
- raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
+ kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
+ raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
+ raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
raw->cdev = cdev;
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
@@ -906,9 +902,9 @@ static int __init con3215_init(void)
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
- free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE);
- free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE);
- free_bootmem((unsigned long) raw, sizeof(struct raw3215_info));
+ kfree(raw->inbuf);
+ kfree(raw->buffer);
+ kfree(raw);
raw3215[0] = NULL;
return -ENODEV;
}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 44d02e371c04..bb838bdf829d 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -600,16 +599,14 @@ con3270_init(void)
if (IS_ERR(rp))
return PTR_ERR(rp);
- condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270));
- memset(condev, 0, sizeof(struct con3270));
+ condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
condev->view.dev = rp;
- condev->read = raw3270_request_alloc_bootmem(0);
+ condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev;
- condev->write =
- raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE);
- condev->kreset = raw3270_request_alloc_bootmem(1);
+ condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
+ condev->kreset = raw3270_request_alloc(1);
INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update);
@@ -623,7 +620,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) {
- cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
+ cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
}
condev->cline = alloc_string(&condev->freemem, condev->view.cols);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 75a8831eebbc..7892550d7932 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp)
goto out_path;
}
filp->private_data = monpriv;
- monreader_device->driver_data = monpriv;
+ dev_set_drvdata(monreader_device, monpriv);
unlock_kernel();
return nonseekable_open(inode, filp);
@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = {
*****************************************************************************/
static int monreader_freeze(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
@@ -487,7 +487,7 @@ static int monreader_freeze(struct device *dev)
static int monreader_thaw(struct device *dev)
{
- struct mon_private *monpriv = dev->driver_data;
+ struct mon_private *monpriv = dev_get_drvdata(dev);
int rc;
if (!monpriv)
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index acab7b2dfe8a..d6a022f55e92 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009
*/
-#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -143,33 +142,6 @@ raw3270_request_alloc(size_t size)
return rq;
}
-#ifdef CONFIG_TN3270_CONSOLE
-/*
- * Allocate a new 3270 ccw request from bootmem. Only works very
- * early in the boot process. Only con3270.c should be using this.
- */
-struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
-{
- struct raw3270_request *rq;
-
- rq = alloc_bootmem_low(sizeof(struct raw3270));
-
- /* alloc output buffer. */
- if (size > 0)
- rq->buffer = alloc_bootmem_low(size);
- rq->size = size;
- INIT_LIST_HEAD(&rq->list);
-
- /*
- * Setup ccw.
- */
- rq->ccw.cda = __pa(rq->buffer);
- rq->ccw.flags = CCW_FLAG_SLI;
-
- return rq;
-}
-#endif
-
/*
* Free 3270 ccw request
*/
@@ -846,8 +818,8 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
char *ascebc;
int rc;
- rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270));
- ascebc = (char *) alloc_bootmem(256);
+ rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
return ERR_PTR(rc);
@@ -1350,7 +1322,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev)
struct raw3270_view *view;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
@@ -1376,7 +1348,7 @@ static int raw3270_pm_start(struct ccw_device *cdev)
struct raw3270 *rp;
unsigned long flags;
- rp = cdev->dev.driver_data;
+ rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 336811a77672..ad698d30cb3b 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
-#include <linux/bootmem.h>
#include <linux/termios.h>
#include <linux/err.h>
#include <linux/reboot.h>
@@ -110,7 +109,7 @@ static void sclp_console_sync_queue(void)
spin_lock_irqsave(&sclp_con_lock, flags);
if (timer_pending(&sclp_con_timer))
- del_timer_sync(&sclp_con_timer);
+ del_timer(&sclp_con_timer);
while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait();
@@ -298,8 +297,8 @@ sclp_console_init(void)
/* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- list_add_tail((struct list_head *) page, &sclp_con_pages);
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ list_add_tail(page, &sclp_con_pages);
}
INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock);
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 5518e24946aa..178724f2a4c3 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -20,7 +20,6 @@
#include <linux/major.h>
#include <linux/console.h>
#include <linux/kdev_t.h>
-#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/reboot.h>
@@ -601,10 +600,7 @@ static void __init __sclp_vt220_free_pages(void)
list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page);
- if (slab_is_available())
- free_page((unsigned long) page);
- else
- free_bootmem((unsigned long) page, PAGE_SIZE);
+ free_page((unsigned long) page);
}
}
@@ -640,16 +636,12 @@ static int __init __sclp_vt220_init(int num_pages)
sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */
+ rc = -ENOMEM;
for (i = 0; i < num_pages; i++) {
- if (slab_is_available())
- page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- else
- page = alloc_bootmem_low_pages(PAGE_SIZE);
- if (!page) {
- rc = -ENOMEM;
+ page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page)
goto out;
- }
- list_add_tail((struct list_head *) page, &sclp_vt220_empty);
+ list_add_tail(page, &sclp_vt220_empty);
}
rc = sclp_register(&sclp_vt220_register);
out:
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 595aa04cfd01..1d420d947596 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -396,7 +396,7 @@ int tape_generic_pm_suspend(struct ccw_device *cdev)
{
struct tape_device *device;
- device = cdev->dev.driver_data;
+ device = dev_get_drvdata(&cdev->dev);
if (!device) {
return -ENODEV;
}
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 411cfa3c7719..c20a4fe6da51 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -663,7 +663,7 @@ static struct attribute *vmlogrdr_attrs[] = {
static int vmlogrdr_pm_prepare(struct device *dev)
{
int rc;
- struct vmlogrdr_priv_t *priv = dev->driver_data;
+ struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0;
if (priv) {
@@ -753,7 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
- dev->driver_data = priv;
+ dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
* module has been unloaded. It's _only_ task is to
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7d9e67cb6471..31b902e94f7b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -170,7 +170,7 @@ static void urdev_put(struct urdev *urd)
*/
static int ur_pm_suspend(struct ccw_device *cdev)
{
- struct urdev *urd = cdev->dev.driver_data;
+ struct urdev *urd = dev_get_drvdata(&cdev->dev);
TRACE("ur_pm_suspend: cdev=%p\n", cdev);
if (urd->open_flag) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb8114388..b1241f8fae88 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void)
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */
-void qdio_sync_after_thinint(struct qdio_q *q);
-int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
- int auto_ack);
-void qdio_check_outbound_after_thinint(struct qdio_q *q);
-int qdio_inbound_q_moved(struct qdio_q *q);
-void qdio_kick_handler(struct qdio_q *q);
-void qdio_stop_polling(struct qdio_q *q);
-int qdio_siga_sync_q(struct qdio_q *q);
-
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void);
void qdio_setup_exit(void);
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state);
#endif /* _CIO_QDIO_H */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b34f86c..b8626d4df116 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
- qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
- get_buf_state(q, i, &state, 0);
+ debug_get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..0038750ad945 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
return i;
}
-inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
- unsigned char *state, int auto_ack)
+static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state, int auto_ack)
{
return get_buf_states(q, bufnr, state, 1, auto_ack);
}
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
QDIO_MAX_BUFFERS_PER_Q);
}
-static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
+static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
int cc;
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
return cc;
}
-inline int qdio_siga_sync_q(struct qdio_q *q)
+static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
return qdio_siga_sync(q, 0, q->mask);
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
return cc;
}
-/* called from thinint inbound handler */
-void qdio_sync_after_thinint(struct qdio_q *q)
+static inline void qdio_sync_after_thinint(struct qdio_q *q)
{
if (pci_out_supported(q)) {
if (need_siga_sync_thinint(q))
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
qdio_siga_sync_q(q);
}
-inline void qdio_stop_polling(struct qdio_q *q)
+int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
+ unsigned char *state)
+{
+ qdio_siga_sync_q(q);
+ return get_buf_states(q, bufnr, state, 1, 0);
+}
+
+static inline void qdio_stop_polling(struct qdio_q *q)
{
if (!q->u.in.polling)
return;
@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count)
count--;
if (!count)
return;
-
- /*
- * Need to change all PRIMED buffers to NOT_INIT, otherwise
- * we're loosing initiative in the thinint code.
- */
- set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
- count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /*
- * No siga sync here, as a PCI or we after a thin interrupt
- * will sync the queues.
- */
-
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
goto out;
+ /*
+ * No siga-sync needed here; the queues have already been
+ * synced by a PCI interrupt or by the thin-interrupt handler.
+ */
count = get_buf_states(q, q->first_to_check, &state, count, 1);
if (!count)
goto out;
@@ -490,14 +483,9 @@ check_next:
switch (state) {
case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count);
- /*
- * No siga-sync needed for non-qebsm here, as the inbound queue
- * will be synced on the next siga-r, resp.
- * tiqdio_is_inbound_q_done will do the siga-sync.
- */
q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used);
- goto check_next;
+ break;
case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -516,7 +504,7 @@ out:
return q->first_to_check;
}
-int qdio_inbound_q_moved(struct qdio_q *q)
+static int qdio_inbound_q_moved(struct qdio_q *q)
{
int bufnr;
@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr;
- if (!need_siga_sync(q) && !pci_out_supported(q))
+ if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
q->u.in.timestamp = get_usecs();
-
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
return 1;
} else
return 0;
}
-static int qdio_inbound_q_done(struct qdio_q *q)
+static inline int qdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
- /*
- * We need that one for synchronization with the adapter, as it
- * does a kind of PCI avoidance.
- */
qdio_siga_sync_q(q);
-
get_buf_state(q, q->first_to_check, &state, 0);
+
if (state == SLSB_P_INPUT_PRIMED)
- /* we got something to do */
+ /* more work coming */
return 0;
- /* on VM, we don't poll, so the q is always done here */
- if (need_siga_sync(q) || pci_out_supported(q))
+ if (is_thinint_irq(q->irq_ptr))
+ return 1;
+
+ /* don't poll under z/VM */
+ if (MACHINE_IS_VM)
return 1;
/*
@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
q->first_to_check);
return 1;
- } else {
- DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
- q->first_to_check);
+ } else
return 0;
- }
}
-void qdio_kick_handler(struct qdio_q *q)
+static void qdio_kick_handler(struct qdio_q *q)
{
int start = q->first_to_kick;
int end = q->first_to_check;
@@ -619,7 +601,6 @@ again:
goto again;
}
-/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count);
- /* need to set count to 1 for non-qebsm */
- if (!is_qebsm(q))
- count = 1;
-
-check_next:
if (q->first_to_check == stop)
return q->first_to_check;
@@ -661,13 +637,7 @@ check_next:
atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count);
- /*
- * We fetch all buffer states at once. get_buf_states may
- * return count < stop. For QEBSM we do not loop.
- */
- if (is_qebsm(q))
- break;
- goto check_next;
+ break;
case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */
@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data)
tasklet_schedule(&q->tasklet);
}
-/* called from thinint inbound tasklet */
-void qdio_check_outbound_after_thinint(struct qdio_q *q)
+static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
struct qdio_q *out;
int i;
@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
tasklet_schedule(&out->tasklet);
}
+static void __tiqdio_inbound_processing(struct qdio_q *q)
+{
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+ qdio_sync_after_thinint(q);
+
+ /*
+ * The interrupt could be caused by a PCI request. Check the
+ * PCI capable outbound queues.
+ */
+ qdio_check_outbound_after_thinint(q);
+
+ if (!qdio_inbound_q_moved(q))
+ return;
+
+ qdio_kick_handler(q);
+
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+
+ qdio_stop_polling(q);
+ /*
+ * We need to check again to not lose initiative after
+ * resetting the ACK state.
+ */
+ if (!qdio_inbound_q_done(q)) {
+ qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+ if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
+ tasklet_schedule(&q->tasklet);
+ }
+}
+
+void tiqdio_inbound_processing(unsigned long data)
+{
+ struct qdio_q *q = (struct qdio_q *)data;
+ __tiqdio_inbound_processing(q);
+}
+
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
@@ -1488,18 +1497,13 @@ out:
* @count: how many buffers to process
*/
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
- int q_nr, int bufnr, int count)
+ int q_nr, unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr;
- if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
- (count > QDIO_MAX_BUFFERS_PER_Q) ||
- (q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
+ if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- if (!count)
- return 0;
-
irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d011a78d..981a77ea7ee2 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -43,9 +43,6 @@ struct indicator_t {
};
static struct indicator_t *q_indicators;
-static void tiqdio_tasklet_fn(unsigned long data);
-static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
-
static int css_qdio_omit_svs;
static inline unsigned long do_clear_global_summary(void)
@@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
xchg(irq_ptr->dsci, 1);
}
-/*
- * we cannot stop the tiqdio tasklet here since it is for all
- * thinint qdio devices and it must run as long as there is a
- * thinint device left
- */
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
@@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
}
}
-static inline int tiqdio_inbound_q_done(struct qdio_q *q)
-{
- unsigned char state = 0;
-
- if (!atomic_read(&q->nr_buf_used))
- return 1;
-
- qdio_siga_sync_q(q);
- get_buf_state(q, q->first_to_check, &state, 0);
-
- if (state == SLSB_P_INPUT_PRIMED)
- /* more work coming */
- return 0;
- return 1;
-}
-
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
-static void __tiqdio_inbound_processing(struct qdio_q *q)
+/**
+ * tiqdio_thinint_handler - thin interrupt handler for qdio
+ * @ind: pointer to adapter local summary indicator
+ * @drv_data: NULL
+ */
+static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
- qdio_perf_stat_inc(&perf_stats.thinint_inbound);
- qdio_sync_after_thinint(q);
+ struct qdio_q *q;
+
+ qdio_perf_stat_inc(&perf_stats.thin_int);
/*
- * Maybe we have work on our outbound queues... at least
- * we have to check the PCI capable queues.
+ * SVS only when needed: issue SVS to benefit from iqdio interrupt
+ * avoidance (SVS clears adapter interrupt suppression overwrite)
*/
- qdio_check_outbound_after_thinint(q);
-
- if (!qdio_inbound_q_moved(q))
- return;
-
- qdio_kick_handler(q);
-
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
+ if (!css_qdio_omit_svs)
+ do_clear_global_summary();
- qdio_stop_polling(q);
/*
- * We need to check again to not lose initiative after
- * resetting the ACK state.
+ * reset local summary indicator (tiqdio_alsi) to stop adapter
+ * interrupts for now
*/
- if (!tiqdio_inbound_q_done(q)) {
- qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
- if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
- tasklet_schedule(&q->tasklet);
- }
-}
-
-void tiqdio_inbound_processing(unsigned long data)
-{
- struct qdio_q *q = (struct qdio_q *)data;
-
- __tiqdio_inbound_processing(q);
-}
-
-/* check for work on all inbound thinint queues */
-static void tiqdio_tasklet_fn(unsigned long data)
-{
- struct qdio_q *q;
-
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
-again:
+ xchg((u8 *)ind, 0);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
+ /* check for work on all inbound thinint queues */
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
@@ -226,37 +178,6 @@ again:
if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
}
-
- /* check for more work */
- if (*tiqdio_alsi) {
- xchg(tiqdio_alsi, 0);
- qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
- goto again;
- }
-}
-
-/**
- * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
- */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
-{
- qdio_perf_stat_inc(&perf_stats.thin_int);
-
- /*
- * SVS only when needed: issue SVS to benefit from iqdio interrupt
- * avoidance (SVS clears adapter interrupt suppression overwrite)
- */
- if (!css_qdio_omit_svs)
- do_clear_global_summary();
-
- /*
- * reset local summary indicator (tiqdio_alsi) to stop adapter
- * interrupts for now, the tasklet will clean all dsci's
- */
- xchg((u8 *)ind, 0);
- tasklet_hi_schedule(&tiqdio_tasklet);
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
@@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void)
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
- tasklet_kill(&tiqdio_tasklet);
}
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9c148406b980..727a809636d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -54,6 +54,12 @@ static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
+static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
+static int ap_device_remove(struct device *dev);
+static int ap_device_probe(struct device *dev);
+static void ap_interrupt_handler(void *unused1, void *unused2);
+static void ap_reset(struct ap_device *ap_dev);
+static void ap_config_timeout(unsigned long ptr);
/*
* Module description.
@@ -101,6 +107,10 @@ static struct hrtimer ap_poll_timer;
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
static unsigned long long poll_timeout = 250000;
+/* Suspend flag */
+static int ap_suspend_flag;
+static struct bus_type ap_bus_type;
+
/**
* ap_using_interrupts() - Returns non-zero if interrupt support is
* available.
@@ -617,10 +627,79 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
return retval;
}
+static int ap_bus_suspend(struct device *dev, pm_message_t state)
+{
+ struct ap_device *ap_dev = to_ap_dev(dev);
+ unsigned long flags;
+
+ if (!ap_suspend_flag) {
+ ap_suspend_flag = 1;
+
+ /* Disable scanning for devices, so that we do not
+ * scan for them again after they have been removed.
+ */
+ del_timer_sync(&ap_config_timer);
+ if (ap_work_queue != NULL) {
+ destroy_workqueue(ap_work_queue);
+ ap_work_queue = NULL;
+ }
+ tasklet_disable(&ap_tasklet);
+ }
+ /* Poll on the device until all requests are finished. */
+ do {
+ flags = 0;
+ __ap_poll_device(ap_dev, &flags);
+ } while ((flags & 1) || (flags & 2));
+
+ ap_device_remove(dev);
+ return 0;
+}
+
+static int ap_bus_resume(struct device *dev)
+{
+ int rc = 0;
+ struct ap_device *ap_dev = to_ap_dev(dev);
+
+ if (ap_suspend_flag) {
+ ap_suspend_flag = 0;
+ if (!ap_interrupts_available())
+ ap_interrupt_indicator = NULL;
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ ap_scan_bus(NULL);
+ init_timer(&ap_config_timer);
+ ap_config_timer.function = ap_config_timeout;
+ ap_config_timer.data = 0;
+ ap_config_timer.expires = jiffies + ap_config_time * HZ;
+ add_timer(&ap_config_timer);
+ ap_work_queue = create_singlethread_workqueue("kapwork");
+ if (!ap_work_queue)
+ return -ENOMEM;
+ tasklet_enable(&ap_tasklet);
+ if (!ap_using_interrupts())
+ ap_schedule_poll_timer();
+ else
+ tasklet_schedule(&ap_tasklet);
+ if (ap_thread_flag)
+ rc = ap_poll_thread_start();
+ } else {
+ ap_device_probe(dev);
+ ap_reset(ap_dev);
+ setup_timer(&ap_dev->timeout, ap_request_timeout,
+ (unsigned long) ap_dev);
+ }
+
+ return rc;
+}
+
static struct bus_type ap_bus_type = {
.name = "ap",
.match = &ap_bus_match,
.uevent = &ap_uevent,
+ .suspend = ap_bus_suspend,
+ .resume = ap_bus_resume
};
static int ap_device_probe(struct device *dev)
@@ -1066,7 +1145,7 @@ ap_config_timeout(unsigned long ptr)
*/
static inline void ap_schedule_poll_timer(void)
{
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return;
if (hrtimer_is_queued(&ap_poll_timer))
return;
@@ -1384,6 +1463,8 @@ static int ap_poll_thread(void *data)
set_user_nice(current, 19);
while (1) {
+ if (ap_suspend_flag)
+ return 0;
if (need_resched()) {
schedule();
continue;
@@ -1414,7 +1495,7 @@ static int ap_poll_thread_start(void)
{
int rc;
- if (ap_using_interrupts())
+ if (ap_using_interrupts() || ap_suspend_flag)
return 0;
mutex_lock(&ap_poll_thread_mutex);
if (!ap_poll_kthread) {
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 52574ce797b2..8c36eafcfbfe 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1307,7 +1307,7 @@ static void netiucv_pm_complete(struct device *dev)
*/
static int netiucv_pm_freeze(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
@@ -1331,7 +1331,7 @@ out:
*/
static int netiucv_pm_restore_thaw(struct device *dev)
{
- struct netiucv_priv *priv = dev->driver_data;
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL;
int rc = 0;
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 1132c5cae7ab..037c1e0b7c4c 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1320,6 +1320,16 @@ config SERIAL_SGI_IOC3
If you have an SGI Altix with an IOC3 serial card,
say Y or M. Otherwise, say N.
+config SERIAL_MSM
+ bool "MSM on-chip serial port support"
+ depends on ARM && ARCH_MSM
+ select SERIAL_CORE
+
+config SERIAL_MSM_CONSOLE
+ bool "MSM serial console support"
+ depends on SERIAL_MSM=y
+ select SERIAL_CORE_CONSOLE
+
config SERIAL_NETX
tristate "NetX serial port support"
depends on ARM && ARCH_NETX
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 45a8658f54d5..d5a29981c6c4 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -71,6 +71,7 @@ obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
obj-$(CONFIG_SERIAL_SGI_IOC3) += ioc3_serial.o
obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
+obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o
obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index e2f6b1bfac98..b4a7650af696 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -38,6 +38,10 @@
#include <asm/cacheflush.h>
#endif
+#ifdef CONFIG_SERIAL_BFIN_MODULE
+# undef CONFIG_EARLY_PRINTK
+#endif
+
/* UART name and device definitions */
#define BFIN_SERIAL_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
@@ -1110,6 +1114,7 @@ static void __init bfin_serial_init_ports(void)
bfin_serial_hw_init();
for (i = 0; i < nr_active_ports; i++) {
+ spin_lock_init(&bfin_serial_ports[i].port.lock);
bfin_serial_ports[i].port.uartclk = get_sclk();
bfin_serial_ports[i].port.fifosize = BFIN_UART_TX_FIFO_SIZE;
bfin_serial_ports[i].port.ops = &bfin_serial_pops;
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
new file mode 100644
index 000000000000..698048f64f5e
--- /dev/null
+++ b/drivers/serial/msm_serial.c
@@ -0,0 +1,772 @@
+/*
+ * drivers/serial/msm_serial.c - driver for msm7k serial device and console
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if defined(CONFIG_SERIAL_MSM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+# define SUPPORT_SYSRQ
+#endif
+
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+
+#include "msm_serial.h"
+
+struct msm_port {
+ struct uart_port uart;
+ char name[16];
+ struct clk *clk;
+ unsigned int imr;
+};
+
+#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
+
+static inline void msm_write(struct uart_port *port, unsigned int val,
+ unsigned int off)
+{
+ __raw_writel(val, port->membase + off);
+}
+
+static inline unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+ return __raw_readl(port->membase + off);
+}
+
+static void msm_stop_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_start_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_stop_rx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_enable_ms(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr |= UART_IMR_DELTA_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void handle_rx(struct uart_port *port)
+{
+ struct tty_struct *tty = port->info->port.tty;
+ unsigned int sr;
+
+ /*
+ * Handle overrun. My understanding of the hardware is that overrun
+ * is not tied to the RX buffer, so we handle the case out of band.
+ */
+ if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ /* and now the main RX loop */
+ while ((sr = msm_read(port, UART_SR)) & UART_SR_RX_READY) {
+ unsigned int c;
+ char flag = TTY_NORMAL;
+
+ c = msm_read(port, UART_RF);
+
+ if (sr & UART_SR_RX_BREAK) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ port->icount.frame++;
+ } else {
+ port->icount.rx++;
+ }
+
+ /* Mask conditions we're ignoring. */
+ sr &= port->read_status_mask;
+
+ if (sr & UART_SR_RX_BREAK) {
+ flag = TTY_BREAK;
+ } else if (sr & UART_SR_PAR_FRAME_ERR) {
+ flag = TTY_FRAME;
+ }
+
+ if (!uart_handle_sysrq_char(port, c))
+ tty_insert_flip_char(tty, c, flag);
+ }
+
+ tty_flip_buffer_push(tty);
+}
+
+static void handle_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->info->xmit;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ int sent_tx;
+
+ if (port->x_char) {
+ msm_write(port, port->x_char, UART_TF);
+ port->icount.tx++;
+ port->x_char = 0;
+ }
+
+ while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
+ if (uart_circ_empty(xmit)) {
+ /* disable tx interrupts */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+ break;
+ }
+
+ msm_write(port, xmit->buf[xmit->tail], UART_TF);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ sent_tx = 1;
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void handle_delta_cts(struct uart_port *port)
+{
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ port->icount.cts++;
+ wake_up_interruptible(&port->info->delta_msr_wait);
+}
+
+static irqreturn_t msm_irq(int irq, void *dev_id)
+{
+ struct uart_port *port = dev_id;
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int misr;
+
+ spin_lock(&port->lock);
+ misr = msm_read(port, UART_MISR);
+ msm_write(port, 0, UART_IMR); /* disable interrupt */
+
+ if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE))
+ handle_rx(port);
+ if (misr & UART_IMR_TXLEV)
+ handle_tx(port);
+ if (misr & UART_IMR_DELTA_CTS)
+ handle_delta_cts(port);
+
+ msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+static unsigned int msm_tx_empty(struct uart_port *port)
+{
+ return (msm_read(port, UART_SR) & UART_SR_TX_EMPTY) ? TIOCSER_TEMT : 0;
+}
+
+static unsigned int msm_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS;
+}
+
+static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ unsigned int mr;
+
+ mr = msm_read(port, UART_MR1);
+
+ if (!(mctrl & TIOCM_RTS)) {
+ mr &= ~UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
+ } else {
+ mr |= UART_MR1_RX_RDY_CTL;
+ msm_write(port, mr, UART_MR1);
+ }
+}
+
+static void msm_break_ctl(struct uart_port *port, int break_ctl)
+{
+ if (break_ctl)
+ msm_write(port, UART_CR_CMD_START_BREAK, UART_CR);
+ else
+ msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
+}
+
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+{
+ unsigned int baud_code, rxstale, watermark;
+
+ switch (baud) {
+ case 300:
+ baud_code = UART_CSR_300;
+ rxstale = 1;
+ break;
+ case 600:
+ baud_code = UART_CSR_600;
+ rxstale = 1;
+ break;
+ case 1200:
+ baud_code = UART_CSR_1200;
+ rxstale = 1;
+ break;
+ case 2400:
+ baud_code = UART_CSR_2400;
+ rxstale = 1;
+ break;
+ case 4800:
+ baud_code = UART_CSR_4800;
+ rxstale = 1;
+ break;
+ case 9600:
+ baud_code = UART_CSR_9600;
+ rxstale = 2;
+ break;
+ case 14400:
+ baud_code = UART_CSR_14400;
+ rxstale = 3;
+ break;
+ case 19200:
+ baud_code = UART_CSR_19200;
+ rxstale = 4;
+ break;
+ case 28800:
+ baud_code = UART_CSR_28800;
+ rxstale = 6;
+ break;
+ case 38400:
+ baud_code = UART_CSR_38400;
+ rxstale = 8;
+ break;
+ case 57600:
+ baud_code = UART_CSR_57600;
+ rxstale = 16;
+ break;
+ case 115200:
+ default:
+ baud_code = UART_CSR_115200;
+ baud = 115200;
+ rxstale = 31;
+ break;
+ }
+
+ msm_write(port, baud_code, UART_CSR);
+
+ /* RX stale watermark */
+ watermark = UART_IPR_STALE_LSB & rxstale;
+ watermark |= UART_IPR_RXSTALE_LAST;
+ watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ msm_write(port, watermark, UART_IPR);
+
+ /* set RX watermark */
+ watermark = (port->fifosize * 3) / 4;
+ msm_write(port, watermark, UART_RFWR);
+
+ /* set TX watermark */
+ msm_write(port, 10, UART_TFWR);
+
+ return baud;
+}
+
+static void msm_reset(struct uart_port *port)
+{
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+ msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
+}
+
+static void msm_init_clock(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ clk_enable(msm_port->clk);
+
+ msm_write(port, 0xC0, UART_MREG);
+ msm_write(port, 0xB2, UART_NREG);
+ msm_write(port, 0x7D, UART_DREG);
+ msm_write(port, 0x1C, UART_MNDREG);
+}
+
+static int msm_startup(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ unsigned int data, rfr_level;
+ int ret;
+
+ snprintf(msm_port->name, sizeof(msm_port->name),
+ "msm_serial%d", port->line);
+
+ ret = request_irq(port->irq, msm_irq, IRQF_TRIGGER_HIGH,
+ msm_port->name, port);
+ if (unlikely(ret))
+ return ret;
+
+ msm_init_clock(port);
+
+ if (likely(port->fifosize > 12))
+ rfr_level = port->fifosize - 12;
+ else
+ rfr_level = port->fifosize;
+
+ /* set automatic RFR level */
+ data = msm_read(port, UART_MR1);
+ data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+ data &= ~UART_MR1_AUTO_RFR_LEVEL0;
+ data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
+ msm_write(port, data, UART_MR1);
+
+ /* make sure that RXSTALE count is non-zero */
+ data = msm_read(port, UART_IPR);
+ if (unlikely(!data)) {
+ data |= UART_IPR_RXSTALE_LAST;
+ data |= UART_IPR_STALE_LSB;
+ msm_write(port, data, UART_IPR);
+ }
+
+ msm_reset(port);
+
+ msm_write(port, 0x05, UART_CR); /* enable TX & RX */
+
+ /* turn on RX and CTS interrupts */
+ msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE |
+ UART_IMR_CURRENT_CTS;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ return 0;
+}
+
+static void msm_shutdown(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ msm_port->imr = 0;
+ msm_write(port, 0, UART_IMR); /* disable interrupts */
+
+ clk_disable(msm_port->clk);
+
+ free_irq(port->irq, port);
+}
+
+static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ unsigned long flags;
+ unsigned int baud, mr;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* calculate and set baud rate */
+ baud = uart_get_baud_rate(port, termios, old, 300, 115200);
+ baud = msm_set_baud_rate(port, baud);
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, baud, baud);
+
+ /* calculate parity */
+ mr = msm_read(port, UART_MR2);
+ mr &= ~UART_MR2_PARITY_MODE;
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & PARODD)
+ mr |= UART_MR2_PARITY_MODE_ODD;
+ else if (termios->c_cflag & CMSPAR)
+ mr |= UART_MR2_PARITY_MODE_SPACE;
+ else
+ mr |= UART_MR2_PARITY_MODE_EVEN;
+ }
+
+ /* calculate bits per char */
+ mr &= ~UART_MR2_BITS_PER_CHAR;
+ switch (termios->c_cflag & CSIZE) {
+ case CS5:
+ mr |= UART_MR2_BITS_PER_CHAR_5;
+ break;
+ case CS6:
+ mr |= UART_MR2_BITS_PER_CHAR_6;
+ break;
+ case CS7:
+ mr |= UART_MR2_BITS_PER_CHAR_7;
+ break;
+ case CS8:
+ default:
+ mr |= UART_MR2_BITS_PER_CHAR_8;
+ break;
+ }
+
+ /* calculate stop bits */
+ mr &= ~(UART_MR2_STOP_BIT_LEN_ONE | UART_MR2_STOP_BIT_LEN_TWO);
+ if (termios->c_cflag & CSTOPB)
+ mr |= UART_MR2_STOP_BIT_LEN_TWO;
+ else
+ mr |= UART_MR2_STOP_BIT_LEN_ONE;
+
+ /* set parity, bits per char, and stop bit */
+ msm_write(port, mr, UART_MR2);
+
+ /* calculate and set hardware flow control */
+ mr = msm_read(port, UART_MR1);
+ mr &= ~(UART_MR1_CTS_CTL | UART_MR1_RX_RDY_CTL);
+ if (termios->c_cflag & CRTSCTS) {
+ mr |= UART_MR1_CTS_CTL;
+ mr |= UART_MR1_RX_RDY_CTL;
+ }
+ msm_write(port, mr, UART_MR1);
+
+ /* Configure status bits to ignore based on termio flags. */
+ port->read_status_mask = 0;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= UART_SR_RX_BREAK;
+
+ uart_update_timeout(port, termios->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *msm_type(struct uart_port *port)
+{
+ return "MSM";
+}
+
+static void msm_release_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return;
+ size = resource->end - resource->start + 1;
+
+ release_mem_region(port->mapbase, size);
+ iounmap(port->membase);
+ port->membase = NULL;
+}
+
+static int msm_request_port(struct uart_port *port)
+{
+ struct platform_device *pdev = to_platform_device(port->dev);
+ struct resource *resource;
+ resource_size_t size;
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ size = resource->end - resource->start + 1;
+
+ if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial")))
+ return -EBUSY;
+
+ port->membase = ioremap(port->mapbase, size);
+ if (!port->membase) {
+ release_mem_region(port->mapbase, size);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void msm_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = PORT_MSM;
+ msm_request_port(port);
+ }
+}
+
+static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ if (unlikely(ser->type != PORT_UNKNOWN && ser->type != PORT_MSM))
+ return -EINVAL;
+ if (unlikely(port->irq != ser->irq))
+ return -EINVAL;
+ return 0;
+}
+
+static void msm_power(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+
+ switch (state) {
+ case 0:
+ clk_enable(msm_port->clk);
+ break;
+ case 3:
+ clk_disable(msm_port->clk);
+ break;
+ default:
+ printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
+ }
+}
+
+static struct uart_ops msm_uart_pops = {
+ .tx_empty = msm_tx_empty,
+ .set_mctrl = msm_set_mctrl,
+ .get_mctrl = msm_get_mctrl,
+ .stop_tx = msm_stop_tx,
+ .start_tx = msm_start_tx,
+ .stop_rx = msm_stop_rx,
+ .enable_ms = msm_enable_ms,
+ .break_ctl = msm_break_ctl,
+ .startup = msm_startup,
+ .shutdown = msm_shutdown,
+ .set_termios = msm_set_termios,
+ .type = msm_type,
+ .release_port = msm_release_port,
+ .request_port = msm_request_port,
+ .config_port = msm_config_port,
+ .verify_port = msm_verify_port,
+ .pm = msm_power,
+};
+
+static struct msm_port msm_uart_ports[] = {
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 0,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 512,
+ .line = 1,
+ },
+ },
+ {
+ .uart = {
+ .iotype = UPIO_MEM,
+ .ops = &msm_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .fifosize = 64,
+ .line = 2,
+ },
+ },
+};
+
+#define UART_NR ARRAY_SIZE(msm_uart_ports)
+
+static inline struct uart_port *get_port_from_line(unsigned int line)
+{
+ return &msm_uart_ports[line].uart;
+}
+
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+
+static void msm_console_putchar(struct uart_port *port, int c)
+{
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ ;
+ msm_write(port, c, UART_TF);
+}
+
+static void msm_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port;
+ struct msm_port *msm_port;
+
+ BUG_ON(co->index < 0 || co->index >= UART_NR);
+
+ port = get_port_from_line(co->index);
+ msm_port = UART_TO_MSM(port);
+
+ spin_lock(&port->lock);
+ uart_console_write(port, s, count, msm_console_putchar);
+ spin_unlock(&port->lock);
+}
+
+static int __init msm_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 115200, flow, bits, parity;
+
+ if (unlikely(co->index >= UART_NR || co->index < 0))
+ return -ENXIO;
+
+ port = get_port_from_line(co->index);
+
+ if (unlikely(!port->membase))
+ return -ENXIO;
+
+ port->cons = co;
+
+ msm_init_clock(port);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ bits = 8;
+ parity = 'n';
+ flow = 'n';
+ msm_write(port, UART_MR2_BITS_PER_CHAR_8 | UART_MR2_STOP_BIT_LEN_ONE,
+ UART_MR2); /* 8N1 */
+
+ if (baud < 300 || baud > 115200)
+ baud = 115200;
+ msm_set_baud_rate(port, baud);
+
+ msm_reset(port);
+
+ printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver msm_uart_driver;
+
+static struct console msm_console = {
+ .name = "ttyMSM",
+ .write = msm_console_write,
+ .device = uart_console_device,
+ .setup = msm_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &msm_uart_driver,
+};
+
+#define MSM_CONSOLE (&msm_console)
+
+#else
+#define MSM_CONSOLE NULL
+#endif
+
+static struct uart_driver msm_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "msm_serial",
+ .dev_name = "ttyMSM",
+ .nr = UART_NR,
+ .cons = MSM_CONSOLE,
+};
+
+static int __init msm_serial_probe(struct platform_device *pdev)
+{
+ struct msm_port *msm_port;
+ struct resource *resource;
+ struct uart_port *port;
+
+ if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
+ return -ENXIO;
+
+ printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);
+
+ port = get_port_from_line(pdev->id);
+ port->dev = &pdev->dev;
+ msm_port = UART_TO_MSM(port);
+
+ msm_port->clk = clk_get(&pdev->dev, "uart_clk");
+ if (unlikely(IS_ERR(msm_port->clk)))
+ return PTR_ERR(msm_port->clk);
+ port->uartclk = clk_get_rate(msm_port->clk);
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!resource))
+ return -ENXIO;
+ port->mapbase = resource->start;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (unlikely(port->irq < 0))
+ return -ENXIO;
+
+ platform_set_drvdata(pdev, port);
+
+ return uart_add_one_port(&msm_uart_driver, port);
+}
+
+static int __devexit msm_serial_remove(struct platform_device *pdev)
+{
+ struct msm_port *msm_port = platform_get_drvdata(pdev);
+
+ clk_put(msm_port->clk);
+
+ return 0;
+}
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_serial_probe,
+ .remove = msm_serial_remove,
+ .driver = {
+ .name = "msm_serial",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_serial_init(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&msm_uart_driver);
+ if (unlikely(ret))
+ return ret;
+
+ ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe);
+ if (unlikely(ret))
+ uart_unregister_driver(&msm_uart_driver);
+
+ printk(KERN_INFO "msm_serial: driver initialized\n");
+
+ return ret;
+}
+
+static void __exit msm_serial_exit(void)
+{
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ unregister_console(&msm_console);
+#endif
+ platform_driver_unregister(&msm_platform_driver);
+ uart_unregister_driver(&msm_uart_driver);
+}
+
+module_init(msm_serial_init);
+module_exit(msm_serial_exit);
+
+MODULE_AUTHOR("Robert Love <rlove@google.com>");
+MODULE_DESCRIPTION("Driver for msm7x serial device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/msm_serial.h b/drivers/serial/msm_serial.h
new file mode 100644
index 000000000000..689f1fa0e84e
--- /dev/null
+++ b/drivers/serial/msm_serial.h
@@ -0,0 +1,117 @@
+/*
+ * drivers/serial/msm_serial.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
+#define __DRIVERS_SERIAL_MSM_SERIAL_H
+
+#define UART_MR1 0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
+#define UART_MR1_RX_RDY_CTL (1 << 7)
+#define UART_MR1_CTS_CTL (1 << 6)
+
+#define UART_MR2 0x0004
+#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_BITS_PER_CHAR 0x30
+#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7 (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8 (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE 0x0
+#define UART_MR2_PARITY_MODE_ODD 0x1
+#define UART_MR2_PARITY_MODE_EVEN 0x2
+#define UART_MR2_PARITY_MODE_SPACE 0x3
+#define UART_MR2_PARITY_MODE 0x3
+
+#define UART_CSR 0x0008
+#define UART_CSR_115200 0xFF
+#define UART_CSR_57600 0xEE
+#define UART_CSR_38400 0xDD
+#define UART_CSR_28800 0xCC
+#define UART_CSR_19200 0xBB
+#define UART_CSR_14400 0xAA
+#define UART_CSR_9600 0x99
+#define UART_CSR_4800 0x77
+#define UART_CSR_2400 0x55
+#define UART_CSR_1200 0x44
+#define UART_CSR_600 0x33
+#define UART_CSR_300 0x22
+
+#define UART_TF 0x000C
+
+#define UART_CR 0x0010
+#define UART_CR_CMD_NULL (0 << 4)
+#define UART_CR_CMD_RESET_RX (1 << 4)
+#define UART_CR_CMD_RESET_TX (2 << 4)
+#define UART_CR_CMD_RESET_ERR (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT (4 << 4)
+#define UART_CR_CMD_START_BREAK (5 << 4)
+#define UART_CR_CMD_STOP_BREAK (6 << 4)
+#define UART_CR_CMD_RESET_CTS (7 << 4)
+#define UART_CR_CMD_PACKET_MODE (9 << 4)
+#define UART_CR_CMD_MODE_RESET (12 << 4)
+#define UART_CR_CMD_SET_RFR (13 << 4)
+#define UART_CR_CMD_RESET_RFR (14 << 4)
+#define UART_CR_TX_DISABLE (1 << 3)
+#define UART_CR_TX_ENABLE (1 << 2)
+#define UART_CR_RX_DISABLE (1 << 1)
+#define UART_CR_RX_ENABLE (1 << 0)
+
+#define UART_IMR 0x0014
+#define UART_IMR_TXLEV (1 << 0)
+#define UART_IMR_RXSTALE (1 << 3)
+#define UART_IMR_RXLEV (1 << 4)
+#define UART_IMR_DELTA_CTS (1 << 5)
+#define UART_IMR_CURRENT_CTS (1 << 6)
+
+#define UART_IPR_RXSTALE_LAST 0x20
+#define UART_IPR_STALE_LSB 0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+
+#define UART_IPR 0x0018
+#define UART_TFWR 0x001C
+#define UART_RFWR 0x0020
+#define UART_HCR 0x0024
+
+#define UART_MREG 0x0028
+#define UART_NREG 0x002C
+#define UART_DREG 0x0030
+#define UART_MNDREG 0x0034
+#define UART_IRDA 0x0038
+#define UART_MISR_MODE 0x0040
+#define UART_MISR_RESET 0x0044
+#define UART_MISR_EXPORT 0x0048
+#define UART_MISR_VAL 0x004C
+#define UART_TEST_CTRL 0x0050
+
+#define UART_SR 0x0008
+#define UART_SR_HUNT_CHAR (1 << 7)
+#define UART_SR_RX_BREAK (1 << 6)
+#define UART_SR_PAR_FRAME_ERR (1 << 5)
+#define UART_SR_OVERRUN (1 << 4)
+#define UART_SR_TX_EMPTY (1 << 3)
+#define UART_SR_TX_READY (1 << 2)
+#define UART_SR_RX_FULL (1 << 1)
+#define UART_SR_RX_READY (1 << 0)
+
+#define UART_RF 0x000C
+#define UART_MISR 0x0010
+#define UART_ISR 0x0014
+
+#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
diff --git a/drivers/serial/s3c2400.c b/drivers/serial/s3c2400.c
index 4873f2978bd2..fb00ed5296e6 100644
--- a/drivers/serial/s3c2400.c
+++ b/drivers/serial/s3c2400.c
@@ -78,7 +78,7 @@ static int s3c2400_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2400_serial_drv = {
.probe = s3c2400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c
index 87c182ef71b8..b5d7cbcba2ae 100644
--- a/drivers/serial/s3c2410.c
+++ b/drivers/serial/s3c2410.c
@@ -90,7 +90,7 @@ static int s3c2410_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2410_serial_drv = {
.probe = s3c2410_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2410-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2412.c b/drivers/serial/s3c2412.c
index fd017b375568..11dcb90bdfef 100644
--- a/drivers/serial/s3c2412.c
+++ b/drivers/serial/s3c2412.c
@@ -123,7 +123,7 @@ static int s3c2412_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2412_serial_drv = {
.probe = s3c2412_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2412-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c2440.c b/drivers/serial/s3c2440.c
index 29cbb0afef8e..06c5b0cc47a3 100644
--- a/drivers/serial/s3c2440.c
+++ b/drivers/serial/s3c2440.c
@@ -153,7 +153,7 @@ static int s3c2440_serial_probe(struct platform_device *dev)
static struct platform_driver s3c2440_serial_drv = {
.probe = s3c2440_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c2440-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c24a0.c b/drivers/serial/s3c24a0.c
index ebf2fd3c8f7d..786a067d62ac 100644
--- a/drivers/serial/s3c24a0.c
+++ b/drivers/serial/s3c24a0.c
@@ -94,7 +94,7 @@ static int s3c24a0_serial_probe(struct platform_device *dev)
static struct platform_driver s3c24a0_serial_drv = {
.probe = s3c24a0_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c24a0-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/s3c6400.c b/drivers/serial/s3c6400.c
index 3e3785233682..48f1a3781f0d 100644
--- a/drivers/serial/s3c6400.c
+++ b/drivers/serial/s3c6400.c
@@ -124,7 +124,7 @@ static int s3c6400_serial_probe(struct platform_device *dev)
static struct platform_driver s3c6400_serial_drv = {
.probe = s3c6400_serial_probe,
- .remove = s3c24xx_serial_remove,
+ .remove = __devexit_p(s3c24xx_serial_remove),
.driver = {
.name = "s3c6400-uart",
.owner = THIS_MODULE,
diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c
index 93b5d75db126..c8851a0db63a 100644
--- a/drivers/serial/samsung.c
+++ b/drivers/serial/samsung.c
@@ -1174,7 +1174,7 @@ int s3c24xx_serial_probe(struct platform_device *dev,
EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
-int s3c24xx_serial_remove(struct platform_device *dev)
+int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
diff --git a/drivers/serial/samsung.h b/drivers/serial/samsung.h
index 7afb94843a08..d3fe315969f6 100644
--- a/drivers/serial/samsung.h
+++ b/drivers/serial/samsung.h
@@ -72,7 +72,7 @@ struct s3c24xx_uart_port {
extern int s3c24xx_serial_probe(struct platform_device *dev,
struct s3c24xx_uart_info *uart);
-extern int s3c24xx_serial_remove(struct platform_device *dev);
+extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
struct s3c24xx_uart_info *uart);
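The samsung.c/samsung.h hunks above, together with the per-SoC driver hunks, annotate s3c24xx_serial_remove() with __devexit and wrap every .remove reference in __devexit_p(). The point of the pairing is that when hotplug support is compiled out the __devexit function may be discarded from the image, and __devexit_p() then evaluates to NULL rather than leaving a dangling pointer in the platform_driver. A minimal sketch of the same pattern for a hypothetical driver (all names here are illustrative, not from the patch):

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;			/* claim the device */
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	return 0;			/* may be discarded when hotplug is off */
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	/* __devexit_p() becomes NULL if foo_remove is compiled out */
	.remove	= __devexit_p(foo_remove),
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}

static void __exit foo_exit(void)
{
	platform_driver_unregister(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");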
diff --git a/drivers/serial/sb1250-duart.c b/drivers/serial/sb1250-duart.c
index a4fb343a08da..319e8b83f6be 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/serial/sb1250-duart.c
@@ -204,7 +204,7 @@ static int sbd_receive_drain(struct sbd_port *sport)
{
int loops = 10000;
- while (sbd_receive_ready(sport) && loops--)
+ while (sbd_receive_ready(sport) && --loops)
read_sbdchn(sport, R_DUART_RX_HOLD);
return loops;
}
@@ -218,7 +218,7 @@ static int __maybe_unused sbd_transmit_drain(struct sbd_port *sport)
{
int loops = 10000;
- while (!sbd_transmit_ready(sport) && loops--)
+ while (!sbd_transmit_ready(sport) && --loops)
udelay(2);
return loops;
}
@@ -232,7 +232,7 @@ static int sbd_line_drain(struct sbd_port *sport)
{
int loops = 10000;
- while (!sbd_transmit_empty(sport) && loops--)
+ while (!sbd_transmit_empty(sport) && --loops)
udelay(2);
return loops;
}
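The sb1250-duart.c hunks (and the matching zs.c hunks later in this series) change the drain loops from loops-- to --loops. With the post-decrement form a timed-out loop exits with loops at -1, so a caller that checks the returned budget with "!loops" never sees the timeout; pre-decrementing makes the counter land exactly at 0 when the budget is exhausted. A small, self-contained illustration of the difference (standalone C, not code from the patch):

#include <stdio.h>
#include <stdbool.h>

static bool device_ready(void)
{
	return false;			/* pretend the FIFO never drains */
}

static int drain_post(void)
{
	int loops = 10000;
	while (!device_ready() && loops--)
		;			/* would poll the hardware here */
	return loops;			/* -1 on timeout: "!drain_post()" misses it */
}

static int drain_pre(void)
{
	int loops = 10000;
	while (!device_ready() && --loops)
		;
	return loops;			/* exactly 0 on timeout */
}

int main(void)
{
	printf("post: %d, pre: %d\n", drain_post(), drain_pre());
	return 0;
}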
diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c
index a94a2ab4b571..1df5325faab2 100644
--- a/drivers/serial/sunhv.c
+++ b/drivers/serial/sunhv.c
@@ -461,7 +461,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
break;
udelay(1);
}
- if (limit <= 0)
+ if (limit < 0)
break;
page_bytes -= written;
ra += written;
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index ac9e5d5f742e..063a313b755c 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -33,29 +33,29 @@ struct timbuart_port {
struct uart_port port;
struct tasklet_struct tasklet;
int usedma;
- u8 last_ier;
+ u32 last_ier;
struct platform_device *dev;
};
static int baudrates[] = {9600, 19200, 38400, 57600, 115200, 230400, 460800,
921600, 1843200, 3250000};
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier);
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier);
static irqreturn_t timbuart_handleinterrupt(int irq, void *devid);
static void timbuart_stop_rx(struct uart_port *port)
{
/* spin lock held by upper layer, disable all RX interrupts */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~RXFLAGS;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~RXFLAGS;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_stop_tx(struct uart_port *port)
{
/* spinlock held by upper layer, disable TX interrupt */
- u8 ier = ioread8(port->membase + TIMBUART_IER) & ~TXBAE;
- iowrite8(ier, port->membase + TIMBUART_IER);
+ u32 ier = ioread32(port->membase + TIMBUART_IER) & ~TXBAE;
+ iowrite32(ier, port->membase + TIMBUART_IER);
}
static void timbuart_start_tx(struct uart_port *port)
@@ -72,14 +72,14 @@ static void timbuart_flush_buffer(struct uart_port *port)
u8 ctl = ioread8(port->membase + TIMBUART_CTRL) | TIMBUART_CTRL_FLSHTX;
iowrite8(ctl, port->membase + TIMBUART_CTRL);
- iowrite8(TXBF, port->membase + TIMBUART_ISR);
+ iowrite32(TXBF, port->membase + TIMBUART_ISR);
}
static void timbuart_rx_chars(struct uart_port *port)
{
struct tty_struct *tty = port->info->port.tty;
- while (ioread8(port->membase + TIMBUART_ISR) & RXDP) {
+ while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
port->icount.rx++;
tty_insert_flip_char(tty, ch, TTY_NORMAL);
@@ -97,7 +97,7 @@ static void timbuart_tx_chars(struct uart_port *port)
{
struct circ_buf *xmit = &port->info->xmit;
- while (!(ioread8(port->membase + TIMBUART_ISR) & TXBF) &&
+ while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) &&
!uart_circ_empty(xmit)) {
iowrite8(xmit->buf[xmit->tail],
port->membase + TIMBUART_TXFIFO);
@@ -114,7 +114,7 @@ static void timbuart_tx_chars(struct uart_port *port)
ioread8(port->membase + TIMBUART_BAUDRATE));
}
-static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
{
struct timbuart_port *uart =
container_of(port, struct timbuart_port, port);
@@ -129,7 +129,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
if (isr & TXFLAGS) {
timbuart_tx_chars(port);
/* clear all TX interrupts */
- iowrite8(TXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(TXFLAGS, port->membase + TIMBUART_ISR);
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
@@ -148,7 +148,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u8 isr, u8 *ier)
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
-void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
+void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
if (isr & RXFLAGS) {
/* Some RX status is set */
@@ -161,7 +161,7 @@ void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
timbuart_rx_chars(port);
/* ack all RX interrupts */
- iowrite8(RXFLAGS, port->membase + TIMBUART_ISR);
+ iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
}
/* always have the RX interrupts enabled */
@@ -173,11 +173,11 @@ void timbuart_handle_rx_port(struct uart_port *port, u8 isr, u8 *ier)
void timbuart_tasklet(unsigned long arg)
{
struct timbuart_port *uart = (struct timbuart_port *)arg;
- u8 isr, ier = 0;
+ u32 isr, ier = 0;
spin_lock(&uart->port.lock);
- isr = ioread8(uart->port.membase + TIMBUART_ISR);
+ isr = ioread32(uart->port.membase + TIMBUART_ISR);
dev_dbg(uart->port.dev, "%s ISR: %x\n", __func__, isr);
if (!uart->usedma)
@@ -188,7 +188,7 @@ void timbuart_tasklet(unsigned long arg)
if (!uart->usedma)
timbuart_handle_rx_port(&uart->port, isr, &ier);
- iowrite8(ier, uart->port.membase + TIMBUART_IER);
+ iowrite32(ier, uart->port.membase + TIMBUART_IER);
spin_unlock(&uart->port.lock);
dev_dbg(uart->port.dev, "%s leaving\n", __func__);
@@ -196,9 +196,9 @@ void timbuart_tasklet(unsigned long arg)
static unsigned int timbuart_tx_empty(struct uart_port *port)
{
- u8 isr = ioread8(port->membase + TIMBUART_ISR);
+ u32 isr = ioread32(port->membase + TIMBUART_ISR);
- return (isr & TXBAE) ? TIOCSER_TEMT : 0;
+ return (isr & TXBE) ? TIOCSER_TEMT : 0;
}
static unsigned int timbuart_get_mctrl(struct uart_port *port)
@@ -222,13 +222,13 @@ static void timbuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
iowrite8(TIMBUART_CTRL_RTS, port->membase + TIMBUART_CTRL);
}
-static void timbuart_mctrl_check(struct uart_port *port, u8 isr, u8 *ier)
+static void timbuart_mctrl_check(struct uart_port *port, u32 isr, u32 *ier)
{
unsigned int cts;
if (isr & CTS_DELTA) {
/* ack */
- iowrite8(CTS_DELTA, port->membase + TIMBUART_ISR);
+ iowrite32(CTS_DELTA, port->membase + TIMBUART_ISR);
cts = timbuart_get_mctrl(port);
uart_handle_cts_change(port, cts & TIOCM_CTS);
wake_up_interruptible(&port->info->delta_msr_wait);
@@ -255,9 +255,9 @@ static int timbuart_startup(struct uart_port *port)
dev_dbg(port->dev, "%s\n", __func__);
iowrite8(TIMBUART_CTRL_FLSHRX, port->membase + TIMBUART_CTRL);
- iowrite8(0xff, port->membase + TIMBUART_ISR);
+ iowrite32(0x1ff, port->membase + TIMBUART_ISR);
/* Enable all but TX interrupts */
- iowrite8(RXBAF | RXBF | RXTT | CTS_DELTA,
+ iowrite32(RXBAF | RXBF | RXTT | CTS_DELTA,
port->membase + TIMBUART_IER);
return request_irq(port->irq, timbuart_handleinterrupt, IRQF_SHARED,
@@ -270,7 +270,7 @@ static void timbuart_shutdown(struct uart_port *port)
container_of(port, struct timbuart_port, port);
dev_dbg(port->dev, "%s\n", __func__);
free_irq(port->irq, uart);
- iowrite8(0, port->membase + TIMBUART_IER);
+ iowrite32(0, port->membase + TIMBUART_IER);
}
static int get_bindex(int baud)
@@ -359,10 +359,10 @@ static irqreturn_t timbuart_handleinterrupt(int irq, void *devid)
struct timbuart_port *uart = (struct timbuart_port *)devid;
if (ioread8(uart->port.membase + TIMBUART_IPR)) {
- uart->last_ier = ioread8(uart->port.membase + TIMBUART_IER);
+ uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER);
/* disable interrupts, the tasklet enables them again */
- iowrite8(0, uart->port.membase + TIMBUART_IER);
+ iowrite32(0, uart->port.membase + TIMBUART_IER);
/* fire off bottom half */
tasklet_schedule(&uart->tasklet);
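The timbuart.c hunks widen every interrupt-register access (IER, ISR, and the cached last_ier) from ioread8/iowrite8 to ioread32/iowrite32 and grow the isr/ier locals to u32, since the interrupt registers are wider than 8 bits and the upper status bits (note the 0x1ff acknowledge mask in startup) would otherwise be lost. A hedged sketch of the general read-modify-write shape the driver uses, with made-up register offset and bit names:

#include <linux/io.h>

#define EXAMPLE_IER	0x08		/* hypothetical interrupt-enable register */
#define EXAMPLE_RX_IRQ	0x00000100	/* hypothetical bit above the low byte */

/* Disable one interrupt source without touching the others. */
static void example_mask_rx(void __iomem *membase)
{
	u32 ier = ioread32(membase + EXAMPLE_IER);

	ier &= ~EXAMPLE_RX_IRQ;		/* an 8-bit access would never see this bit */
	iowrite32(ier, membase + EXAMPLE_IER);
}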
diff --git a/drivers/serial/zs.c b/drivers/serial/zs.c
index 9e6a873f8203..d8c2809b1ab6 100644
--- a/drivers/serial/zs.c
+++ b/drivers/serial/zs.c
@@ -231,7 +231,7 @@ static int zs_receive_drain(struct zs_port *zport)
{
int loops = 10000;
- while ((read_zsreg(zport, R0) & Rx_CH_AV) && loops--)
+ while ((read_zsreg(zport, R0) & Rx_CH_AV) && --loops)
read_zsdata(zport);
return loops;
}
@@ -241,7 +241,7 @@ static int zs_transmit_drain(struct zs_port *zport, int irq)
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && loops--) {
+ while (!(read_zsreg(zport, R0) & Tx_BUF_EMP) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
@@ -254,7 +254,7 @@ static int zs_line_drain(struct zs_port *zport, int irq)
struct zs_scc *scc = zport->scc;
int loops = 10000;
- while (!(read_zsreg(zport, R1) & ALL_SNT) && loops--) {
+ while (!(read_zsreg(zport, R1) & ALL_SNT) && --loops) {
zs_spin_unlock_cond_irq(&scc->zlock, irq);
udelay(2);
zs_spin_lock_cond_irq(&scc->zlock, irq);
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index 581232b719fd..90b29b564631 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -284,21 +284,12 @@ static void ProcessModemStatus(struct quatech_port *qt_port,
return;
}
-static void ProcessRxChar(struct usb_serial_port *port, unsigned char Data)
+static void ProcessRxChar(struct tty_struct *tty, struct usb_serial_port *port,
+ unsigned char data)
{
- struct tty_struct *tty;
struct urb *urb = port->read_urb;
- tty = tty_port_tty_get(&port->port);
-
- /* if we insert more than TTY_FLIPBUF_SIZE characters, we drop them. */
-
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, &Data, 1);
- /* tty_flip_buffer_push(tty); */
- }
-
- return;
+ if (urb->actual_length)
+ tty_insert_flip_char(tty, data, TTY_NORMAL);
}
static void qt_write_bulk_callback(struct urb *urb)
@@ -435,8 +426,10 @@ static void qt_read_bulk_callback(struct urb *urb)
case 0xff:
dbg("No status sequence. \n");
- ProcessRxChar(port, data[i]);
- ProcessRxChar(port, data[i + 1]);
+ if (tty) {
+ ProcessRxChar(tty, port, data[i]);
+ ProcessRxChar(tty, port, data[i + 1]);
+ }
i += 2;
break;
}
@@ -444,10 +437,8 @@ static void qt_read_bulk_callback(struct urb *urb)
continue;
}
- if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, 1);
- tty_insert_flip_string(tty, (data + i), 1);
- }
+ if (tty && urb->actual_length)
+ tty_insert_flip_char(tty, data[i], TTY_NORMAL);
}
tty_flip_buffer_push(tty);
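The serqt_usb2 hunks replace the tty_buffer_request_room()/tty_insert_flip_string() pair used for a single byte with tty_insert_flip_char(), and move the tty lookup out of ProcessRxChar() so the caller passes the tty it already holds. For one character at a time tty_insert_flip_char() handles buffer space itself, so the explicit request_room call was redundant. A minimal sketch of the receive path in that shape (the helper name and buffer are illustrative):

#include <linux/tty.h>
#include <linux/tty_flip.h>

/* Push one received buffer to the line discipline. */
static void example_push_rx(struct tty_struct *tty,
			    const unsigned char *data, int len)
{
	int i;

	if (!tty)
		return;

	for (i = 0; i < len; i++)
		tty_insert_flip_char(tty, data[i], TTY_NORMAL);

	tty_flip_buffer_push(tty);	/* hand the characters to the ldisc */
}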
diff --git a/fs/Kconfig b/fs/Kconfig
index d78e950402c1..a97263be6a91 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -236,10 +236,12 @@ source "fs/nfsd/Kconfig"
config LOCKD
tristate
+ depends on FILE_LOCKING
config LOCKD_V4
bool
depends on NFSD_V3 || NFS_V3
+ depends on FILE_LOCKING
default y
config EXPORTFS
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c5ded5ff72b5..c135202c38b3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -94,7 +94,6 @@
#include <linux/atm_tcp.h>
#include <linux/sonet.h>
#include <linux/atm_suni.h>
-#include <linux/mtd/mtd.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
@@ -1405,46 +1404,6 @@ static int ioc_settimeout(unsigned int fd, unsigned int cmd, unsigned long arg)
#define HIDPGETCONNLIST _IOR('H', 210, int)
#define HIDPGETCONNINFO _IOR('H', 211, int)
-struct mtd_oob_buf32 {
- u_int32_t start;
- u_int32_t length;
- compat_caddr_t ptr; /* unsigned char* */
-};
-
-#define MEMWRITEOOB32 _IOWR('M',3,struct mtd_oob_buf32)
-#define MEMREADOOB32 _IOWR('M',4,struct mtd_oob_buf32)
-
-static int mtd_rw_oob(unsigned int fd, unsigned int cmd, unsigned long arg)
-{
- struct mtd_oob_buf __user *buf = compat_alloc_user_space(sizeof(*buf));
- struct mtd_oob_buf32 __user *buf32 = compat_ptr(arg);
- u32 data;
- char __user *datap;
- unsigned int real_cmd;
- int err;
-
- real_cmd = (cmd == MEMREADOOB32) ?
- MEMREADOOB : MEMWRITEOOB;
-
- if (copy_in_user(&buf->start, &buf32->start,
- 2 * sizeof(u32)) ||
- get_user(data, &buf32->ptr))
- return -EFAULT;
- datap = compat_ptr(data);
- if (put_user(datap, &buf->ptr))
- return -EFAULT;
-
- err = sys_ioctl(fd, real_cmd, (unsigned long) buf);
-
- if (!err) {
- if (copy_in_user(&buf32->start, &buf->start,
- 2 * sizeof(u32)))
- err = -EFAULT;
- }
-
- return err;
-}
-
#ifdef CONFIG_BLOCK
struct raw32_config_request
{
@@ -2426,15 +2385,6 @@ COMPATIBLE_IOCTL(USBDEVFS_SUBMITURB32)
COMPATIBLE_IOCTL(USBDEVFS_REAPURB32)
COMPATIBLE_IOCTL(USBDEVFS_REAPURBNDELAY32)
COMPATIBLE_IOCTL(USBDEVFS_CLEAR_HALT)
-/* MTD */
-COMPATIBLE_IOCTL(MEMGETINFO)
-COMPATIBLE_IOCTL(MEMERASE)
-COMPATIBLE_IOCTL(MEMLOCK)
-COMPATIBLE_IOCTL(MEMUNLOCK)
-COMPATIBLE_IOCTL(MEMGETREGIONCOUNT)
-COMPATIBLE_IOCTL(MEMGETREGIONINFO)
-COMPATIBLE_IOCTL(MEMGETBADBLOCK)
-COMPATIBLE_IOCTL(MEMSETBADBLOCK)
/* NBD */
ULONG_IOCTL(NBD_SET_SOCK)
ULONG_IOCTL(NBD_SET_BLKSIZE)
@@ -2544,8 +2494,6 @@ COMPATIBLE_IOCTL(JSIOCGBUTTONS)
COMPATIBLE_IOCTL(JSIOCGNAME(0))
/* now things that need handlers */
-HANDLE_IOCTL(MEMREADOOB32, mtd_rw_oob)
-HANDLE_IOCTL(MEMWRITEOOB32, mtd_rw_oob)
#ifdef CONFIG_NET
HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 1d437de1e9a8..7515e73e2bfb 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -196,7 +196,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
if (c->nextblock) {
ret = file_dirty(c, c->nextblock);
if (ret)
- return ret;
+ goto out;
/* deleting summary information of the old nextblock */
jffs2_sum_reset_collected(c->summary);
}
@@ -207,7 +207,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
} else {
ret = file_dirty(c, jeb);
if (ret)
- return ret;
+ goto out;
}
break;
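The jffs2 hunks above convert two early "return ret" statements in jffs2_scan_medium() into "goto out", presumably so the function's shared cleanup at its out label (freeing the scan buffers) also runs on these error paths instead of leaking them. A generic sketch of that single-exit cleanup idiom (not the jffs2 code itself):

#include <linux/slab.h>

/* Typical kernel error handling: one exit path frees what was allocated. */
static int example_scan(size_t len)
{
	char *buf;
	int ret = 0;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (len < 16) {			/* some failure discovered mid-way */
		ret = -EINVAL;
		goto out;		/* was "return ret" before the fix */
	}

	/* ... use buf ... */
out:
	kfree(buf);			/* runs on success and on error */
	return ret;
}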
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index dd7957064a8c..f2fdcbce143e 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -126,7 +126,6 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
struct nlm_lock *lock = &argp->lock;
nlmclnt_next_cookie(&argp->cookie);
- argp->state = nsm_local_state;
memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
lock->caller = utsname()->nodename;
lock->oh.data = req->a_owner;
@@ -165,6 +164,7 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
/* Set up the argument struct */
nlmclnt_setlockargs(call, fl);
+ lock_kernel();
if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
if (fl->fl_type != F_UNLCK) {
call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
@@ -178,6 +178,7 @@ int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
fl->fl_ops->fl_release_private(fl);
fl->fl_ops = NULL;
+ unlock_kernel();
dprintk("lockd: clnt proc returns %d\n", status);
return status;
@@ -519,6 +520,7 @@ nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
if (nsm_monitor(host) < 0)
goto out;
+ req->a_args.state = nsm_local_state;
fl->fl_flags |= FL_ACCESS;
status = do_vfs_lock(fl);
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 6d5d4a4169e5..7fce1b525849 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -53,7 +53,7 @@ static DEFINE_SPINLOCK(nsm_lock);
/*
* Local NSM state
*/
-int __read_mostly nsm_local_state;
+u32 __read_mostly nsm_local_state;
int __read_mostly nsm_use_hostnames;
static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm)
@@ -112,6 +112,7 @@ static struct rpc_clnt *nsm_create(void)
.program = &nsm_program,
.version = NSM_VERSION,
.authflavor = RPC_AUTH_NULL,
+ .flags = RPC_CLNT_CREATE_NOPING,
};
return rpc_create(&args);
@@ -184,13 +185,19 @@ int nsm_monitor(const struct nlm_host *host)
nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
status = nsm_mon_unmon(nsm, NSMPROC_MON, &res);
- if (res.status != 0)
+ if (unlikely(res.status != 0))
status = -EIO;
- if (status < 0)
+ if (unlikely(status < 0)) {
printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name);
- else
- nsm->sm_monitored = 1;
- return status;
+ return status;
+ }
+
+ nsm->sm_monitored = 1;
+ if (unlikely(nsm_local_state != res.state)) {
+ nsm_local_state = res.state;
+ dprintk("lockd: NSM state changed to %d\n", nsm_local_state);
+ }
+ return 0;
}
/**
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 83ee34203bd7..e577a78d7bac 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -326,6 +326,8 @@ static void nlmsvc_freegrantargs(struct nlm_rqst *call)
{
if (call->a_args.lock.oh.data != call->a_owner)
kfree(call->a_args.lock.oh.data);
+
+ locks_release_private(&call->a_args.lock.fl);
}
/*
diff --git a/fs/locks.c b/fs/locks.c
index ec3deea29e37..b6440f52178f 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -151,7 +151,7 @@ static struct file_lock *locks_alloc_lock(void)
return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
-static void locks_release_private(struct file_lock *fl)
+void locks_release_private(struct file_lock *fl)
{
if (fl->fl_ops) {
if (fl->fl_ops->fl_release_private)
@@ -165,6 +165,7 @@ static void locks_release_private(struct file_lock *fl)
}
}
+EXPORT_SYMBOL_GPL(locks_release_private);
/* Free a lock which is not in use. */
static void locks_free_lock(struct file_lock *fl)
diff --git a/fs/minix/minix.h b/fs/minix/minix.h
index cb7fdd11f9a5..9dcf95b42116 100644
--- a/fs/minix/minix.h
+++ b/fs/minix/minix.h
@@ -1,3 +1,6 @@
+#ifndef FS_MINIX_H
+#define FS_MINIX_H
+
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/minix_fs.h>
@@ -86,3 +89,5 @@ static inline struct minix_inode_info *minix_i(struct inode *inode)
{
return list_entry(inode, struct minix_inode_info, vfs_inode);
}
+
+#endif /* FS_MINIX_H */
diff --git a/fs/namespace.c b/fs/namespace.c
index 2dd333b0fe7f..a7bea8c8bd46 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1937,6 +1937,21 @@ dput_out:
return retval;
}
+static struct mnt_namespace *alloc_mnt_ns(void)
+{
+ struct mnt_namespace *new_ns;
+
+ new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
+ if (!new_ns)
+ return ERR_PTR(-ENOMEM);
+ atomic_set(&new_ns->count, 1);
+ new_ns->root = NULL;
+ INIT_LIST_HEAD(&new_ns->list);
+ init_waitqueue_head(&new_ns->poll);
+ new_ns->event = 0;
+ return new_ns;
+}
+
/*
* Allocate a new namespace structure and populate it with contents
* copied from the namespace of the passed in task structure.
@@ -1948,14 +1963,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
struct vfsmount *p, *q;
- new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
- if (!new_ns)
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&new_ns->count, 1);
- INIT_LIST_HEAD(&new_ns->list);
- init_waitqueue_head(&new_ns->poll);
- new_ns->event = 0;
+ new_ns = alloc_mnt_ns();
+ if (IS_ERR(new_ns))
+ return new_ns;
down_write(&namespace_sem);
/* First pass: copy the tree topology */
@@ -2019,6 +2029,24 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
return new_ns;
}
+/**
+ * create_mnt_ns - creates a private namespace and adds a root filesystem
+ * @mnt: pointer to the new root filesystem mountpoint
+ */
+struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt)
+{
+ struct mnt_namespace *new_ns;
+
+ new_ns = alloc_mnt_ns();
+ if (!IS_ERR(new_ns)) {
+ mnt->mnt_ns = new_ns;
+ new_ns->root = mnt;
+ list_add(&new_ns->list, &new_ns->root->mnt_list);
+ }
+ return new_ns;
+}
+EXPORT_SYMBOL(create_mnt_ns);
+
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
char __user *, type, unsigned long, flags, void __user *, data)
{
@@ -2246,10 +2274,14 @@ void __init mnt_init(void)
init_mount_tree();
}
-void __put_mnt_ns(struct mnt_namespace *ns)
+void put_mnt_ns(struct mnt_namespace *ns)
{
- struct vfsmount *root = ns->root;
+ struct vfsmount *root;
LIST_HEAD(umount_list);
+
+ if (!atomic_dec_and_lock(&ns->count, &vfsmount_lock))
+ return;
+ root = ns->root;
ns->root = NULL;
spin_unlock(&vfsmount_lock);
down_write(&namespace_sem);
@@ -2260,3 +2292,4 @@ void __put_mnt_ns(struct mnt_namespace *ns)
release_mounts(&umount_list);
kfree(ns);
}
+EXPORT_SYMBOL(put_mnt_ns);
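The namespace.c hunks factor the allocation into alloc_mnt_ns(), add create_mnt_ns() to wrap an existing vfsmount in a private namespace, and fold the reference-count drop into put_mnt_ns() (the old __put_mnt_ns() ran only after the caller had done the atomic_dec_and_lock itself). A rough sketch of how an in-kernel caller might use the exported pair, assuming create_mnt_ns() is declared alongside put_mnt_ns() in linux/mnt_namespace.h and that the caller obtains its vfsmount from vfs_kern_mount(); error handling is trimmed and the wrapper name is invented:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/mount.h>
#include <linux/mnt_namespace.h>

/* Mount a filesystem into a private, self-contained namespace. */
static struct mnt_namespace *example_private_mount(struct file_system_type *type,
						   const char *dev, void *data)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;

	mnt = vfs_kern_mount(type, 0, dev, data);
	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	ns = create_mnt_ns(mnt);	/* takes over the mount on success */
	if (IS_ERR(ns))
		mntput(mnt);		/* failure leaves the mount with us */
	return ns;
}

/* Later: put_mnt_ns(ns) drops the last reference and unmounts the tree. */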
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index e67f3ec07736..2a77bc25d5af 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -1,6 +1,6 @@
config NFS_FS
tristate "NFS client support"
- depends on INET
+ depends on INET && FILE_LOCKING
select LOCKD
select SUNRPC
select NFS_ACL_SUPPORT if NFS_V3_ACL
@@ -74,6 +74,15 @@ config NFS_V4
If unsure, say N.
+config NFS_V4_1
+ bool "NFS client support for NFSv4.1 (DEVELOPER ONLY)"
+ depends on NFS_V4 && EXPERIMENTAL
+ help
+ This option enables support for minor version 1 of the NFSv4 protocol
+ (draft-ietf-nfsv4-minorversion1) in the kernel's NFS client.
+
+ Unless you're an NFS developer, say N.
+
config ROOT_NFS
bool "Root file system on NFS"
depends on NFS_FS=y && IP_PNP
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index a886e692ddd0..7f604c7941fb 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -17,6 +17,9 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>
+#if defined(CONFIG_NFS_V4_1)
+#include <linux/sunrpc/bc_xprt.h>
+#endif
#include <net/inet_sock.h>
@@ -28,11 +31,12 @@
struct nfs_callback_data {
unsigned int users;
+ struct svc_serv *serv;
struct svc_rqst *rqst;
struct task_struct *task;
};
-static struct nfs_callback_data nfs_callback_info;
+static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
@@ -56,10 +60,10 @@ module_param_call(callback_tcpport, param_set_port, param_get_int,
&nfs_callback_set_tcpport, 0644);
/*
- * This is the callback kernel thread.
+ * This is the NFSv4 callback kernel thread.
*/
static int
-nfs_callback_svc(void *vrqstp)
+nfs4_callback_svc(void *vrqstp)
{
int err, preverr = 0;
struct svc_rqst *rqstp = vrqstp;
@@ -97,20 +101,12 @@ nfs_callback_svc(void *vrqstp)
}
/*
- * Bring up the callback thread if it is not already up.
+ * Prepare to bring up the NFSv4 callback service
*/
-int nfs_callback_up(void)
+struct svc_rqst *
+nfs4_callback_up(struct svc_serv *serv)
{
- struct svc_serv *serv = NULL;
- int ret = 0;
-
- mutex_lock(&nfs_callback_mutex);
- if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
- goto out;
- serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
- ret = -ENOMEM;
- if (!serv)
- goto out_err;
+ int ret;
ret = svc_create_xprt(serv, "tcp", PF_INET,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
@@ -127,27 +123,174 @@ int nfs_callback_up(void)
nfs_callback_tcpport6 = ret;
dprintk("NFS: Callback listener port = %u (af %u)\n",
nfs_callback_tcpport6, PF_INET6);
- } else if (ret != -EAFNOSUPPORT)
+ } else if (ret == -EAFNOSUPPORT)
+ ret = 0;
+ else
goto out_err;
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
- nfs_callback_info.rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
- if (IS_ERR(nfs_callback_info.rqst)) {
- ret = PTR_ERR(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
+ return svc_prepare_thread(serv, &serv->sv_pools[0]);
+
+out_err:
+ if (ret == 0)
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * The callback service for NFSv4.1 callbacks
+ */
+static int
+nfs41_callback_svc(void *vrqstp)
+{
+ struct svc_rqst *rqstp = vrqstp;
+ struct svc_serv *serv = rqstp->rq_server;
+ struct rpc_rqst *req;
+ int error;
+ DEFINE_WAIT(wq);
+
+ set_freezable();
+
+ /*
+ * FIXME: do we really need to run this under the BKL? If so, please
+ * add a comment about what it's intended to protect.
+ */
+ lock_kernel();
+ while (!kthread_should_stop()) {
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+ if (!list_empty(&serv->sv_cb_list)) {
+ req = list_first_entry(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ list_del(&req->rq_bc_list);
+ spin_unlock_bh(&serv->sv_cb_lock);
+ dprintk("Invoking bc_svc_process()\n");
+ error = bc_svc_process(serv, req, rqstp);
+ dprintk("bc_svc_process() returned w/ error code= %d\n",
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+ schedule();
+ }
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+ unlock_kernel();
+ return 0;
+}
+
+/*
+ * Bring up the NFSv4.1 callback service
+ */
+struct svc_rqst *
+nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
+{
+ struct svc_xprt *bc_xprt;
+ struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
+
+ dprintk("--> %s\n", __func__);
+ /* Create a svc_sock for the service */
+ bc_xprt = svc_sock_create(serv, xprt->prot);
+ if (!bc_xprt)
+ goto out;
+
+ /*
+ * Save the svc_serv in the transport so that it can
+ * be referenced when the session backchannel is initialized
+ */
+ serv->bc_xprt = bc_xprt;
+ xprt->bc_serv = serv;
+
+ INIT_LIST_HEAD(&serv->sv_cb_list);
+ spin_lock_init(&serv->sv_cb_lock);
+ init_waitqueue_head(&serv->sv_cb_waitq);
+ rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
+ if (IS_ERR(rqstp))
+ svc_sock_destroy(bc_xprt);
+out:
+ dprintk("--> %s return %p\n", __func__, rqstp);
+ return rqstp;
+}
+
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ if (minorversion) {
+ *rqstpp = nfs41_callback_up(serv, xprt);
+ *callback_svc = nfs41_callback_svc;
+ }
+ return minorversion;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+ if (minorversion)
+ xprt->bc_serv = cb_info->serv;
+}
+#else
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ return 0;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Bring up the callback thread if it is not already up.
+ */
+int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+{
+ struct svc_serv *serv = NULL;
+ struct svc_rqst *rqstp;
+ int (*callback_svc)(void *vrqstp);
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+ char svc_name[12];
+ int ret = 0;
+ int minorversion_setup;
+
+ mutex_lock(&nfs_callback_mutex);
+ if (cb_info->users++ || cb_info->task != NULL) {
+ nfs_callback_bc_serv(minorversion, xprt, cb_info);
+ goto out;
+ }
+ serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
+ if (!serv) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
+ serv, xprt, &rqstp, &callback_svc);
+ if (!minorversion_setup) {
+ /* v4.0 callback setup */
+ rqstp = nfs4_callback_up(serv);
+ callback_svc = nfs4_callback_svc;
+ }
+
+ if (IS_ERR(rqstp)) {
+ ret = PTR_ERR(rqstp);
goto out_err;
}
svc_sock_update_bufs(serv);
- nfs_callback_info.task = kthread_run(nfs_callback_svc,
- nfs_callback_info.rqst,
- "nfsv4-svc");
- if (IS_ERR(nfs_callback_info.task)) {
- ret = PTR_ERR(nfs_callback_info.task);
- svc_exit_thread(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
- nfs_callback_info.task = NULL;
+ sprintf(svc_name, "nfsv4.%u-svc", minorversion);
+ cb_info->serv = serv;
+ cb_info->rqst = rqstp;
+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
+ if (IS_ERR(cb_info->task)) {
+ ret = PTR_ERR(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
goto out_err;
}
out:
@@ -164,22 +307,25 @@ out:
out_err:
dprintk("NFS: Couldn't create callback socket or server thread; "
"err = %d\n", ret);
- nfs_callback_info.users--;
+ cb_info->users--;
goto out;
}
/*
* Kill the callback thread if it's no longer being used.
*/
-void nfs_callback_down(void)
+void nfs_callback_down(int minorversion)
{
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+
mutex_lock(&nfs_callback_mutex);
- nfs_callback_info.users--;
- if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL) {
- kthread_stop(nfs_callback_info.task);
- svc_exit_thread(nfs_callback_info.rqst);
- nfs_callback_info.rqst = NULL;
- nfs_callback_info.task = NULL;
+ cb_info->users--;
+ if (cb_info->users == 0 && cb_info->task != NULL) {
+ kthread_stop(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->serv = NULL;
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
}
mutex_unlock(&nfs_callback_mutex);
}
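The callback.c hunks turn the single nfs_callback_info into a per-minorversion array while keeping the existing users counter, so the svc_serv and kthread are created on the first nfs_callback_up() for a given minor version and torn down on the last nfs_callback_down(). The bring-up/tear-down shape is an ordinary mutex-protected refcount; a stripped-down sketch of that pattern (generic example code, not the NFS service itself):

#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

struct example_svc {
	unsigned int users;
	struct task_struct *task;
};

static struct example_svc example_info;
static DEFINE_MUTEX(example_mutex);

static int example_thread(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);	/* service loop stub */
	return 0;
}

int example_svc_up(void)
{
	int ret = 0;

	mutex_lock(&example_mutex);
	if (example_info.users++ || example_info.task != NULL)
		goto out;				/* already running */
	example_info.task = kthread_run(example_thread, NULL, "example-svc");
	if (IS_ERR(example_info.task)) {
		ret = PTR_ERR(example_info.task);
		example_info.task = NULL;
		example_info.users--;
	}
out:
	mutex_unlock(&example_mutex);
	return ret;
}

void example_svc_down(void)
{
	mutex_lock(&example_mutex);
	if (--example_info.users == 0 && example_info.task != NULL) {
		kthread_stop(example_info.task);
		example_info.task = NULL;
	}
	mutex_unlock(&example_mutex);
}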
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index e110e286a262..07baa8254ca1 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -20,13 +20,24 @@ enum nfs4_callback_procnum {
enum nfs4_callback_opnum {
OP_CB_GETATTR = 3,
OP_CB_RECALL = 4,
+/* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
OP_CB_ILLEGAL = 10044,
};
struct cb_compound_hdr_arg {
unsigned int taglen;
const char *tag;
- unsigned int callback_ident;
+ unsigned int minorversion;
unsigned nops;
};
@@ -59,16 +70,59 @@ struct cb_recallargs {
uint32_t truncate;
};
+#if defined(CONFIG_NFS_V4_1)
+
+struct referring_call {
+ uint32_t rc_sequenceid;
+ uint32_t rc_slotid;
+};
+
+struct referring_call_list {
+ struct nfs4_sessionid rcl_sessionid;
+ uint32_t rcl_nrefcalls;
+ struct referring_call *rcl_refcalls;
+};
+
+struct cb_sequenceargs {
+ struct sockaddr *csa_addr;
+ struct nfs4_sessionid csa_sessionid;
+ uint32_t csa_sequenceid;
+ uint32_t csa_slotid;
+ uint32_t csa_highestslotid;
+ uint32_t csa_cachethis;
+ uint32_t csa_nrclists;
+ struct referring_call_list *csa_rclists;
+};
+
+struct cb_sequenceres {
+ __be32 csr_status;
+ struct nfs4_sessionid csr_sessionid;
+ uint32_t csr_sequenceid;
+ uint32_t csr_slotid;
+ uint32_t csr_highestslotid;
+ uint32_t csr_target_highestslotid;
+};
+
+extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res);
+
+#endif /* CONFIG_NFS_V4_1 */
+
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
#ifdef CONFIG_NFS_V4
-extern int nfs_callback_up(void);
-extern void nfs_callback_down(void);
-#else
-#define nfs_callback_up() (0)
-#define nfs_callback_down() do {} while(0)
-#endif
+extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
+extern void nfs_callback_down(int minorversion);
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * nfs41: Callbacks are expected to not cause substantial latency,
+ * so we limit their concurrency to 1 by setting up the maximum number
+ * of slots for the backchannel.
+ */
+#define NFS41_BC_MIN_CALLBACKS 1
+#define NFS41_BC_MAX_CALLBACKS 1
extern unsigned int nfs_callback_set_tcpport;
extern unsigned short nfs_callback_tcpport;
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index f7e83e23cf9f..b7da1f54da68 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -101,3 +101,130 @@ out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
return res;
}
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Validate the sequenceID sent by the server.
+ * Return success if the sequenceID is one more than what we last saw on
+ * this slot, accounting for wraparound. Increments the slot's sequence.
+ *
+ * We don't yet implement a duplicate request cache, so at this time
+ * we will log replays, and process them as if we had not seen them before,
+ * but we don't bump the sequence in the slot. Not too worried about it,
+ * since we only currently implement idempotent callbacks anyway.
+ *
+ * We have a single slot backchannel at this time, so we don't bother
+ * checking the used_slots bit array on the table. The lower layer guarantees
+ * a single outstanding callback request at a time.
+ */
+static int
+validate_seqid(struct nfs4_slot_table *tbl, u32 slotid, u32 seqid)
+{
+ struct nfs4_slot *slot;
+
+ dprintk("%s enter. slotid %d seqid %d\n",
+ __func__, slotid, seqid);
+
+ if (slotid > NFS41_BC_MAX_CALLBACKS)
+ return htonl(NFS4ERR_BADSLOT);
+
+ slot = tbl->slots + slotid;
+ dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);
+
+ /* Normal */
+ if (likely(seqid == slot->seq_nr + 1)) {
+ slot->seq_nr++;
+ return htonl(NFS4_OK);
+ }
+
+ /* Replay */
+ if (seqid == slot->seq_nr) {
+ dprintk("%s seqid %d is a replay - no DRC available\n",
+ __func__, seqid);
+ return htonl(NFS4_OK);
+ }
+
+ /* Wraparound */
+ if (seqid == 1 && (slot->seq_nr + 1) == 0) {
+ slot->seq_nr = 1;
+ return htonl(NFS4_OK);
+ }
+
+ /* Misordered request */
+ return htonl(NFS4ERR_SEQ_MISORDERED);
+}
+
+/*
+ * Returns a pointer to a held 'struct nfs_client' that matches the server's
+ * address, major version number, and session ID. It is the caller's
+ * responsibility to release the returned reference.
+ *
+ * Returns NULL if there are no connections with sessions, or if no session
+ * matches the one of interest.
+ */
+ static struct nfs_client *find_client_with_session(
+ const struct sockaddr *addr, u32 nfsversion,
+ struct nfs4_sessionid *sessionid)
+{
+ struct nfs_client *clp;
+
+ clp = nfs_find_client(addr, 4);
+ if (clp == NULL)
+ return NULL;
+
+ do {
+ struct nfs_client *prev = clp;
+
+ if (clp->cl_session != NULL) {
+ if (memcmp(clp->cl_session->sess_id.data,
+ sessionid->data,
+ NFS4_MAX_SESSIONID_LEN) == 0) {
+ /* Returns a held reference to clp */
+ return clp;
+ }
+ }
+ clp = nfs_find_client_next(prev);
+ nfs_put_client(prev);
+ } while (clp != NULL);
+
+ return NULL;
+}
+
+/* FIXME: referring calls should be processed */
+unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res)
+{
+ struct nfs_client *clp;
+ int i, status;
+
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+
+ status = htonl(NFS4ERR_BADSESSION);
+ clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
+ if (clp == NULL)
+ goto out;
+
+ status = validate_seqid(&clp->cl_session->bc_slot_table,
+ args->csa_slotid, args->csa_sequenceid);
+ if (status)
+ goto out_putclient;
+
+ memcpy(&res->csr_sessionid, &args->csa_sessionid,
+ sizeof(res->csr_sessionid));
+ res->csr_sequenceid = args->csa_sequenceid;
+ res->csr_slotid = args->csa_slotid;
+ res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+ res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+
+out_putclient:
+ nfs_put_client(clp);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ res->csr_status = status;
+ return res->csr_status;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
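validate_seqid() above accepts exactly three cases: the next sequence number (normal progress, bump the slot), the same sequence number (a replay, answered again because there is no duplicate-request cache yet), and a wrap back to 1 when the 32-bit counter overflows; anything else is NFS4ERR_SEQ_MISORDERED. The same decision written as a standalone function over plain integers (illustrative only; the real code also range-checks the slot id and returns NFS4 status codes):

#include <stdint.h>

enum seq_verdict { SEQ_OK, SEQ_REPLAY, SEQ_MISORDERED };

/* Decide what an incoming callback sequence id means for one slot. */
static enum seq_verdict check_seqid(uint32_t *slot_seq, uint32_t seqid)
{
	if (seqid == *slot_seq + 1 || (seqid == 1 && *slot_seq + 1 == 0)) {
		*slot_seq = seqid;	/* normal progress, including wraparound */
		return SEQ_OK;
	}
	if (seqid == *slot_seq)
		return SEQ_REPLAY;	/* answered again, slot not bumped */
	return SEQ_MISORDERED;
}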
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index dd0ef34b5845..e5a2dac5f715 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -20,6 +20,11 @@
2 + 2 + 3 + 3)
#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#if defined(CONFIG_NFS_V4_1)
+#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
+ 4 + 1 + 3)
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFSDBG_FACILITY NFSDBG_CALLBACK
typedef __be32 (*callback_process_op_t)(void *, void *);
@@ -132,7 +137,6 @@ static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
__be32 *p;
- unsigned int minor_version;
__be32 status;
status = decode_string(xdr, &hdr->taglen, &hdr->tag);
@@ -147,15 +151,19 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
p = read_buf(xdr, 12);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
- minor_version = ntohl(*p++);
- /* Check minor version is zero. */
- if (minor_version != 0) {
- printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n",
- __func__, minor_version);
+ hdr->minorversion = ntohl(*p++);
+ /* Check minor version is zero or one. */
+ if (hdr->minorversion <= 1) {
+ p++; /* skip callback_ident */
+ } else {
+ printk(KERN_WARNING "%s: NFSv4 server callback with "
+ "illegal minor version %u!\n",
+ __func__, hdr->minorversion);
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
- hdr->callback_ident = ntohl(*p++);
hdr->nops = ntohl(*p);
+ dprintk("%s: minorversion %d nops %d\n", __func__,
+ hdr->minorversion, hdr->nops);
return 0;
}
@@ -204,6 +212,122 @@ out:
return status;
}
+#if defined(CONFIG_NFS_V4_1)
+
+static unsigned decode_sessionid(struct xdr_stream *xdr,
+ struct nfs4_sessionid *sid)
+{
+ uint32_t *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = read_buf(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(sid->data, p, len);
+ return 0;
+}
+
+static unsigned decode_rc_list(struct xdr_stream *xdr,
+ struct referring_call_list *rc_list)
+{
+ uint32_t *p;
+ int i;
+ unsigned status;
+
+ status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ rc_list->rcl_nrefcalls = ntohl(*p++);
+ if (rc_list->rcl_nrefcalls) {
+ p = read_buf(xdr,
+ rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+ rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+ sizeof(*rc_list->rcl_refcalls),
+ GFP_KERNEL);
+ if (unlikely(rc_list->rcl_refcalls == NULL))
+ goto out;
+ for (i = 0; i < rc_list->rcl_nrefcalls; i++) {
+ rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
+ rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
+ }
+ }
+ status = 0;
+
+out:
+ return status;
+}
+
+static unsigned decode_cb_sequence_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_sequenceargs *args)
+{
+ uint32_t *p;
+ int i;
+ unsigned status;
+
+ status = decode_sessionid(xdr, &args->csa_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, 5 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ args->csa_addr = svc_addr(rqstp);
+ args->csa_sequenceid = ntohl(*p++);
+ args->csa_slotid = ntohl(*p++);
+ args->csa_highestslotid = ntohl(*p++);
+ args->csa_cachethis = ntohl(*p++);
+ args->csa_nrclists = ntohl(*p++);
+ args->csa_rclists = NULL;
+ if (args->csa_nrclists) {
+ args->csa_rclists = kmalloc(args->csa_nrclists *
+ sizeof(*args->csa_rclists),
+ GFP_KERNEL);
+ if (unlikely(args->csa_rclists == NULL))
+ goto out;
+
+ for (i = 0; i < args->csa_nrclists; i++) {
+ status = decode_rc_list(xdr, &args->csa_rclists[i]);
+ if (status)
+ goto out_free;
+ }
+ }
+ status = 0;
+
+ dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u "
+ "highestslotid %u cachethis %d nrclists %u\n",
+ __func__,
+ ((u32 *)&args->csa_sessionid)[0],
+ ((u32 *)&args->csa_sessionid)[1],
+ ((u32 *)&args->csa_sessionid)[2],
+ ((u32 *)&args->csa_sessionid)[3],
+ args->csa_sequenceid, args->csa_slotid,
+ args->csa_highestslotid, args->csa_cachethis,
+ args->csa_nrclists);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+
+out_free:
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+ goto out;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
{
__be32 *p;
@@ -353,31 +477,134 @@ out:
return status;
}
-static __be32 process_op(struct svc_rqst *rqstp,
+#if defined(CONFIG_NFS_V4_1)
+
+static unsigned encode_sessionid(struct xdr_stream *xdr,
+ const struct nfs4_sessionid *sid)
+{
+ uint32_t *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = xdr_reserve_space(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(p, sid, len);
+ return 0;
+}
+
+static unsigned encode_cb_sequence_res(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ const struct cb_sequenceres *res)
+{
+ uint32_t *p;
+ unsigned status = res->csr_status;
+
+ if (unlikely(status != 0))
+ goto out;
+
+ encode_sessionid(xdr, &res->csr_sessionid);
+
+ p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ *p++ = htonl(res->csr_sequenceid);
+ *p++ = htonl(res->csr_slotid);
+ *p++ = htonl(res->csr_highestslotid);
+ *p++ = htonl(res->csr_target_highestslotid);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ if (op_nr == OP_CB_SEQUENCE) {
+ if (nop != 0)
+ return htonl(NFS4ERR_SEQUENCE_POS);
+ } else {
+ if (nop == 0)
+ return htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ }
+
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ case OP_CB_SEQUENCE:
+ *op = &callback_ops[op_nr];
+ break;
+
+ case OP_CB_LAYOUTRECALL:
+ case OP_CB_NOTIFY_DEVICEID:
+ case OP_CB_NOTIFY:
+ case OP_CB_PUSH_DELEG:
+ case OP_CB_RECALL_ANY:
+ case OP_CB_RECALLABLE_OBJ_AVAIL:
+ case OP_CB_RECALL_SLOT:
+ case OP_CB_WANTS_CANCELLED:
+ case OP_CB_NOTIFY_LOCK:
+ return htonl(NFS4ERR_NOTSUPP);
+
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
+{
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ *op = &callback_ops[op_nr];
+ break;
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+static __be32 process_op(uint32_t minorversion, int nop,
+ struct svc_rqst *rqstp,
struct xdr_stream *xdr_in, void *argp,
struct xdr_stream *xdr_out, void *resp)
{
struct callback_op *op = &callback_ops[0];
unsigned int op_nr = OP_CB_ILLEGAL;
- __be32 status = 0;
+ __be32 status;
long maxlen;
__be32 res;
dprintk("%s: start\n", __func__);
status = decode_op_hdr(xdr_in, &op_nr);
- if (likely(status == 0)) {
- switch (op_nr) {
- case OP_CB_GETATTR:
- case OP_CB_RECALL:
- op = &callback_ops[op_nr];
- break;
- default:
- op_nr = OP_CB_ILLEGAL;
- op = &callback_ops[0];
- status = htonl(NFS4ERR_OP_ILLEGAL);
- }
+ if (unlikely(status)) {
+ status = htonl(NFS4ERR_OP_ILLEGAL);
+ goto out;
}
+ dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
+ __func__, minorversion, nop, op_nr);
+
+ status = minorversion ? preprocess_nfs41_op(nop, op_nr, &op) :
+ preprocess_nfs4_op(op_nr, &op);
+ if (status == htonl(NFS4ERR_OP_ILLEGAL))
+ op_nr = OP_CB_ILLEGAL;
+out:
maxlen = xdr_out->end - xdr_out->p;
if (maxlen > 0 && maxlen < PAGE_SIZE) {
if (likely(status == 0 && op->decode_args != NULL))
@@ -425,7 +652,8 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
return rpc_system_err;
while (status == 0 && nops != hdr_arg.nops) {
- status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp);
+ status = process_op(hdr_arg.minorversion, nops,
+ rqstp, &xdr_in, argp, &xdr_out, resp);
nops++;
}
@@ -452,7 +680,15 @@ static struct callback_op callback_ops[] = {
.process_op = (callback_process_op_t)nfs4_callback_recall,
.decode_args = (callback_decode_arg_t)decode_recall_args,
.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
- }
+ },
+#if defined(CONFIG_NFS_V4_1)
+ [OP_CB_SEQUENCE] = {
+ .process_op = (callback_process_op_t)nfs4_callback_sequence,
+ .decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
+ .encode_res = (callback_encode_res_t)encode_cb_sequence_res,
+ .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
+ },
+#endif /* CONFIG_NFS_V4_1 */
};
/*
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 75c9cd2aa119..c2d061675d80 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -37,6 +37,7 @@
#include <linux/in6.h>
#include <net/ipv6.h>
#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/bc_xprt.h>
#include <asm/system.h>
@@ -102,6 +103,7 @@ struct nfs_client_initdata {
size_t addrlen;
const struct nfs_rpc_ops *rpc_ops;
int proto;
+ u32 minorversion;
};
/*
@@ -114,18 +116,13 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
{
struct nfs_client *clp;
struct rpc_cred *cred;
+ int err = -ENOMEM;
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
goto error_0;
clp->rpc_ops = cl_init->rpc_ops;
- if (cl_init->rpc_ops->version == 4) {
- if (nfs_callback_up() < 0)
- goto error_2;
- __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
- }
-
atomic_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
@@ -133,9 +130,10 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_addrlen = cl_init->addrlen;
if (cl_init->hostname) {
+ err = -ENOMEM;
clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL);
if (!clp->cl_hostname)
- goto error_3;
+ goto error_cleanup;
}
INIT_LIST_HEAD(&clp->cl_superblocks);
@@ -150,6 +148,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
+ clp->cl_minorversion = cl_init->minorversion;
#endif
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
@@ -159,13 +158,10 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
return clp;
-error_3:
- if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down();
-error_2:
+error_cleanup:
kfree(clp);
error_0:
- return NULL;
+ return ERR_PTR(err);
}
static void nfs4_shutdown_client(struct nfs_client *clp)
@@ -182,12 +178,42 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
}
/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4
+ if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+ nfs_callback_down(clp->cl_minorversion);
+#endif /* CONFIG_NFS_V4 */
+}
+
+/*
+ * Clears/puts all minor version specific parts from an nfs_client struct
+ * reverting it to minorversion 0.
+ */
+static void nfs4_clear_client_minor_version(struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp)) {
+ nfs4_destroy_session(clp->cl_session);
+ clp->cl_session = NULL;
+ }
+
+ clp->cl_call_sync = _nfs4_call_sync;
+#endif /* CONFIG_NFS_V4_1 */
+
+ nfs4_destroy_callback(clp);
+}
+
+/*
* Destroy a shared client record
*/
static void nfs_free_client(struct nfs_client *clp)
{
dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
+ nfs4_clear_client_minor_version(clp);
nfs4_shutdown_client(clp);
nfs_fscache_release_client_cookie(clp);
@@ -196,9 +222,6 @@ static void nfs_free_client(struct nfs_client *clp)
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
- if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
- nfs_callback_down();
-
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
@@ -347,7 +370,8 @@ struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
/* Don't match clients that failed to initialise properly */
- if (clp->cl_cons_state != NFS_CS_READY)
+ if (!(clp->cl_cons_state == NFS_CS_READY ||
+ clp->cl_cons_state == NFS_CS_SESSION_INITING))
continue;
/* Different NFS versions cannot share the same nfs_client */
@@ -420,7 +444,9 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
if (clp->cl_proto != data->proto)
continue;
-
+ /* Match nfsv4 minorversion */
+ if (clp->cl_minorversion != data->minorversion)
+ continue;
/* Match the full socket address */
if (!nfs_sockaddr_cmp(sap, clap))
continue;
@@ -456,9 +482,10 @@ static struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_in
spin_unlock(&nfs_client_lock);
new = nfs_alloc_client(cl_init);
- } while (new);
+ } while (!IS_ERR(new));
- return ERR_PTR(-ENOMEM);
+ dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+ return new;
/* install a new client and return with it unready */
install_client:
@@ -478,7 +505,7 @@ found_client:
nfs_free_client(new);
error = wait_event_killable(nfs_client_active_wq,
- clp->cl_cons_state != NFS_CS_INITING);
+ clp->cl_cons_state < NFS_CS_INITING);
if (error < 0) {
nfs_put_client(clp);
return ERR_PTR(-ERESTARTSYS);
@@ -499,13 +526,29 @@ found_client:
/*
* Mark a server as ready or failed
*/
-static void nfs_mark_client_ready(struct nfs_client *clp, int state)
+void nfs_mark_client_ready(struct nfs_client *clp, int state)
{
clp->cl_cons_state = state;
wake_up_all(&nfs_client_active_wq);
}
/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate
+ * other versions of NFS can be tried.
+ */
+int nfs4_check_client_ready(struct nfs_client *clp)
+{
+ if (!nfs4_has_session(clp))
+ return 0;
+ if (clp->cl_cons_state < NFS_CS_READY)
+ return -EPROTONOSUPPORT;
+ return 0;
+}
+
+/*
* Initialise the timeout values for a connection
*/
static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
@@ -1050,6 +1093,61 @@ error:
#ifdef CONFIG_NFS_V4
/*
+ * Initialize the NFS4 callback service
+ */
+static int nfs4_init_callback(struct nfs_client *clp)
+{
+ int error;
+
+ if (clp->rpc_ops->version == 4) {
+ if (nfs4_has_session(clp)) {
+ error = xprt_setup_backchannel(
+ clp->cl_rpcclient->cl_xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ if (error < 0)
+ return error;
+ }
+
+ error = nfs_callback_up(clp->cl_minorversion,
+ clp->cl_rpcclient->cl_xprt);
+ if (error < 0) {
+ dprintk("%s: failed to start callback. Error = %d\n",
+ __func__, error);
+ return error;
+ }
+ __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
+ }
+ return 0;
+}
+
+/*
+ * Initialize the minor version specific parts of an NFS4 client record
+ */
+static int nfs4_init_client_minor_version(struct nfs_client *clp)
+{
+ clp->cl_call_sync = _nfs4_call_sync;
+
+#if defined(CONFIG_NFS_V4_1)
+ if (clp->cl_minorversion) {
+ struct nfs4_session *session = NULL;
+ /*
+ * Create the session and mark it expired.
+ * When a SEQUENCE operation encounters the expired session
+ * it will do session recovery to initialize it.
+ */
+ session = nfs4_alloc_session(clp);
+ if (!session)
+ return -ENOMEM;
+
+ clp->cl_session = session;
+ clp->cl_call_sync = _nfs4_call_sync_session;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+
+ return nfs4_init_callback(clp);
+}
+
+/*
* Initialise an NFS4 client record
*/
static int nfs4_init_client(struct nfs_client *clp,
@@ -1083,7 +1181,12 @@ static int nfs4_init_client(struct nfs_client *clp,
}
__set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
- nfs_mark_client_ready(clp, NFS_CS_READY);
+ error = nfs4_init_client_minor_version(clp);
+ if (error < 0)
+ goto error;
+
+ if (!nfs4_has_session(clp))
+ nfs_mark_client_ready(clp, NFS_CS_READY);
return 0;
error:
@@ -1101,7 +1204,8 @@ static int nfs4_set_client(struct nfs_server *server,
const size_t addrlen,
const char *ip_addr,
rpc_authflavor_t authflavour,
- int proto, const struct rpc_timeout *timeparms)
+ int proto, const struct rpc_timeout *timeparms,
+ u32 minorversion)
{
struct nfs_client_initdata cl_init = {
.hostname = hostname,
@@ -1109,6 +1213,7 @@ static int nfs4_set_client(struct nfs_server *server,
.addrlen = addrlen,
.rpc_ops = &nfs_v4_clientops,
.proto = proto,
+ .minorversion = minorversion,
};
struct nfs_client *clp;
int error;
@@ -1138,6 +1243,36 @@ error:
}
/*
+ * Initialize a session.
+ * Note: save the mount rsize and wsize for create_server negotiation.
+ */
+static void nfs4_init_session(struct nfs_client *clp,
+ unsigned int wsize, unsigned int rsize)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (nfs4_has_session(clp)) {
+ clp->cl_session->fc_attrs.max_rqst_sz = wsize;
+ clp->cl_session->fc_attrs.max_resp_sz = rsize;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/*
+ * Session has been established, and the client marked ready.
+ * Set the mount rsize and wsize with negotiated fore channel
+ * attributes, which will be bounds-checked in nfs_server_set_fsinfo.
+ */
+static void nfs4_session_set_rwsize(struct nfs_server *server)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (!nfs4_has_session(server->nfs_client))
+ return;
+ server->rsize = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ server->wsize = server->nfs_client->cl_session->fc_attrs.max_rqst_sz;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/*
* Create a version 4 volume record
*/
static int nfs4_init_server(struct nfs_server *server,
@@ -1164,7 +1299,8 @@ static int nfs4_init_server(struct nfs_server *server,
data->client_address,
data->auth_flavors[0],
data->nfs_server.protocol,
- &timeparms);
+ &timeparms,
+ data->minorversion);
if (error < 0)
goto error;
@@ -1214,6 +1350,8 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+ nfs4_init_session(server->nfs_client, server->wsize, server->rsize);
+
/* Probe the root fh to retrieve its FSID */
error = nfs4_path_walk(server, mntfh, data->nfs_server.export_path);
if (error < 0)
@@ -1224,6 +1362,8 @@ struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
(unsigned long long) server->fsid.minor);
dprintk("Mount FH: %d\n", mntfh->size);
+ nfs4_session_set_rwsize(server);
+
error = nfs_probe_fsinfo(server, mntfh, &fattr);
if (error < 0)
goto error;
@@ -1282,7 +1422,8 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
parent_client->cl_ipaddr,
data->authflavor,
parent_server->client->cl_xprt->prot,
- parent_server->client->cl_timeout);
+ parent_server->client->cl_timeout,
+ parent_client->cl_minorversion);
if (error < 0)
goto error;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 968225a88015..af05b918cb5b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -68,29 +68,26 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
{
struct inode *inode = state->inode;
struct file_lock *fl;
- int status;
+ int status = 0;
+
+ if (inode->i_flock == NULL)
+ goto out;
+ /* Protect inode->i_flock using the BKL */
+ lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file) != ctx)
continue;
+ unlock_kernel();
status = nfs4_lock_delegation_recall(state, fl);
- if (status >= 0)
- continue;
- switch (status) {
- default:
- printk(KERN_ERR "%s: unhandled error %d.\n",
- __func__, status);
- case -NFS4ERR_EXPIRED:
- /* kill_proc(fl->fl_pid, SIGLOST, 1); */
- case -NFS4ERR_STALE_CLIENTID:
- nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
- goto out_err;
- }
+ if (status < 0)
+ goto out;
+ lock_kernel();
}
- return 0;
-out_err:
+ unlock_kernel();
+out:
return status;
}
@@ -268,7 +265,10 @@ static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegat
struct nfs_inode *nfsi = NFS_I(inode);
nfs_msync_inode(inode);
- /* Guard against new delegated open calls */
+ /*
+ * Guard against new delegated open/lock/unlock calls and against
+ * state recovery
+ */
down_write(&nfsi->rwsem);
nfs_delegation_claim_opens(inode, &delegation->stateid);
up_write(&nfsi->rwsem);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 08f6b040d289..489fc01a3204 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -259,6 +259,9 @@ static void nfs_direct_read_release(void *calldata)
}
static const struct rpc_call_ops nfs_read_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_read_result,
.rpc_release = nfs_direct_read_release,
};
@@ -535,6 +538,9 @@ static void nfs_direct_commit_release(void *calldata)
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_commit_result,
.rpc_release = nfs_direct_commit_release,
};
@@ -673,6 +679,9 @@ out_unlock:
}
static const struct rpc_call_ops nfs_write_direct_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_direct_write_result,
.rpc_release = nfs_direct_write_release,
};
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index ec7e27d00bc6..0055b813ec2c 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -48,6 +48,9 @@ static ssize_t nfs_file_splice_read(struct file *filp, loff_t *ppos,
size_t count, unsigned int flags);
static ssize_t nfs_file_read(struct kiocb *, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
+static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags);
static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
static int nfs_file_flush(struct file *, fl_owner_t id);
@@ -73,6 +76,7 @@ const struct file_operations nfs_file_operations = {
.lock = nfs_lock,
.flock = nfs_flock,
.splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
.check_flags = nfs_check_flags,
.setlease = nfs_setlease,
};
@@ -587,12 +591,38 @@ out_swapfile:
goto out;
}
+static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ ssize_t ret;
+
+ dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (unsigned long long) *ppos);
+
+ /*
+ * The combination of splice and an O_APPEND destination is disallowed.
+ */
+
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, count);
+
+ ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
+ if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
+ int err = nfs_do_fsync(nfs_file_open_context(filp), inode);
+ if (err < 0)
+ ret = err;
+ }
+ return ret;
+}
+
static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
{
struct inode *inode = filp->f_mapping->host;
int status = 0;
- lock_kernel();
/* Try local locking first */
posix_test_lock(filp, fl);
if (fl->fl_type != F_UNLCK) {
@@ -608,7 +638,6 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
- unlock_kernel();
return status;
out_noconflict:
fl->fl_type = F_UNLCK;
@@ -650,13 +679,11 @@ static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
* If we're signalled while cleaning up locks on process exit, we
* still need to complete the unlock.
*/
- lock_kernel();
/* Use local locking if mounted with "-onolock" */
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
- unlock_kernel();
return status;
}
@@ -673,13 +700,11 @@ static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
if (status != 0)
goto out;
- lock_kernel();
/* Use local locking if mounted with "-onolock" */
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
status = do_vfs_lock(filp, fl);
- unlock_kernel();
if (status < 0)
goto out;
/*
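
The splice_write path added above follows a common write-then-flush shape: perform the write, then, if the open requires synchronous semantics, flush and let a flush failure override the byte count. A minimal user-space sketch of that ordering (plain write(2)/fsync(2) stand in for generic_file_splice_write()/nfs_do_fsync(), and the need_sync flag is a stand-in for nfs_need_sync_write()):

#include <unistd.h>

/*
 * Sketch only: do the write, then flush if required, reporting a flush
 * failure in place of a successful byte count, mirroring the error
 * handling in nfs_file_splice_write() above.
 */
static ssize_t write_and_maybe_sync(int fd, const void *buf, size_t len,
				    int need_sync)
{
	ssize_t ret = write(fd, buf, len);

	if (ret >= 0 && need_sync) {
		int err = fsync(fd);

		if (err < 0)
			ret = err;
	}
	return ret;
}
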
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e4d6a8348adf..7dd90a6769d0 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -2,6 +2,7 @@
* NFS internal definitions
*/
+#include "nfs4_fs.h"
#include <linux/mount.h>
#include <linux/security.h>
@@ -17,6 +18,18 @@ struct nfs_string;
*/
#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
+/*
+ * Determine if sessions are in use.
+ */
+static inline int nfs4_has_session(const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (clp->cl_session)
+ return 1;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
struct nfs_clone_mount {
const struct super_block *sb;
const struct dentry *dentry;
@@ -30,6 +43,12 @@ struct nfs_clone_mount {
};
/*
+ * Note: RFC 1813 doesn't limit the number of auth flavors that
+ * a server can return, so make something up.
+ */
+#define NFS_MAX_SECFLAVORS (12)
+
+/*
* In-kernel mount arguments
*/
struct nfs_parsed_mount_data {
@@ -44,6 +63,7 @@ struct nfs_parsed_mount_data {
unsigned int auth_flavor_len;
rpc_authflavor_t auth_flavors[1];
char *client_address;
+ unsigned int minorversion;
char *fscache_uniq;
struct {
@@ -77,6 +97,8 @@ struct nfs_mount_request {
unsigned short protocol;
struct nfs_fh *fh;
int noresvport;
+ unsigned int *auth_flav_len;
+ rpc_authflavor_t *auth_flavs;
};
extern int nfs_mount(struct nfs_mount_request *info);
@@ -99,6 +121,8 @@ extern void nfs_free_server(struct nfs_server *server);
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
struct nfs_fattr *);
+extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
+extern int nfs4_check_client_ready(struct nfs_client *clp);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -146,6 +170,20 @@ extern __be32 * nfs_decode_dirent(__be32 *, struct nfs_entry *, int);
extern struct rpc_procinfo nfs3_procedures[];
extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
+/* nfs4proc.c */
+static inline void nfs4_restart_rpc(struct rpc_task *task,
+ const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp) &&
+ test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+ rpc_restart_call(task);
+}
+
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
@@ -205,6 +243,38 @@ extern int nfs4_path_walk(struct nfs_server *server,
const char *path);
#endif
+/* read.c */
+extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
+
+/* write.c */
+extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+
+/* nfs4proc.c */
+extern int _nfs4_call_sync(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+extern int _nfs4_call_sync_session(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+
+#ifdef CONFIG_NFS_V4_1
+extern void nfs41_sequence_free_slot(const struct nfs_client *,
+ struct nfs4_sequence_res *res);
+#endif /* CONFIG_NFS_V4_1 */
+
+static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp))
+ nfs41_sequence_free_slot(clp, res);
+#endif /* CONFIG_NFS_V4_1 */
+}
+
/*
* Determine the device name as a string
*/
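
The two synchronous-call prototypes above (_nfs4_call_sync() and _nfs4_call_sync_session()) exist so that callers can go through a single per-client entry point; later in this series nfs4proc.c dispatches through nfs_client->cl_call_sync. A user-space model of that minorversion-selected dispatch, with simplified types that are not the kernel's:

#include <stdio.h>

struct msg { const char *proc; };

/* Simplified stand-ins for the two variants declared above. */
static int call_sync_v40(struct msg *m)
{
	printf("v4.0: plain synchronous RPC for %s\n", m->proc);
	return 0;
}

static int call_sync_v41(struct msg *m)
{
	printf("v4.1: reserve a session slot, then send %s\n", m->proc);
	return 0;
}

/* Models the cl_call_sync pointer: picked once, per client. */
struct client {
	unsigned int minorversion;
	int (*call_sync)(struct msg *m);
};

static void client_init(struct client *clp, unsigned int minorversion)
{
	clp->minorversion = minorversion;
	clp->call_sync = minorversion ? call_sync_v41 : call_sync_v40;
}

int main(void)
{
	struct client clp;
	struct msg m = { .proc = "GETATTR" };

	client_init(&clp, 1);
	return clp.call_sync(&m);	/* callers stay version-agnostic */
}
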
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index ca905a5bb1ba..38ef9eaec407 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -20,8 +20,116 @@
# define NFSDBG_FACILITY NFSDBG_MOUNT
#endif
+/*
+ * Defined by RFC 1094, section A.3; and RFC 1813, section 5.1.4
+ */
+#define MNTPATHLEN (1024)
+
+/*
+ * XDR data type sizes
+ */
+#define encode_dirpath_sz (1 + XDR_QUADLEN(MNTPATHLEN))
+#define MNT_status_sz (1)
+#define MNT_fhs_status_sz (1)
+#define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
+#define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE))
+#define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
+
+/*
+ * XDR argument and result sizes
+ */
+#define MNT_enc_dirpath_sz encode_dirpath_sz
+#define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz)
+#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \
+ MNT_authflav3_sz)
+
+/*
+ * Defined by RFC 1094, section A.5
+ */
+enum {
+ MOUNTPROC_NULL = 0,
+ MOUNTPROC_MNT = 1,
+ MOUNTPROC_DUMP = 2,
+ MOUNTPROC_UMNT = 3,
+ MOUNTPROC_UMNTALL = 4,
+ MOUNTPROC_EXPORT = 5,
+};
+
+/*
+ * Defined by RFC 1813, section 5.2
+ */
+enum {
+ MOUNTPROC3_NULL = 0,
+ MOUNTPROC3_MNT = 1,
+ MOUNTPROC3_DUMP = 2,
+ MOUNTPROC3_UMNT = 3,
+ MOUNTPROC3_UMNTALL = 4,
+ MOUNTPROC3_EXPORT = 5,
+};
+
static struct rpc_program mnt_program;
+/*
+ * Defined by OpenGroup XNFS Version 3W, chapter 8
+ */
+enum mountstat {
+ MNT_OK = 0,
+ MNT_EPERM = 1,
+ MNT_ENOENT = 2,
+ MNT_EACCES = 13,
+ MNT_EINVAL = 22,
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt_errtbl[] = {
+ { .status = MNT_OK, .errno = 0, },
+ { .status = MNT_EPERM, .errno = -EPERM, },
+ { .status = MNT_ENOENT, .errno = -ENOENT, },
+ { .status = MNT_EACCES, .errno = -EACCES, },
+ { .status = MNT_EINVAL, .errno = -EINVAL, },
+};
+
+/*
+ * Defined by RFC 1813, section 5.1.5
+ */
+enum mountstat3 {
+ MNT3_OK = 0, /* no error */
+ MNT3ERR_PERM = 1, /* Not owner */
+ MNT3ERR_NOENT = 2, /* No such file or directory */
+ MNT3ERR_IO = 5, /* I/O error */
+ MNT3ERR_ACCES = 13, /* Permission denied */
+ MNT3ERR_NOTDIR = 20, /* Not a directory */
+ MNT3ERR_INVAL = 22, /* Invalid argument */
+ MNT3ERR_NAMETOOLONG = 63, /* Filename too long */
+ MNT3ERR_NOTSUPP = 10004, /* Operation not supported */
+ MNT3ERR_SERVERFAULT = 10006, /* A failure on the server */
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt3_errtbl[] = {
+ { .status = MNT3_OK, .errno = 0, },
+ { .status = MNT3ERR_PERM, .errno = -EPERM, },
+ { .status = MNT3ERR_NOENT, .errno = -ENOENT, },
+ { .status = MNT3ERR_IO, .errno = -EIO, },
+ { .status = MNT3ERR_ACCES, .errno = -EACCES, },
+ { .status = MNT3ERR_NOTDIR, .errno = -ENOTDIR, },
+ { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
+ { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
+ { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
+};
+
+struct mountres {
+ int errno;
+ struct nfs_fh *fh;
+ unsigned int *auth_count;
+ rpc_authflavor_t *auth_flavors;
+};
+
struct mnt_fhstatus {
u32 status;
struct nfs_fh *fh;
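
The size macros at the top of this file are counted in 32-bit XDR words, not bytes: XDR_QUADLEN() rounds a byte count up to whole words, and each variable-length opaque gets one extra word for its length. A small stand-alone check of that arithmetic (the macros are re-declared here only for illustration; in the kernel they come from the sunrpc and NFS headers):

#include <stdio.h>

/* Same rounding rule as the kernel's XDR_QUADLEN(): bytes -> 4-byte words. */
#define XDR_QUADLEN(l)		(((l) + 3) >> 2)

#define MNTPATHLEN		1024	/* RFC 1094 A.3; RFC 1813 5.1.4 */
#define NFS2_FHSIZE		32	/* fixed-size NFSv2 file handle */

#define encode_dirpath_sz	(1 + XDR_QUADLEN(MNTPATHLEN))	/* length word + data */
#define MNT_status_sz		(1)
#define MNT_fhandle_sz		XDR_QUADLEN(NFS2_FHSIZE)	/* fixed size, no length word */
#define MNT_dec_mountres_sz	(MNT_status_sz + MNT_fhandle_sz)

int main(void)
{
	/* 1 + 256 = 257 words, i.e. at most 1028 bytes on the wire */
	printf("dirpath argument: %d words (%d bytes)\n",
	       encode_dirpath_sz, encode_dirpath_sz * 4);
	/* 1 status word + 8 file-handle words = 9 words */
	printf("MNT reply:        %d words\n", MNT_dec_mountres_sz);
	return 0;
}
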
@@ -35,8 +143,10 @@ struct mnt_fhstatus {
*/
int nfs_mount(struct nfs_mount_request *info)
{
- struct mnt_fhstatus result = {
- .fh = info->fh
+ struct mountres result = {
+ .fh = info->fh,
+ .auth_count = info->auth_flav_len,
+ .auth_flavors = info->auth_flavs,
};
struct rpc_message msg = {
.rpc_argp = info->dirpath,
@@ -68,14 +178,14 @@ int nfs_mount(struct nfs_mount_request *info)
if (info->version == NFS_MNT3_VERSION)
msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
else
- msg.rpc_proc = &mnt_clnt->cl_procinfo[MNTPROC_MNT];
+ msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];
status = rpc_call_sync(mnt_clnt, &msg, 0);
rpc_shutdown_client(mnt_clnt);
if (status < 0)
goto out_call_err;
- if (result.status != 0)
+ if (result.errno != 0)
goto out_mnt_err;
dprintk("NFS: MNT request succeeded\n");
@@ -86,72 +196,215 @@ out:
out_clnt_err:
status = PTR_ERR(mnt_clnt);
- dprintk("NFS: failed to create RPC client, status=%d\n", status);
+ dprintk("NFS: failed to create MNT RPC client, status=%d\n", status);
goto out;
out_call_err:
- dprintk("NFS: failed to start MNT request, status=%d\n", status);
+ dprintk("NFS: MNT request failed, status=%d\n", status);
goto out;
out_mnt_err:
- dprintk("NFS: MNT server returned result %d\n", result.status);
- status = nfs_stat_to_errno(result.status);
+ dprintk("NFS: MNT server returned result %d\n", result.errno);
+ status = result.errno;
goto out;
}
/*
* XDR encode/decode functions for MOUNT
*/
-static int xdr_encode_dirpath(struct rpc_rqst *req, __be32 *p,
- const char *path)
+
+static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+{
+ const u32 pathname_len = strlen(pathname);
+ __be32 *p;
+
+ if (unlikely(pathname_len > MNTPATHLEN))
+ return -EIO;
+
+ p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
+ if (unlikely(p == NULL))
+ return -EIO;
+ xdr_encode_opaque(p, pathname, pathname_len);
+
+ return 0;
+}
+
+static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p,
+ const char *dirpath)
+{
+ struct xdr_stream xdr;
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ return encode_mntdirpath(&xdr, dirpath);
+}
+
+/*
+ * RFC 1094: "A non-zero status indicates some sort of error. In this
+ * case, the status is a UNIX error number." This can be problematic
+ * if the server and client use different errno values for the same
+ * error.
+ *
+ * However, the OpenGroup XNFS spec provides a simple mapping that is
+ * independent of local errno values on the server and the client.
+ */
+static int decode_status(struct xdr_stream *xdr, struct mountres *res)
{
- p = xdr_encode_string(p, path);
+ unsigned int i;
+ u32 status;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, sizeof(status));
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = ntohl(*p);
- req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
+ if (mnt_errtbl[i].status == status) {
+ res->errno = mnt_errtbl[i].errno;
+ return 0;
+ }
+ }
+
+ dprintk("NFS: unrecognized MNT status code: %u\n", status);
+ res->errno = -EACCES;
return 0;
}
-static int xdr_decode_fhstatus(struct rpc_rqst *req, __be32 *p,
- struct mnt_fhstatus *res)
+static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
{
struct nfs_fh *fh = res->fh;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = NFS2_FHSIZE;
+ memcpy(fh->data, p, NFS2_FHSIZE);
+ return 0;
+}
+
+static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p,
+ struct mountres *res)
+{
+ struct xdr_stream xdr;
+ int status;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ status = decode_status(&xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ return decode_fhandle(&xdr, res);
+}
+
+static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
+{
+ unsigned int i;
+ u32 status;
+ __be32 *p;
- if ((res->status = ntohl(*p++)) == 0) {
- fh->size = NFS2_FHSIZE;
- memcpy(fh->data, p, NFS2_FHSIZE);
+ p = xdr_inline_decode(xdr, sizeof(status));
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = ntohl(*p);
+
+	for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
+ if (mnt3_errtbl[i].status == status) {
+ res->errno = mnt3_errtbl[i].errno;
+ return 0;
+ }
}
+
+ dprintk("NFS: unrecognized MNT3 status code: %u\n", status);
+ res->errno = -EACCES;
return 0;
}
-static int xdr_decode_fhstatus3(struct rpc_rqst *req, __be32 *p,
- struct mnt_fhstatus *res)
+static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res)
{
struct nfs_fh *fh = res->fh;
- unsigned size;
-
- if ((res->status = ntohl(*p++)) == 0) {
- size = ntohl(*p++);
- if (size <= NFS3_FHSIZE && size != 0) {
- fh->size = size;
- memcpy(fh->data, p, size);
- } else
- res->status = -EBADHANDLE;
+ u32 size;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, sizeof(size));
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ size = ntohl(*p++);
+ if (size > NFS3_FHSIZE || size == 0)
+ return -EIO;
+
+ p = xdr_inline_decode(xdr, size);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = size;
+ memcpy(fh->data, p, size);
+ return 0;
+}
+
+static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
+{
+ rpc_authflavor_t *flavors = res->auth_flavors;
+ unsigned int *count = res->auth_count;
+ u32 entries, i;
+ __be32 *p;
+
+ if (*count == 0)
+ return 0;
+
+ p = xdr_inline_decode(xdr, sizeof(entries));
+ if (unlikely(p == NULL))
+ return -EIO;
+ entries = ntohl(*p);
+ dprintk("NFS: received %u auth flavors\n", entries);
+ if (entries > NFS_MAX_SECFLAVORS)
+ entries = NFS_MAX_SECFLAVORS;
+
+ p = xdr_inline_decode(xdr, sizeof(u32) * entries);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ if (entries > *count)
+ entries = *count;
+
+ for (i = 0; i < entries; i++) {
+ flavors[i] = ntohl(*p++);
+ dprintk("NFS:\tflavor %u: %d\n", i, flavors[i]);
}
+ *count = i;
+
return 0;
}
-#define MNT_dirpath_sz (1 + 256)
-#define MNT_fhstatus_sz (1 + 8)
-#define MNT_fhstatus3_sz (1 + 16)
+static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p,
+ struct mountres *res)
+{
+ struct xdr_stream xdr;
+ int status;
+
+ xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+
+ status = decode_fhs_status(&xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ status = decode_fhandle3(&xdr, res);
+ if (unlikely(status != 0)) {
+ res->errno = -EBADHANDLE;
+ return 0;
+ }
+ return decode_auth_flavors(&xdr, res);
+}
static struct rpc_procinfo mnt_procedures[] = {
- [MNTPROC_MNT] = {
- .p_proc = MNTPROC_MNT,
- .p_encode = (kxdrproc_t) xdr_encode_dirpath,
- .p_decode = (kxdrproc_t) xdr_decode_fhstatus,
- .p_arglen = MNT_dirpath_sz,
- .p_replen = MNT_fhstatus_sz,
- .p_statidx = MNTPROC_MNT,
+ [MOUNTPROC_MNT] = {
+ .p_proc = MOUNTPROC_MNT,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_decode = (kxdrproc_t)mnt_dec_mountres,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres_sz,
+ .p_statidx = MOUNTPROC_MNT,
.p_name = "MOUNT",
},
};
@@ -159,10 +412,10 @@ static struct rpc_procinfo mnt_procedures[] = {
static struct rpc_procinfo mnt3_procedures[] = {
[MOUNTPROC3_MNT] = {
.p_proc = MOUNTPROC3_MNT,
- .p_encode = (kxdrproc_t) xdr_encode_dirpath,
- .p_decode = (kxdrproc_t) xdr_decode_fhstatus3,
- .p_arglen = MNT_dirpath_sz,
- .p_replen = MNT_fhstatus3_sz,
+ .p_encode = (kxdrproc_t)mnt_enc_dirpath,
+ .p_decode = (kxdrproc_t)mnt_dec_mountres3,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres3_sz,
.p_statidx = MOUNTPROC3_MNT,
.p_name = "MOUNT",
},
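
As the RFC 1094 note before decode_status() explains, the MNT status code from the wire is translated through a fixed table rather than being trusted as a local errno, with unknown codes falling back to -EACCES. A user-space sketch of that lookup (the table contents mirror mnt_errtbl; the field is named errno_val only because errno is a macro in user space):

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const struct {
	unsigned int status;	/* value from the wire */
	int errno_val;		/* local translation */
} mnt_errtbl[] = {
	{ 0,  0       },	/* MNT_OK */
	{ 1,  -EPERM  },
	{ 2,  -ENOENT },
	{ 13, -EACCES },
	{ 22, -EINVAL },
};

/* Unknown codes fall back to -EACCES, as in the kernel helper. */
static int mnt_status_to_errno(unsigned int status)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++)
		if (mnt_errtbl[i].status == status)
			return mnt_errtbl[i].errno_val;
	return -EACCES;
}

int main(void)
{
	printf("status 13 -> %d\n", mnt_status_to_errno(13));	/* -EACCES */
	printf("status 99 -> %d\n", mnt_status_to_errno(99));	/* fallback */
	return 0;
}
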
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index f01caec84463..40c766782891 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -65,6 +65,11 @@ char *nfs_path(const char *base,
dentry = dentry->d_parent;
}
spin_unlock(&dcache_lock);
+ if (*end != '/') {
+ if (--buflen < 0)
+ goto Elong;
+ *--end = '/';
+ }
namelen = strlen(base);
/* Strip off excess slashes in base string */
while (namelen > 0 && base[namelen - 1] == '/')
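
nfs_path() fills its buffer from the end: the dentry walk prepends "/<name>" for each component, and the new check above guarantees a separating '/' before the export string is prepended, even when the walk contributed nothing. A simplified user-space model of that right-to-left assembly (an array of names stands in for the dentry walk; length handling is reduced to the essentials):

#include <stdio.h>
#include <string.h>

/* Returns a pointer into buf, or NULL if buf is too small. */
static char *build_path(const char *base, const char **names, int count,
			char *buf, size_t buflen)
{
	char *end = buf + buflen;
	size_t namelen;
	int i;

	*--end = '\0';
	buflen--;

	/* walk "dentries" leaf to root, prepending "/<name>" each time */
	for (i = count - 1; i >= 0; i--) {
		size_t len = strlen(names[i]);

		if (buflen < len + 1)
			return NULL;
		end -= len;
		memcpy(end, names[i], len);
		*--end = '/';
		buflen -= len + 1;
	}

	/* the fix above: make sure a '/' separates base from the rest */
	if (*end != '/') {
		if (buflen < 1)
			return NULL;
		*--end = '/';
		buflen--;
	}

	/* strip excess trailing slashes from base, then prepend it */
	namelen = strlen(base);
	while (namelen > 0 && base[namelen - 1] == '/')
		namelen--;
	if (buflen < namelen)
		return NULL;
	end -= namelen;
	memcpy(end, base, namelen);
	return end;
}

int main(void)
{
	char buf[64];
	const char *names[] = { "export", "home" };

	/* "server:/vol0/export/home" */
	printf("%s\n", build_path("server:/vol0/", names, 2, buf, sizeof(buf)));
	/* root of the mount: "server:/vol0/" */
	printf("%s\n", build_path("server:/vol0", NULL, 0, buf, sizeof(buf)));
	return 0;
}
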
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 6bbf0e6daad2..bac60515a4b3 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -207,8 +207,6 @@ struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
status = nfs_revalidate_inode(server, inode);
if (status < 0)
return ERR_PTR(status);
- if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
- nfs_zap_acl_cache(inode);
acl = nfs3_get_cached_acl(inode, type);
if (acl != ERR_PTR(-EAGAIN))
return acl;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 84345deab26f..61bc3a32e1e2 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
+ NFS4CLNT_SESSION_SETUP,
};
/*
@@ -177,6 +178,14 @@ struct nfs4_state_recovery_ops {
int state_flag_bit;
int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
int (*recover_lock)(struct nfs4_state *, struct file_lock *);
+ int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
+ struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
+};
+
+struct nfs4_state_maintenance_ops {
+ int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *);
+ struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
+ int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};
extern const struct dentry_operations nfs4_dentry_operations;
@@ -193,6 +202,7 @@ extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struc
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
@@ -200,8 +210,26 @@ extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fh
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
struct nfs4_fs_locations *fs_locations, struct page *page);
-extern struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops;
-extern struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops;
+extern struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[];
+extern struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[];
+#if defined(CONFIG_NFS_V4_1)
+extern int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ int cache_reply, struct rpc_task *task);
+extern void nfs4_destroy_session(struct nfs4_session *session);
+extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
+extern int nfs4_proc_create_session(struct nfs_client *, int reset);
+extern int nfs4_proc_destroy_session(struct nfs4_session *);
+#else /* CONFIG_NFS_V4_1 */
+static inline int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ int cache_reply, struct rpc_task *task)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+extern struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[];
extern const u32 nfs4_fattr_bitmap[2];
extern const u32 nfs4_statfs_bitmap[2];
@@ -216,7 +244,12 @@ extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);
/* nfs4state.c */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
+#if defined(CONFIG_NFS_V4_1)
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4674f8092da8..92ce43517814 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -48,11 +48,14 @@
#include <linux/smp_lock.h>
#include <linux/namei.h>
#include <linux/mount.h>
+#include <linux/module.h>
+#include <linux/sunrpc/bc_xprt.h>
#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
+#include "callback.h"
#define NFSDBG_FACILITY NFSDBG_PROC
@@ -247,7 +250,25 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
ret = nfs4_wait_clnt_recover(clp);
if (ret == 0)
exception->retry = 1;
+#if !defined(CONFIG_NFS_V4_1)
break;
+#else /* !defined(CONFIG_NFS_V4_1) */
+ if (!nfs4_has_session(server->nfs_client))
+ break;
+ /* FALLTHROUGH */
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR: %d Reset session\n", __func__,
+ errorcode);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ exception->retry = 1;
+ /* FALLTHROUGH */
+#endif /* !defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
case -NFS4ERR_GRACE:
case -NFS4ERR_DELAY:
@@ -271,6 +292,353 @@ static void renew_lease(const struct nfs_server *server, unsigned long timestamp
spin_unlock(&clp->cl_lock);
}
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * nfs4_free_slot - free a slot and efficiently update slot table.
+ *
+ * freeing a slot is trivially done by clearing its respective bit
+ * in the bitmap.
+ * If the freed slotid equals highest_used_slotid we want to update it
+ * so that the server can size down the slot table if needed;
+ * otherwise we know that highest_used_slotid is still in use.
+ * When updating highest_used_slotid there may be "holes" in the bitmap
+ * so we need to scan down from highest_used_slotid to 0 looking for the now
+ * highest slotid in use.
+ * If none found, highest_used_slotid is set to -1.
+ */
+static void
+nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
+{
+ int slotid = free_slotid;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ /* clear used bit in bitmap */
+ __clear_bit(slotid, tbl->used_slots);
+
+ /* update highest_used_slotid when it is freed */
+ if (slotid == tbl->highest_used_slotid) {
+ slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid >= 0 && slotid < tbl->max_slots)
+ tbl->highest_used_slotid = slotid;
+ else
+ tbl->highest_used_slotid = -1;
+ }
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
+ free_slotid, tbl->highest_used_slotid);
+}
+
+void nfs41_sequence_free_slot(const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+ struct nfs4_slot_table *tbl;
+
+ if (!nfs4_has_session(clp)) {
+ dprintk("%s: No session\n", __func__);
+ return;
+ }
+ tbl = &clp->cl_session->fc_slot_table;
+ if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
+ dprintk("%s: No slot\n", __func__);
+		/* just wake up the next task waiting, since we
+		 * may not have consumed a slot after all */
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
+ return;
+ }
+ nfs4_free_slot(tbl, res->sr_slotid);
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+}
+
+static void nfs41_sequence_done(struct nfs_client *clp,
+ struct nfs4_sequence_res *res,
+ int rpc_status)
+{
+ unsigned long timestamp;
+ struct nfs4_slot_table *tbl;
+ struct nfs4_slot *slot;
+
+ /*
+ * sr_status remains 1 if an RPC level error occurred. The server
+	 * may or may not have processed the sequence operation.
+ * Proceed as if the server received and processed the sequence
+ * operation.
+ */
+ if (res->sr_status == 1)
+ res->sr_status = NFS_OK;
+
+ /* -ERESTARTSYS can result in skipping nfs41_sequence_setup */
+ if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
+ goto out;
+
+ tbl = &clp->cl_session->fc_slot_table;
+ slot = tbl->slots + res->sr_slotid;
+
+ if (res->sr_status == 0) {
+ /* Update the slot's sequence and clientid lease timer */
+ ++slot->seq_nr;
+ timestamp = res->sr_renewal_time;
+ spin_lock(&clp->cl_lock);
+ if (time_before(clp->cl_last_renewal, timestamp))
+ clp->cl_last_renewal = timestamp;
+ spin_unlock(&clp->cl_lock);
+ return;
+ }
+out:
+ /* The session may be reset by one of the error handlers. */
+ dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
+ nfs41_sequence_free_slot(clp, res);
+}
+
+/*
+ * nfs4_find_slot - efficiently look for a free slot
+ *
+ * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
+ * If found, we mark the slot as used, update the highest_used_slotid,
+ * and set up the sequence operation args accordingly.
+ * The slot number is returned if found, or NFS4_MAX_SLOT_TABLE otherwise.
+ *
+ * Note: must be called while holding the slot_tbl_lock.
+ */
+static u8
+nfs4_find_slot(struct nfs4_slot_table *tbl, struct rpc_task *task)
+{
+ int slotid;
+ u8 ret_id = NFS4_MAX_SLOT_TABLE;
+ BUILD_BUG_ON((u8)NFS4_MAX_SLOT_TABLE != (int)NFS4_MAX_SLOT_TABLE);
+
+ dprintk("--> %s used_slots=%04lx highest_used=%d max_slots=%d\n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid,
+ tbl->max_slots);
+ slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid >= tbl->max_slots)
+ goto out;
+ __set_bit(slotid, tbl->used_slots);
+ if (slotid > tbl->highest_used_slotid)
+ tbl->highest_used_slotid = slotid;
+ ret_id = slotid;
+out:
+ dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
+ return ret_id;
+}
+
+static int nfs4_recover_session(struct nfs4_session *session)
+{
+ struct nfs_client *clp = session->clp;
+ int ret;
+
+ for (;;) {
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret != 0)
+ return ret;
+ if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ break;
+ nfs4_schedule_state_manager(clp);
+ }
+ return 0;
+}
+
+static int nfs41_setup_sequence(struct nfs4_session *session,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply,
+ struct rpc_task *task)
+{
+ struct nfs4_slot *slot;
+ struct nfs4_slot_table *tbl;
+ int status = 0;
+ u8 slotid;
+
+ dprintk("--> %s\n", __func__);
+ /* slot already allocated? */
+ if (res->sr_slotid != NFS4_MAX_SLOT_TABLE)
+ return 0;
+
+ memset(res, 0, sizeof(*res));
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ tbl = &session->fc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) {
+ if (tbl->highest_used_slotid != -1) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("<-- %s: Session reset: draining\n", __func__);
+ return -EAGAIN;
+ }
+
+ /* The slot table is empty; start the reset thread */
+ dprintk("%s Session Reset\n", __func__);
+ spin_unlock(&tbl->slot_tbl_lock);
+ status = nfs4_recover_session(session);
+ if (status)
+ return status;
+ spin_lock(&tbl->slot_tbl_lock);
+ }
+
+ slotid = nfs4_find_slot(tbl, task);
+ if (slotid == NFS4_MAX_SLOT_TABLE) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("<-- %s: no free slots\n", __func__);
+ return -EAGAIN;
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+
+ slot = tbl->slots + slotid;
+ args->sa_session = session;
+ args->sa_slotid = slotid;
+ args->sa_cache_this = cache_reply;
+
+ dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
+
+ res->sr_session = session;
+ res->sr_slotid = slotid;
+ res->sr_renewal_time = jiffies;
+ /*
+ * sr_status is only set in decode_sequence, and so will remain
+ * set to 1 if an rpc level failure occurs.
+ */
+ res->sr_status = 1;
+ return 0;
+}
+
+int nfs4_setup_sequence(struct nfs_client *clp,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply,
+ struct rpc_task *task)
+{
+ int ret = 0;
+
+ dprintk("--> %s clp %p session %p sr_slotid %d\n",
+ __func__, clp, clp->cl_session, res->sr_slotid);
+
+ if (!nfs4_has_session(clp))
+ goto out;
+ ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
+ task);
+ if (ret != -EAGAIN) {
+ /* terminate rpc task */
+ task->tk_status = ret;
+ task->tk_action = NULL;
+ }
+out:
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
+}
+
+struct nfs41_call_sync_data {
+ struct nfs_client *clp;
+ struct nfs4_sequence_args *seq_args;
+ struct nfs4_sequence_res *seq_res;
+ int cache_reply;
+};
+
+static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ dprintk("--> %s data->clp->cl_session %p\n", __func__,
+ data->clp->cl_session);
+ if (nfs4_setup_sequence(data->clp, data->seq_args,
+ data->seq_res, data->cache_reply, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
+ nfs41_sequence_free_slot(data->clp, data->seq_res);
+}
+
+struct rpc_call_ops nfs41_call_sync_ops = {
+ .rpc_call_prepare = nfs41_call_sync_prepare,
+ .rpc_call_done = nfs41_call_sync_done,
+};
+
+static int nfs4_call_sync_sequence(struct nfs_client *clp,
+ struct rpc_clnt *clnt,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ int ret;
+ struct rpc_task *task;
+ struct nfs41_call_sync_data data = {
+ .clp = clp,
+ .seq_args = args,
+ .seq_res = res,
+ .cache_reply = cache_reply,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clnt,
+ .rpc_message = msg,
+ .callback_ops = &nfs41_call_sync_ops,
+ .callback_data = &data
+ };
+
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ task = rpc_run_task(&task_setup);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else {
+ ret = task->tk_status;
+ rpc_put_task(task);
+ }
+ return ret;
+}
+
+int _nfs4_call_sync_session(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ return nfs4_call_sync_sequence(server->nfs_client, server->client,
+ msg, args, res, cache_reply);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+int _nfs4_call_sync(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ args->sa_session = res->sr_session = NULL;
+ return rpc_call_sync(server->client, msg, 0);
+}
+
+#define nfs4_call_sync(server, msg, args, res, cache_reply) \
+ (server)->nfs_client->cl_call_sync((server), (msg), &(args)->seq_args, \
+ &(res)->seq_res, (cache_reply))
+
+static void nfs4_sequence_done(const struct nfs_server *server,
+ struct nfs4_sequence_res *res, int rpc_status)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(server->nfs_client))
+ nfs41_sequence_done(server->nfs_client, res, rpc_status);
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+/* no restart, therefore free slot here */
+static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
+ struct nfs4_sequence_res *res,
+ int rpc_status)
+{
+ nfs4_sequence_done(server, res, rpc_status);
+ nfs4_sequence_free_slot(server->nfs_client, res);
+}
+
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
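
A stand-alone model of the slot-table bookkeeping described in the comments above: the lowest free bit is handed out, and when the highest slot in use is freed, the search scans down past any holes to find the new highest (the kernel does this with find_first_zero_bit()/find_last_bit() on tbl->used_slots; the structure below is a simplification, not the kernel's):

#include <stdio.h>

#define MAX_SLOTS	16

struct slot_table {
	unsigned long used;		/* one bit per slot */
	int highest_used;		/* -1 when the table is empty */
};

/* Allocate the lowest free slot, as nfs4_find_slot() does. */
static int slot_alloc(struct slot_table *tbl)
{
	int slotid;

	for (slotid = 0; slotid < MAX_SLOTS; slotid++)
		if (!(tbl->used & (1UL << slotid)))
			break;
	if (slotid == MAX_SLOTS)
		return -1;		/* no free slot: caller must wait */
	tbl->used |= 1UL << slotid;
	if (slotid > tbl->highest_used)
		tbl->highest_used = slotid;
	return slotid;
}

/* Free a slot; if it was the highest in use, scan down for the new one. */
static void slot_free(struct slot_table *tbl, int slotid)
{
	tbl->used &= ~(1UL << slotid);
	if (slotid == tbl->highest_used) {
		int i;

		tbl->highest_used = -1;
		for (i = slotid - 1; i >= 0; i--)
			if (tbl->used & (1UL << i)) {
				tbl->highest_used = i;
				break;
			}
	}
}

int main(void)
{
	struct slot_table tbl = { .used = 0, .highest_used = -1 };
	int a = slot_alloc(&tbl), b = slot_alloc(&tbl), c = slot_alloc(&tbl);

	slot_free(&tbl, b);		/* leaves a hole at slot 1 */
	slot_free(&tbl, c);		/* highest freed: scan down past the hole */
	printf("slots %d %d %d, highest now %d\n", a, b, c, tbl.highest_used);
	return 0;
}
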
@@ -312,6 +680,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
+ p->o_res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path,
@@ -804,16 +1173,30 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
err = _nfs4_open_delegation_recall(ctx, state, stateid);
switch (err) {
case 0:
- return err;
+ case -ENOENT:
+ case -ESTALE:
+ goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
/* Don't recall a delegation if it was lost */
nfs4_schedule_state_recovery(server->nfs_client);
- return err;
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+ case -ENOMEM:
+ err = 0;
+ goto out;
}
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
+out:
return err;
}
@@ -929,6 +1312,10 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
}
data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
+ &data->o_arg.seq_args,
+ &data->o_res.seq_res, 1, task))
+ return;
rpc_call_start(task);
return;
out_no_action:
@@ -941,6 +1328,10 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
struct nfs4_opendata *data = calldata;
data->rpc_status = task->tk_status;
+
+ nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res,
+ task->tk_status);
+
if (RPC_ASSASSINATED(task))
return;
if (task->tk_status == 0) {
@@ -1269,7 +1660,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
} else
memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
return status;
@@ -1318,6 +1709,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
struct nfs4_state *state = calldata->state;
struct nfs_server *server = NFS_SERVER(calldata->inode);
+ nfs4_sequence_done(server, &calldata->res.seq_res, task->tk_status);
if (RPC_ASSASSINATED(task))
return;
/* hmm. we are done with the inode, and in the process of freeing
@@ -1336,10 +1728,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return;
}
}
+ nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
@@ -1380,6 +1773,10 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
calldata->arg.fmode = FMODE_WRITE;
}
calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence((NFS_SERVER(calldata->inode))->nfs_client,
+ &calldata->arg.seq_args, &calldata->res.seq_res,
+ 1, task))
+ return;
rpc_call_start(task);
}
@@ -1419,13 +1816,15 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
};
int status = -ENOMEM;
- calldata = kmalloc(sizeof(*calldata), GFP_KERNEL);
+ calldata = kzalloc(sizeof(*calldata), GFP_KERNEL);
if (calldata == NULL)
goto out;
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
calldata->arg.stateid = &state->open_stateid;
+ if (nfs4_has_session(server->nfs_client))
+ memset(calldata->arg.stateid->data, 0, 4); /* clear seqid */
/* Serialization for the sequence id */
calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid);
if (calldata->arg.seqid == NULL)
@@ -1435,6 +1834,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
+ calldata->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
calldata->path.mnt = mntget(path->mnt);
calldata->path.dentry = dget(path->dentry);
@@ -1584,15 +1984,18 @@ void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
{
+ struct nfs4_server_caps_arg args = {
+ .fhandle = fhandle,
+ };
struct nfs4_server_caps_res res = {};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
- .rpc_argp = fhandle,
+ .rpc_argp = &args,
.rpc_resp = &res,
};
int status;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (status == 0) {
memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
@@ -1606,6 +2009,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
server->acl_bitmask = res.acl_bitmask;
}
+
return status;
}
@@ -1637,8 +2041,15 @@ static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
.rpc_argp = &args,
.rpc_resp = &res,
};
+ int status;
+
nfs_fattr_init(info->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_recover_expired_lease(server);
+ if (!status)
+ status = nfs4_check_client_ready(server->nfs_client);
+ if (!status)
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
+ return status;
}
static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -1728,7 +2139,7 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
};
nfs_fattr_init(fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
@@ -1812,7 +2223,7 @@ static int _nfs4_proc_lookupfh(struct nfs_server *server, const struct nfs_fh *d
nfs_fattr_init(fattr);
dprintk("NFS call lookupfh %s\n", name->name);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
dprintk("NFS reply lookupfh: %d\n", status);
return status;
}
@@ -1898,7 +2309,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
args.access |= NFS4_ACCESS_EXECUTE;
}
nfs_fattr_init(&fattr);
- status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
if (!status) {
entry->mask = 0;
if (res.access & NFS4_ACCESS_READ)
@@ -1957,13 +2368,14 @@ static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
.pglen = pglen,
.pages = &page,
};
+ struct nfs4_readlink_res res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
.rpc_argp = &args,
- .rpc_resp = NULL,
+ .rpc_resp = &res,
};
- return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ return nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
}
static int nfs4_proc_readlink(struct inode *inode, struct page *page,
@@ -2057,7 +2469,7 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
int status;
nfs_fattr_init(&res.dir_attr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 1);
if (status == 0) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, &res.dir_attr);
@@ -2092,8 +2504,10 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
{
struct nfs_removeres *res = task->tk_msg.rpc_resp;
+ nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
+ nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, &res->dir_attr);
return 1;
@@ -2125,7 +2539,7 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
nfs_fattr_init(res.old_fattr);
nfs_fattr_init(res.new_fattr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(old_dir, &res.old_cinfo);
@@ -2174,7 +2588,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
nfs_fattr_init(res.fattr);
nfs_fattr_init(res.dir_attr);
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
if (!status) {
update_changeattr(dir, &res.cinfo);
nfs_post_op_update_inode(dir, res.dir_attr);
@@ -2235,7 +2649,8 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
{
- int status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0);
+ int status = nfs4_call_sync(NFS_SERVER(dir), &data->msg,
+ &data->arg, &data->res, 1);
if (status == 0) {
update_changeattr(dir, &data->res.dir_cinfo);
nfs_post_op_update_inode(dir, data->res.dir_fattr);
@@ -2344,7 +2759,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
(unsigned long long)cookie);
nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
res.pgbase = args.pgbase;
- status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
if (status == 0)
memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
@@ -2422,14 +2837,17 @@ static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_statfs_res res = {
+ .fsstat = fsstat,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
.rpc_argp = &args,
- .rpc_resp = fsstat,
+ .rpc_resp = &res,
};
nfs_fattr_init(fsstat->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
@@ -2451,13 +2869,16 @@ static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_fsinfo_res res = {
+ .fsinfo = fsinfo,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
.rpc_argp = &args,
- .rpc_resp = fsinfo,
+ .rpc_resp = &res,
};
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
@@ -2486,10 +2907,13 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle
.fh = fhandle,
.bitmask = server->attr_bitmask,
};
+ struct nfs4_pathconf_res res = {
+ .pathconf = pathconf,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
.rpc_argp = &args,
- .rpc_resp = pathconf,
+ .rpc_resp = &res,
};
/* None of the pathconf attributes are mandatory to implement */
@@ -2499,7 +2923,7 @@ static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle
}
nfs_fattr_init(pathconf->fattr);
- return rpc_call_sync(server->client, &msg, 0);
+ return nfs4_call_sync(server, &msg, &args, &res, 0);
}
static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
@@ -2520,8 +2944,13 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_server *server = NFS_SERVER(data->inode);
+ dprintk("--> %s\n", __func__);
+
+ /* nfs4_sequence_free_slot called in the read rpc_call_done */
+ nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
+
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
@@ -2541,8 +2970,12 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
+ /* slot is freed in nfs_writeback_done */
+ nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
+ task->tk_status);
+
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -2567,10 +3000,14 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
+ nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
+ task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
+ nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
+ &data->res.seq_res);
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
@@ -2603,6 +3040,9 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
+ dprintk("%s calling put_rpccred on rpc_cred %p\n", __func__,
+ task->tk_msg.rpc_cred);
+ put_rpccred(task->tk_msg.rpc_cred);
}
static const struct rpc_call_ops nfs4_renew_ops = {
@@ -2742,12 +3182,14 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
.acl_pages = pages,
.acl_len = buflen,
};
- size_t resp_len = buflen;
+ struct nfs_getaclres res = {
+ .acl_len = buflen,
+ };
void *resp_buf;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
.rpc_argp = &args,
- .rpc_resp = &resp_len,
+ .rpc_resp = &res,
};
struct page *localpage = NULL;
int ret;
@@ -2761,26 +3203,26 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
return -ENOMEM;
args.acl_pages[0] = localpage;
args.acl_pgbase = 0;
- resp_len = args.acl_len = PAGE_SIZE;
+ args.acl_len = PAGE_SIZE;
} else {
resp_buf = buf;
buf_to_pages(buf, buflen, args.acl_pages, &args.acl_pgbase);
}
- ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ ret = nfs4_call_sync(NFS_SERVER(inode), &msg, &args, &res, 0);
if (ret)
goto out_free;
- if (resp_len > args.acl_len)
- nfs4_write_cached_acl(inode, NULL, resp_len);
+ if (res.acl_len > args.acl_len)
+ nfs4_write_cached_acl(inode, NULL, res.acl_len);
else
- nfs4_write_cached_acl(inode, resp_buf, resp_len);
+ nfs4_write_cached_acl(inode, resp_buf, res.acl_len);
if (buf) {
ret = -ERANGE;
- if (resp_len > buflen)
+ if (res.acl_len > buflen)
goto out_free;
if (localpage)
- memcpy(buf, resp_buf, resp_len);
+ memcpy(buf, resp_buf, res.acl_len);
}
- ret = resp_len;
+ ret = res.acl_len;
out_free:
if (localpage)
__free_page(localpage);
@@ -2810,8 +3252,6 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
ret = nfs_revalidate_inode(server, inode);
if (ret < 0)
return ret;
- if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
- nfs_zap_acl_cache(inode);
ret = nfs4_read_cached_acl(inode, buf, buflen);
if (ret != -ENOENT)
return ret;
@@ -2827,10 +3267,11 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
.acl_pages = pages,
.acl_len = buflen,
};
+ struct nfs_setaclres res;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
.rpc_argp = &arg,
- .rpc_resp = NULL,
+ .rpc_resp = &res,
};
int ret;
@@ -2838,7 +3279,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
return -EOPNOTSUPP;
nfs_inode_return_delegation(inode);
buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
- ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);
return ret;
@@ -2857,10 +3298,8 @@ static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen
}
static int
-nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
+_nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs_client *clp, struct nfs4_state *state)
{
- struct nfs_client *clp = server->nfs_client;
-
if (!clp || task->tk_status >= 0)
return 0;
switch(task->tk_status) {
@@ -2879,8 +3318,23 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
+#if defined(CONFIG_NFS_V4_1)
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session\n", __func__,
+ task->tk_status);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ task->tk_status = 0;
+ return -EAGAIN;
+#endif /* CONFIG_NFS_V4_1 */
case -NFS4ERR_DELAY:
- nfs_inc_server_stats(server, NFSIOS_DELAY);
+ if (server)
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
rpc_delay(task, NFS4_POLL_RETRY_MAX);
task->tk_status = 0;
@@ -2893,6 +3347,12 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
return 0;
}
+static int
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
+{
+ return _nfs4_async_handle_error(task, server, server->nfs_client, state);
+}
+
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
{
nfs4_verifier sc_verifier;
@@ -3000,6 +3460,10 @@ struct nfs4_delegreturndata {
static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
+
+ nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res,
+ task->tk_status);
+
data->rpc_status = task->tk_status;
if (data->rpc_status == 0)
renew_lease(data->res.server, data->timestamp);
@@ -3010,7 +3474,25 @@ static void nfs4_delegreturn_release(void *calldata)
kfree(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_delegreturndata *d_data;
+
+ d_data = (struct nfs4_delegreturndata *)data;
+
+ if (nfs4_setup_sequence(d_data->res.server->nfs_client,
+ &d_data->args.seq_args,
+ &d_data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs4_delegreturn_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs4_delegreturn_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs4_delegreturn_done,
.rpc_release = nfs4_delegreturn_release,
};
@@ -3032,7 +3514,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
};
int status = 0;
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
data->args.fhandle = &data->fh;
@@ -3042,6 +3524,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
memcpy(&data->stateid, stateid, sizeof(data->stateid));
data->res.fattr = &data->fattr;
data->res.server = server;
+ data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
nfs_fattr_init(data->res.fattr);
data->timestamp = jiffies;
data->rpc_status = 0;
@@ -3127,7 +3610,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
goto out;
lsp = request->fl_u.nfs4_fl.owner;
arg.lock_owner.id = lsp->ls_id.id;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &arg, &res, 1);
switch (status) {
case 0:
request->fl_type = F_UNLCK;
@@ -3187,13 +3670,14 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
struct nfs4_unlockdata *p;
struct inode *inode = lsp->ls_state->inode;
- p = kmalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL)
return NULL;
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
@@ -3217,6 +3701,8 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
{
struct nfs4_unlockdata *calldata = data;
+ nfs4_sequence_done(calldata->server, &calldata->res.seq_res,
+ task->tk_status);
if (RPC_ASSASSINATED(task))
return;
switch (task->tk_status) {
@@ -3233,8 +3719,11 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
- rpc_restart_call(task);
+ nfs4_restart_rpc(task,
+ calldata->server->nfs_client);
}
+ nfs4_sequence_free_slot(calldata->server->nfs_client,
+ &calldata->res.seq_res);
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
@@ -3249,6 +3738,10 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
return;
}
calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(calldata->server->nfs_client,
+ &calldata->arg.seq_args,
+ &calldata->res.seq_res, 1, task))
+ return;
rpc_call_start(task);
}
@@ -3341,6 +3834,7 @@ struct nfs4_lockdata {
unsigned long timestamp;
int rpc_status;
int cancelled;
+ struct nfs_server *server;
};
static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
@@ -3366,7 +3860,9 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->res.lock_seqid = p->arg.lock_seqid;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
p->lsp = lsp;
+ p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
@@ -3396,6 +3892,9 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->server->nfs_client, &data->arg.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
rpc_call_start(task);
dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
@@ -3406,6 +3905,9 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
dprintk("%s: begin!\n", __func__);
+ nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
+ task->tk_status);
+
data->rpc_status = task->tk_status;
if (RPC_ASSASSINATED(task))
goto out;
@@ -3487,8 +3989,6 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
- if (ret == -NFS4ERR_DENIED)
- ret = -EAGAIN;
} else
data->cancelled = 1;
rpc_put_task(task);
@@ -3576,9 +4076,11 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
int err;
do {
+ err = _nfs4_proc_setlk(state, cmd, request);
+ if (err == -NFS4ERR_DENIED)
+ err = -EAGAIN;
err = nfs4_handle_exception(NFS_SERVER(state->inode),
- _nfs4_proc_setlk(state, cmd, request),
- &exception);
+ err, &exception);
} while (exception.retry);
return err;
}
@@ -3630,8 +4132,37 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
goto out;
do {
err = _nfs4_do_setlk(state, F_SETLK, fl, 0);
- if (err != -NFS4ERR_DELAY)
- break;
+ switch (err) {
+ default:
+ printk(KERN_ERR "%s: unhandled error %d.\n",
+ __func__, err);
+ case 0:
+ case -ESTALE:
+ goto out;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ nfs4_schedule_state_recovery(server->nfs_client);
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OPENMODE:
+ nfs4_state_mark_reclaim_nograce(server->nfs_client, state);
+ err = 0;
+ goto out;
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ err = 0;
+ goto out;
+ case -NFS4ERR_DELAY:
+ break;
+ }
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
out:
@@ -3706,10 +4237,13 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
.page = page,
.bitmask = bitmask,
};
+ struct nfs4_fs_locations_res res = {
+ .fs_locations = fs_locations,
+ };
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
.rpc_argp = &args,
- .rpc_resp = fs_locations,
+ .rpc_resp = &res,
};
int status;
@@ -3717,24 +4251,720 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
- status = rpc_call_sync(server->client, &msg, 0);
+ status = nfs4_call_sync(server, &msg, &args, &res, 0);
nfs_fixup_referral_attributes(&fs_locations->fattr);
dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
-struct nfs4_state_recovery_ops nfs4_reboot_recovery_ops = {
+#ifdef CONFIG_NFS_V4_1
+/*
+ * nfs4_proc_exchange_id()
+ *
+ * Since the clientid has expired, all compounds using sessions
+ * associated with the stale clientid will return NFS4ERR_BADSESSION
+ * in the sequence operation, and will therefore be in some phase of
+ * session reset.
+ */
+static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ nfs4_verifier verifier;
+ struct nfs41_exchange_id_args args = {
+ .client = clp,
+ .flags = clp->cl_exchange_flags,
+ };
+ struct nfs41_exchange_id_res res = {
+ .client = clp,
+ };
+ int status;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ __be32 *p;
+
+ dprintk("--> %s\n", __func__);
+ BUG_ON(clp == NULL);
+
+ p = (u32 *)verifier.data;
+ *p++ = htonl((u32)clp->cl_boot_time.tv_sec);
+ *p = htonl((u32)clp->cl_boot_time.tv_nsec);
+ args.verifier = &verifier;
+
+ while (1) {
+ args.id_len = scnprintf(args.id, sizeof(args.id),
+ "%s/%s %u",
+ clp->cl_ipaddr,
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR),
+ clp->cl_id_uniquifier);
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+
+ if (status != NFS4ERR_CLID_INUSE)
+ break;
+
+ if (signalled())
+ break;
+
+ if (++clp->cl_id_uniquifier == 0)
+ break;
+ }
+
+ dprintk("<-- %s status= %d\n", __func__, status);
+ return status;
+}
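
The loop above retries EXCHANGE_ID with a fresh co_ownerid string whenever the
server answers NFS4ERR_CLID_INUSE, bumping cl_id_uniquifier until it wraps or a
signal arrives. A stand-alone sketch of that retry pattern (the
send_exchange_id() stub, the addresses and the collision count are illustrative,
not the kernel API):

    /* Stand-alone model of the CLID_INUSE retry loop; not kernel code. */
    #include <stdio.h>

    #define NFS4ERR_CLID_INUSE 10017

    /* Pretend the first two owner-id strings collide on the server. */
    static int send_exchange_id(const char *ownerid)
    {
            static int collisions = 2;
            printf("EXCHANGE_ID co_ownerid=\"%s\"\n", ownerid);
            return collisions-- > 0 ? NFS4ERR_CLID_INUSE : 0;
    }

    int main(void)
    {
            char ownerid[128];
            unsigned int uniquifier = 0;
            int status;

            do {
                    snprintf(ownerid, sizeof(ownerid),
                             "192.168.1.10/10.0.0.1 %u", uniquifier);
                    status = send_exchange_id(ownerid);
                    if (status != NFS4ERR_CLID_INUSE)
                            break;
            } while (++uniquifier != 0);    /* give up if the counter wraps */

            printf("final status %d\n", status);
            return 0;
    }
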
+
+struct nfs4_get_lease_time_data {
+ struct nfs4_get_lease_time_args *args;
+ struct nfs4_get_lease_time_res *res;
+ struct nfs_client *clp;
+};
+
+static void nfs4_get_lease_time_prepare(struct rpc_task *task,
+ void *calldata)
+{
+ int ret;
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ /* just set up the sequence; do not trigger session recovery
+ since we're invoked from within one */
+ ret = nfs41_setup_sequence(data->clp->cl_session,
+ &data->args->la_seq_args,
+ &data->res->lr_seq_res, 0, task);
+
+ BUG_ON(ret == -EAGAIN);
+ rpc_call_start(task);
+ dprintk("<-- %s\n", __func__);
+}
+
+/*
+ * Called from nfs4_state_manager thread for session setup, so don't recover
+ * from sequence operation or clientid errors.
+ */
+static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ nfs41_sequence_done(data->clp, &data->res->lr_seq_res, task->tk_status);
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
+ rpc_delay(task, NFS4_POLL_RETRY_MIN);
+ task->tk_status = 0;
+ nfs4_restart_rpc(task, data->clp);
+ return;
+ }
+ nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
+ dprintk("<-- %s\n", __func__);
+}
+
+struct rpc_call_ops nfs4_get_lease_time_ops = {
+ .rpc_call_prepare = nfs4_get_lease_time_prepare,
+ .rpc_call_done = nfs4_get_lease_time_done,
+};
+
+int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
+{
+ struct rpc_task *task;
+ struct nfs4_get_lease_time_args args;
+ struct nfs4_get_lease_time_res res = {
+ .lr_fsinfo = fsinfo,
+ };
+ struct nfs4_get_lease_time_data data = {
+ .args = &args,
+ .res = &res,
+ .clp = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_get_lease_time_ops,
+ .callback_data = &data
+ };
+ int status;
+
+ res.lr_seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
+ dprintk("--> %s\n", __func__);
+ task = rpc_run_task(&task_setup);
+
+ if (IS_ERR(task))
+ status = PTR_ERR(task);
+ else {
+ status = task->tk_status;
+ rpc_put_task(task);
+ }
+ dprintk("<-- %s return %d\n", __func__, status);
+
+ return status;
+}
+
+/*
+ * Reset a slot table
+ */
+static int nfs4_reset_slot_table(struct nfs4_slot_table *tbl, int max_slots,
+ int old_max_slots, int ivalue)
+{
+ int i;
+ int ret = 0;
+
+ dprintk("--> %s: max_reqs=%u, tbl %p\n", __func__, max_slots, tbl);
+
+ /*
+ * Until we have dynamic slot table adjustment, insist
+ * upon the same slot table size
+ */
+ if (max_slots != old_max_slots) {
+ dprintk("%s reset slot table does't match old\n",
+ __func__);
+ ret = -EINVAL; /*XXX NFS4ERR_REQ_TOO_BIG ? */
+ goto out;
+ }
+ spin_lock(&tbl->slot_tbl_lock);
+ for (i = 0; i < max_slots; ++i)
+ tbl->slots[i].seq_nr = ivalue;
+ tbl->highest_used_slotid = -1;
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+}
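
Because dynamic slot-table resizing is not implemented here, a reset only
succeeds when the renegotiated size matches the old one; every slot's sequence
number is then rewound to the initial value and no slot is marked in use. A
minimal stand-alone model of that policy (the structures below are simplified
stand-ins, not the kernel's nfs4_slot_table):

    /* Minimal model of the "same size or fail" slot-table reset above. */
    #include <stdio.h>
    #include <errno.h>

    struct slot { unsigned int seq_nr; };

    struct slot_table {
            struct slot slots[16];
            int max_slots;
            int highest_used;
    };

    static int reset_slot_table(struct slot_table *tbl, int new_max, int ivalue)
    {
            int i;

            if (new_max != tbl->max_slots)  /* no dynamic resize yet */
                    return -EINVAL;
            for (i = 0; i < new_max; i++)
                    tbl->slots[i].seq_nr = ivalue;
            tbl->highest_used = -1;         /* no slot currently in use */
            return 0;
    }

    int main(void)
    {
            struct slot_table tbl = { .max_slots = 16, .highest_used = 3 };

            printf("reset to same size: %d\n", reset_slot_table(&tbl, 16, 1));
            printf("reset to new size:  %d\n", reset_slot_table(&tbl, 8, 1));
            return 0;
    }
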
+
+/*
+ * Reset the forechannel and backchannel slot tables
+ */
+static int nfs4_reset_slot_tables(struct nfs4_session *session)
+{
+ int status;
+
+ status = nfs4_reset_slot_table(&session->fc_slot_table,
+ session->fc_attrs.max_reqs,
+ session->fc_slot_table.max_slots,
+ 1);
+ if (status)
+ return status;
+
+ status = nfs4_reset_slot_table(&session->bc_slot_table,
+ session->bc_attrs.max_reqs,
+ session->bc_slot_table.max_slots,
+ 0);
+ return status;
+}
+
+/* Destroy the slot table */
+static void nfs4_destroy_slot_tables(struct nfs4_session *session)
+{
+ if (session->fc_slot_table.slots != NULL) {
+ kfree(session->fc_slot_table.slots);
+ session->fc_slot_table.slots = NULL;
+ }
+ if (session->bc_slot_table.slots != NULL) {
+ kfree(session->bc_slot_table.slots);
+ session->bc_slot_table.slots = NULL;
+ }
+ return;
+}
+
+/*
+ * Initialize slot table
+ */
+static int nfs4_init_slot_table(struct nfs4_slot_table *tbl,
+ int max_slots, int ivalue)
+{
+ int i;
+ struct nfs4_slot *slot;
+ int ret = -ENOMEM;
+
+ BUG_ON(max_slots > NFS4_MAX_SLOT_TABLE);
+
+ dprintk("--> %s: max_reqs=%u\n", __func__, max_slots);
+
+ slot = kcalloc(max_slots, sizeof(struct nfs4_slot), GFP_KERNEL);
+ if (!slot)
+ goto out;
+ for (i = 0; i < max_slots; ++i)
+ slot[i].seq_nr = ivalue;
+ ret = 0;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (tbl->slots != NULL) {
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: slot table already initialized. tbl=%p slots=%p\n",
+ __func__, tbl, tbl->slots);
+ WARN_ON(1);
+ goto out_free;
+ }
+ tbl->max_slots = max_slots;
+ tbl->slots = slot;
+ tbl->highest_used_slotid = -1; /* no slot is currently used */
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+
+out_free:
+ kfree(slot);
+ goto out;
+}
+
+/*
+ * Initialize the forechannel and backchannel tables
+ */
+static int nfs4_init_slot_tables(struct nfs4_session *session)
+{
+ int status;
+
+ status = nfs4_init_slot_table(&session->fc_slot_table,
+ session->fc_attrs.max_reqs, 1);
+ if (status)
+ return status;
+
+ status = nfs4_init_slot_table(&session->bc_slot_table,
+ session->bc_attrs.max_reqs, 0);
+ if (status)
+ nfs4_destroy_slot_tables(session);
+
+ return status;
+}
+
+struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session;
+ struct nfs4_slot_table *tbl;
+
+ session = kzalloc(sizeof(struct nfs4_session), GFP_KERNEL);
+ if (!session)
+ return NULL;
+
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ /*
+ * The create session reply races with the server back
+ * channel probe. Mark the client NFS_CS_SESSION_INITING
+ * so that the client back channel can find the
+ * nfs_client struct
+ */
+ clp->cl_cons_state = NFS_CS_SESSION_INITING;
+
+ tbl = &session->fc_slot_table;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+
+ tbl = &session->bc_slot_table;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+
+ session->clp = clp;
+ return session;
+}
+
+void nfs4_destroy_session(struct nfs4_session *session)
+{
+ nfs4_proc_destroy_session(session);
+ dprintk("%s Destroy backchannel for xprt %p\n",
+ __func__, session->clp->cl_rpcclient->cl_xprt);
+ xprt_destroy_backchannel(session->clp->cl_rpcclient->cl_xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ nfs4_destroy_slot_tables(session);
+ kfree(session);
+}
+
+/*
+ * Initialize the values to be used by the client in CREATE_SESSION.
+ * If nfs4_init_session already set the fore channel request and response
+ * sizes, use them.
+ *
+ * Set the back channel max_resp_sz_cached to zero to force the client to
+ * always set csa_cachethis to FALSE because the current implementation
+ * of the back channel DRC only supports caching the CB_SEQUENCE operation.
+ */
+static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
+{
+ struct nfs4_session *session = args->client->cl_session;
+ unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
+ mxresp_sz = session->fc_attrs.max_resp_sz;
+
+ if (mxrqst_sz == 0)
+ mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
+ if (mxresp_sz == 0)
+ mxresp_sz = NFS_MAX_FILE_IO_SIZE;
+ /* Fore channel attributes */
+ args->fc_attrs.headerpadsz = 0;
+ args->fc_attrs.max_rqst_sz = mxrqst_sz;
+ args->fc_attrs.max_resp_sz = mxresp_sz;
+ args->fc_attrs.max_resp_sz_cached = mxresp_sz;
+ args->fc_attrs.max_ops = NFS4_MAX_OPS;
+ args->fc_attrs.max_reqs = session->clp->cl_rpcclient->cl_xprt->max_reqs;
+
+ dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
+ args->fc_attrs.max_resp_sz_cached, args->fc_attrs.max_ops,
+ args->fc_attrs.max_reqs);
+
+ /* Back channel attributes */
+ args->bc_attrs.headerpadsz = 0;
+ args->bc_attrs.max_rqst_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz_cached = 0;
+ args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
+ args->bc_attrs.max_reqs = 1;
+
+ dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
+ args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
+ args->bc_attrs.max_reqs);
+}
+
+static int _verify_channel_attr(char *chan, char *attr_name, u32 sent, u32 rcvd)
+{
+ if (rcvd <= sent)
+ return 0;
+ printk(KERN_WARNING "%s: Session INVALID: %s channel %s increased. "
+ "sent=%u rcvd=%u\n", __func__, chan, attr_name, sent, rcvd);
+ return -EINVAL;
+}
+
+#define _verify_fore_channel_attr(_name_) \
+ _verify_channel_attr("fore", #_name_, \
+ args->fc_attrs._name_, \
+ session->fc_attrs._name_)
+
+#define _verify_back_channel_attr(_name_) \
+ _verify_channel_attr("back", #_name_, \
+ args->bc_attrs._name_, \
+ session->bc_attrs._name_)
+
+/*
+ * The server is not allowed to increase the fore channel header pad size,
+ * maximum response size, or maximum number of operations.
+ *
+ * The back channel attributes are only negotiated down: we send what the
+ * (back channel) server insists upon.
+ */
+static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
+ struct nfs4_session *session)
+{
+ int ret = 0;
+
+ ret |= _verify_fore_channel_attr(headerpadsz);
+ ret |= _verify_fore_channel_attr(max_resp_sz);
+ ret |= _verify_fore_channel_attr(max_ops);
+
+ ret |= _verify_back_channel_attr(headerpadsz);
+ ret |= _verify_back_channel_attr(max_rqst_sz);
+ ret |= _verify_back_channel_attr(max_resp_sz);
+ ret |= _verify_back_channel_attr(max_resp_sz_cached);
+ ret |= _verify_back_channel_attr(max_ops);
+ ret |= _verify_back_channel_attr(max_reqs);
+
+ return ret;
+}
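
The checks above implement the "negotiate down only" rule: for each verified
attribute the value the server returned must not exceed the value the client
offered, otherwise the whole session is rejected. A stand-alone sketch of that
comparison (the attribute names and sizes are made up for the example):

    /* Simplified model of the channel-attribute verification: a received
     * value may only be less than or equal to the value that was sent. */
    #include <stdio.h>

    static int verify_attr(const char *name, unsigned int sent, unsigned int rcvd)
    {
            if (rcvd <= sent)
                    return 0;
            fprintf(stderr, "session invalid: %s increased (sent=%u rcvd=%u)\n",
                    name, sent, rcvd);
            return -1;
    }

    int main(void)
    {
            int ret = 0;

            ret |= verify_attr("max_resp_sz", 1049620, 1049620); /* unchanged: ok */
            ret |= verify_attr("max_ops", 8, 4);                 /* lowered: ok  */
            ret |= verify_attr("max_reqs", 16, 64);              /* raised: bad  */

            printf("channel attrs %s\n", ret ? "rejected" : "accepted");
            return ret ? 1 : 0;
    }
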
+
+static int _nfs4_proc_create_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session = clp->cl_session;
+ struct nfs41_create_session_args args = {
+ .client = clp,
+ .cb_program = NFS4_CALLBACK,
+ };
+ struct nfs41_create_session_res res = {
+ .client = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ nfs4_init_channel_attrs(&args);
+ args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
+
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
+
+ if (!status)
+ /* Verify the session's negotiated channel_attrs values */
+ status = nfs4_verify_channel_attrs(&args, session);
+ if (!status) {
+ /* Increment the clientid slot sequence id */
+ clp->cl_seqid++;
+ }
+
+ return status;
+}
+
+/*
+ * Issues a CREATE_SESSION operation to the server.
+ * It is the responsibility of the caller to verify the session is
+ * expired before calling this routine.
+ */
+int nfs4_proc_create_session(struct nfs_client *clp, int reset)
+{
+ int status;
+ unsigned *ptr;
+ struct nfs_fsinfo fsinfo;
+ struct nfs4_session *session = clp->cl_session;
+
+ dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
+
+ status = _nfs4_proc_create_session(clp);
+ if (status)
+ goto out;
+
+ /* Init or reset the fore channel */
+ if (reset)
+ status = nfs4_reset_slot_tables(session);
+ else
+ status = nfs4_init_slot_tables(session);
+ dprintk("fore channel slot table initialization returned %d\n", status);
+ if (status)
+ goto out;
+
+ ptr = (unsigned *)&session->sess_id.data[0];
+ dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
+ clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
+
+ if (reset)
+ /* Lease time is already set */
+ goto out;
+
+ /* Get the lease time */
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+ /* Update lease time and schedule renewal */
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = fsinfo.lease_time * HZ;
+ clp->cl_last_renewal = jiffies;
+ clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ spin_unlock(&clp->cl_lock);
+
+ nfs4_schedule_state_renewal(clp);
+ }
+out:
+ dprintk("<-- %s\n", __func__);
+ return status;
+}
+
+/*
+ * Issue the over-the-wire RPC DESTROY_SESSION.
+ * The caller must serialize access to this routine.
+ */
+int nfs4_proc_destroy_session(struct nfs4_session *session)
+{
+ int status = 0;
+ struct rpc_message msg;
+
+ dprintk("--> nfs4_proc_destroy_session\n");
+
+ /* session is still being set up */
+ if (session->clp->cl_cons_state != NFS_CS_READY)
+ return status;
+
+ msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
+ msg.rpc_argp = session;
+ msg.rpc_resp = NULL;
+ msg.rpc_cred = NULL;
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, 0);
+
+ if (status)
+ printk(KERN_WARNING
+ "Got error %d from the server on DESTROY_SESSION. "
+ "Session has been destroyed regardless...\n", status);
+
+ dprintk("<-- nfs4_proc_destroy_session\n");
+ return status;
+}
+
+/*
+ * Renew the cl_session lease.
+ */
+static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct nfs4_sequence_args args;
+ struct nfs4_sequence_res res;
+
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+
+ args.sa_cache_this = 0;
+
+ return nfs4_call_sync_sequence(clp, clp->cl_rpcclient, &msg, &args,
+ &res, 0);
+}
+
+void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_client *clp = (struct nfs_client *)data;
+
+ nfs41_sequence_done(clp, task->tk_msg.rpc_resp, task->tk_status);
+
+ if (task->tk_status < 0) {
+ dprintk("%s ERROR %d\n", __func__, task->tk_status);
+
+ if (_nfs4_async_handle_error(task, NULL, clp, NULL)
+ == -EAGAIN) {
+ nfs4_restart_rpc(task, clp);
+ return;
+ }
+ }
+ nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
+ dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
+
+ put_rpccred(task->tk_msg.rpc_cred);
+ kfree(task->tk_msg.rpc_argp);
+ kfree(task->tk_msg.rpc_resp);
+
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_client *clp;
+ struct nfs4_sequence_args *args;
+ struct nfs4_sequence_res *res;
+
+ clp = (struct nfs_client *)data;
+ args = task->tk_msg.rpc_argp;
+ res = task->tk_msg.rpc_resp;
+
+ if (nfs4_setup_sequence(clp, args, res, 0, task))
+ return;
+ rpc_call_start(task);
+}
+
+static const struct rpc_call_ops nfs41_sequence_ops = {
+ .rpc_call_done = nfs41_sequence_call_done,
+ .rpc_call_prepare = nfs41_sequence_prepare,
+};
+
+static int nfs41_proc_async_sequence(struct nfs_client *clp,
+ struct rpc_cred *cred)
+{
+ struct nfs4_sequence_args *args;
+ struct nfs4_sequence_res *res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
+ .rpc_cred = cred,
+ };
+
+ args = kzalloc(sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(args);
+ return -ENOMEM;
+ }
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+ msg.rpc_argp = args;
+ msg.rpc_resp = res;
+
+ return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
+ &nfs41_sequence_ops, (void *)clp);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
+ .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
+ .recover_open = nfs4_open_reclaim,
+ .recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs4_proc_exchange_id,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
+ .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
+ .recover_open = nfs4_open_expired,
+ .recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
};
-struct nfs4_state_recovery_ops nfs4_nograce_recovery_ops = {
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_proc_exchange_id,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
+ .sched_state_renewal = nfs4_proc_async_renew,
+ .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
+ .renew_lease = nfs4_proc_renew,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
+ .sched_state_renewal = nfs41_proc_async_sequence,
+ .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
+ .renew_lease = nfs4_proc_sequence,
+};
+#endif
+
+/*
+ * Per minor version reboot and network partition recovery ops
+ */
+
+struct nfs4_state_recovery_ops *nfs4_reboot_recovery_ops[] = {
+ &nfs40_reboot_recovery_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_reboot_recovery_ops,
+#endif
+};
+
+struct nfs4_state_recovery_ops *nfs4_nograce_recovery_ops[] = {
+ &nfs40_nograce_recovery_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_nograce_recovery_ops,
+#endif
+};
+
+struct nfs4_state_maintenance_ops *nfs4_state_renewal_ops[] = {
+ &nfs40_state_renewal_ops,
+#if defined(CONFIG_NFS_V4_1)
+ &nfs41_state_renewal_ops,
+#endif
};
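
These tables are indexed by clp->cl_minorversion, so callers such as
nfs4_reclaim_lease() and nfs4_renew_state() select the v4.0 or v4.1 behaviour
with a single lookup instead of branching on the version at every call site. A
tiny stand-alone illustration of that dispatch pattern (the ops structure and
callbacks below are invented for the example, not the NFS types):

    /* Stand-alone illustration of per-minor-version ops dispatch. */
    #include <stdio.h>

    struct recovery_ops {
            const char *name;
            int (*establish_clid)(void);
    };

    static int setclientid(void) { puts("SETCLIENTID"); return 0; }
    static int exchange_id(void) { puts("EXCHANGE_ID"); return 0; }

    static const struct recovery_ops v40_ops = { "nfsv4.0", setclientid };
    static const struct recovery_ops v41_ops = { "nfsv4.1", exchange_id };

    static const struct recovery_ops *reboot_ops[] = {
            &v40_ops,       /* minorversion 0 */
            &v41_ops,       /* minorversion 1 */
    };

    int main(void)
    {
            unsigned int minorversion = 1;  /* e.g. clp->cl_minorversion */
            const struct recovery_ops *ops = reboot_ops[minorversion];

            printf("using %s ops: ", ops->name);
            return ops->establish_clid();
    }
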
static const struct inode_operations nfs4_file_inode_operations = {
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index f524e932ff7b..e27c6cef18f2 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -59,12 +59,14 @@
void
nfs4_renew_state(struct work_struct *work)
{
+ struct nfs4_state_maintenance_ops *ops;
struct nfs_client *clp =
container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
+ ops = nfs4_state_renewal_ops[clp->cl_minorversion];
dprintk("%s: start\n", __func__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
@@ -76,7 +78,7 @@ nfs4_renew_state(struct work_struct *work)
timeout = (2 * lease) / 3 + (long)last - (long)now;
/* Are we close to a lease timeout? */
if (time_after(now, last + lease/3)) {
- cred = nfs4_get_renew_cred_locked(clp);
+ cred = ops->get_state_renewal_cred_locked(clp);
spin_unlock(&clp->cl_lock);
if (cred == NULL) {
if (list_empty(&clp->cl_delegations)) {
@@ -86,7 +88,7 @@ nfs4_renew_state(struct work_struct *work)
nfs_expire_all_delegations(clp);
} else {
/* Queue an asynchronous RENEW. */
- nfs4_proc_async_renew(clp, cred);
+ ops->sched_state_renewal(clp, cred);
put_rpccred(cred);
}
timeout = (2 * lease) / 3;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0298e909559f..b73c5a728655 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -60,7 +60,7 @@ const nfs4_stateid zero_stateid;
static LIST_HEAD(nfs4_clientid_list);
-static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
{
unsigned short port;
int status;
@@ -77,7 +77,7 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
return status;
}
-static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
@@ -114,17 +114,21 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
return cred;
}
-static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
+#if defined(CONFIG_NFS_V4_1)
+
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
- cred = nfs4_get_renew_cred_locked(clp);
+ cred = nfs4_get_machine_cred_locked(clp);
spin_unlock(&clp->cl_lock);
return cred;
}
-static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+#endif /* CONFIG_NFS_V4_1 */
+
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
@@ -738,12 +742,14 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
- if (status == -NFS4ERR_BAD_SEQID) {
- struct nfs4_state_owner *sp = container_of(seqid->sequence,
- struct nfs4_state_owner, so_seqid);
+ struct nfs4_state_owner *sp = container_of(seqid->sequence,
+ struct nfs4_state_owner, so_seqid);
+ struct nfs_server *server = sp->so_server;
+
+ if (status == -NFS4ERR_BAD_SEQID)
nfs4_drop_state_owner(sp);
- }
- nfs_increment_seqid(status, seqid);
+ if (!nfs4_has_session(server->nfs_client))
+ nfs_increment_seqid(status, seqid);
}
/*
@@ -847,32 +853,45 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
struct file_lock *fl;
int status = 0;
+ if (inode->i_flock == NULL)
+ return 0;
+
+ /* Guard against delegation returns and new lock/unlock calls */
down_write(&nfsi->rwsem);
+ /* Protect inode->i_flock using the BKL */
+ lock_kernel();
for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
continue;
if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
+ unlock_kernel();
status = ops->recover_lock(state, fl);
- if (status >= 0)
- continue;
switch (status) {
+ case 0:
+ break;
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ case -NFS4ERR_STALE_CLIENTID:
+ goto out;
default:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
__func__, status);
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_NO_GRACE:
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
- break;
- case -NFS4ERR_STALE_CLIENTID:
- goto out_err;
+ status = 0;
}
+ lock_kernel();
}
- up_write(&nfsi->rwsem);
- return 0;
-out_err:
+ unlock_kernel();
+out:
up_write(&nfsi->rwsem);
return status;
}
@@ -918,6 +937,7 @@ restart:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
__func__, status);
case -ENOENT:
+ case -ENOMEM:
case -ESTALE:
/*
* Open state on this file cannot be recovered
@@ -928,6 +948,9 @@ restart:
/* Mark the file as being 'closed' */
state->state = 0;
break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
nfs4_state_mark_reclaim_nograce(sp->so_client, state);
@@ -1042,6 +1065,14 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_EXPIRED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
}
}
@@ -1075,18 +1106,22 @@ restart:
static int nfs4_check_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
+ struct nfs4_state_maintenance_ops *ops =
+ nfs4_state_renewal_ops[clp->cl_minorversion];
int status = -NFS4ERR_EXPIRED;
/* Is the client already known to have an expired lease? */
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
return 0;
- cred = nfs4_get_renew_cred(clp);
+ spin_lock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
if (cred == NULL) {
cred = nfs4_get_setclientid_cred(clp);
if (cred == NULL)
goto out;
}
- status = nfs4_proc_renew(clp, cred);
+ status = ops->renew_lease(clp, cred);
put_rpccred(cred);
out:
nfs4_recovery_handle_error(clp, status);
@@ -1096,21 +1131,98 @@ out:
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
struct rpc_cred *cred;
+ struct nfs4_state_recovery_ops *ops =
+ nfs4_reboot_recovery_ops[clp->cl_minorversion];
int status = -ENOENT;
- cred = nfs4_get_setclientid_cred(clp);
+ cred = ops->get_clid_cred(clp);
if (cred != NULL) {
- status = nfs4_init_client(clp, cred);
+ status = ops->establish_clid(clp, cred);
put_rpccred(cred);
/* Handle case where the user hasn't set up machine creds */
if (status == -EACCES && cred == clp->cl_machine_cred) {
nfs4_clear_machine_cred(clp);
status = -EAGAIN;
}
+ if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
+ status = -EPROTONOSUPPORT;
+ }
+ return status;
+}
+
+#ifdef CONFIG_NFS_V4_1
+static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
+{
+ switch (err) {
+ case -NFS4ERR_STALE_CLIENTID:
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ }
+}
+
+static int nfs4_reset_session(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_proc_destroy_session(clp->cl_session);
+ if (status && status != -NFS4ERR_BADSESSION &&
+ status != -NFS4ERR_DEADSESSION) {
+ nfs4_session_recovery_handle_error(clp, status);
+ goto out;
}
+
+ memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
+ status = nfs4_proc_create_session(clp, 1);
+ if (status)
+ nfs4_session_recovery_handle_error(clp, status);
+ /* fall through */
+out:
+ /* Wake up the next rpc task even on error */
+ rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
return status;
}
+static int nfs4_initialize_session(struct nfs_client *clp)
+{
+ int status;
+
+ status = nfs4_proc_create_session(clp, 0);
+ if (!status) {
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+ } else if (status == -NFS4ERR_STALE_CLIENTID) {
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ } else {
+ nfs_mark_client_ready(clp, status);
+ }
+ return status;
+}
+#else /* CONFIG_NFS_V4_1 */
+static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
+static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
+#endif /* CONFIG_NFS_V4_1 */
+
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+{
+ if (nfs4_has_session(clp)) {
+ switch (status) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_CLID_INUSE:
+ case -EAGAIN:
+ break;
+
+ case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+ * in nfs4_exchange_id */
+ default:
+ return;
+ }
+ }
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+}
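
In other words: without a session (v4.0) every reclaim failure marks the lease
expired and the state manager retries via SETCLIENTID, whereas with a session
(v4.1) only the errors listed above are treated as recoverable at this point.
A compact stand-alone version of that decision (the NFS4ERR values are
hand-defined and the flag is illustrative; this is not the kernel function):

    /* Stand-alone model of the lease-expired decision above. */
    #include <stdio.h>
    #include <errno.h>
    #include <stdbool.h>

    #define NFS4ERR_DELAY      10008
    #define NFS4ERR_CLID_INUSE 10017

    static bool mark_lease_expired(bool has_session, int status)
    {
            if (!has_session)
                    return true;            /* v4.0: always retry reclaim */
            switch (status) {
            case -NFS4ERR_DELAY:
            case -NFS4ERR_CLID_INUSE:
            case -EAGAIN:
                    return true;            /* recoverable EXCHANGE_ID errors */
            default:
                    return false;
            }
    }

    int main(void)
    {
            printf("v4.0, -EIO          -> %d\n", mark_lease_expired(false, -EIO));
            printf("v4.1, NFS4ERR_DELAY -> %d\n",
                   mark_lease_expired(true, -NFS4ERR_DELAY));
            printf("v4.1, -EACCES       -> %d\n",
                   mark_lease_expired(true, -EACCES));
            return 0;
    }
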
+
static void nfs4_state_manager(struct nfs_client *clp)
{
int status = 0;
@@ -1121,9 +1233,12 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* We're going to have to re-establish a clientid */
status = nfs4_reclaim_lease(clp);
if (status) {
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_set_lease_expired(clp, status);
if (status == -EAGAIN)
continue;
+ if (clp->cl_cons_state ==
+ NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, status);
goto out_error;
}
clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
@@ -1134,25 +1249,44 @@ static void nfs4_state_manager(struct nfs_client *clp)
if (status != 0)
continue;
}
-
+ /* Initialize or reset the session */
+ if (nfs4_has_session(clp) &&
+ test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ status = nfs4_initialize_session(clp);
+ else
+ status = nfs4_reset_session(clp);
+ if (status) {
+ if (status == -NFS4ERR_STALE_CLIENTID)
+ continue;
+ goto out_error;
+ }
+ }
/* First recover reboot state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
- status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
+ status = nfs4_do_reclaim(clp,
+ nfs4_reboot_recovery_ops[clp->cl_minorversion]);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
+ if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ continue;
nfs4_state_end_reclaim_reboot(clp);
continue;
}
/* Now recover expired state... */
if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
- status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
+ status = nfs4_do_reclaim(clp,
+ nfs4_nograce_recovery_ops[clp->cl_minorversion]);
if (status < 0) {
set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
if (status == -NFS4ERR_EXPIRED)
continue;
+ if (test_bit(NFS4CLNT_SESSION_SETUP,
+ &clp->cl_state))
+ continue;
goto out_error;
} else
nfs4_state_end_reclaim_nograce(clp);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1690f0e44b91..617273e7d47f 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -192,12 +192,16 @@ static int nfs4_stat_to_errno(int);
decode_verifier_maxsz)
#define encode_remove_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
+#define decode_remove_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz)
#define encode_rename_maxsz (op_encode_hdr_maxsz + \
2 * nfs4_name_maxsz)
-#define decode_rename_maxsz (op_decode_hdr_maxsz + 5 + 5)
+#define decode_rename_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz + \
+ decode_change_info_maxsz)
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
-#define decode_link_maxsz (op_decode_hdr_maxsz + 5)
+#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 8)
@@ -240,43 +244,115 @@ static int nfs4_stat_to_errno(int);
(encode_getattr_maxsz)
#define decode_fs_locations_maxsz \
(0)
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MACHINE_NAME_LEN (64)
+
+#define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \
+ encode_verifier_maxsz + \
+ 1 /* co_ownerid.len */ + \
+ XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \
+ 1 /* flags */ + \
+ 1 /* spa_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+ 1 /* zero implementation id array */)
+#define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \
+ 2 /* eir_clientid */ + \
+ 1 /* eir_sequenceid */ + \
+ 1 /* eir_flags */ + \
+ 1 /* spr_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+ 2 /* eir_server_owner.so_minor_id */ + \
+ /* eir_server_owner.so_major_id<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ /* eir_server_scope<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ 1 /* eir_server_impl_id array length */ + \
+ 0 /* ignored eir_server_impl_id contents */)
+#define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */)
+#define decode_channel_attrs_maxsz (6 + \
+ 1 /* ca_rdma_ird.len */ + \
+ 1 /* ca_rdma_ird */)
+#define encode_create_session_maxsz (op_encode_hdr_maxsz + \
+ 2 /* csa_clientid */ + \
+ 1 /* csa_sequence */ + \
+ 1 /* csa_flags */ + \
+ encode_channel_attrs_maxsz + \
+ encode_channel_attrs_maxsz + \
+ 1 /* csa_cb_program */ + \
+ 1 /* csa_sec_parms.len (1) */ + \
+ 1 /* cb_secflavor (AUTH_SYS) */ + \
+ 1 /* stamp */ + \
+ 1 /* machinename.len */ + \
+ XDR_QUADLEN(NFS4_MAX_MACHINE_NAME_LEN) + \
+ 1 /* uid */ + \
+ 1 /* gid */ + \
+ 1 /* gids.len (0) */)
+#define decode_create_session_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* csr_sequence */ + \
+ 1 /* csr_flags */ + \
+ decode_channel_attrs_maxsz + \
+ decode_channel_attrs_maxsz)
+#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
+#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
+#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
+#else /* CONFIG_NFS_V4_1 */
+#define encode_sequence_maxsz 0
+#define decode_sequence_maxsz 0
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFS4_enc_compound_sz (1024) /* XXX: large enough? */
#define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
#define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_read_maxsz)
#define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_read_maxsz)
#define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readlink_maxsz)
#define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readlink_maxsz)
#define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_readdir_maxsz)
#define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_readdir_maxsz)
#define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_write_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_write_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_commit_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_commit_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_open_maxsz + \
@@ -285,6 +361,7 @@ static int nfs4_stat_to_errno(int);
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_open_maxsz + \
@@ -301,43 +378,53 @@ static int nfs4_stat_to_errno(int);
decode_putfh_maxsz + \
decode_open_confirm_maxsz)
#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_open_downgrade_sz \
(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_open_downgrade_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_open_downgrade_sz \
(compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_open_downgrade_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_close_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_close_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setattr_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setattr_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_fsinfo_maxsz)
#define NFS4_dec_fsinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_renew_sz (compound_encode_hdr_maxsz + \
@@ -359,64 +446,81 @@ static int nfs4_stat_to_errno(int);
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
#define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lock_maxsz)
#define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lock_maxsz)
#define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lockt_maxsz)
#define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lockt_maxsz)
#define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_locku_maxsz)
#define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_locku_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_access_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_access_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_remove_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
- op_decode_hdr_maxsz + 5 + \
+ decode_remove_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
@@ -425,6 +529,7 @@ static int nfs4_stat_to_errno(int);
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
@@ -433,6 +538,7 @@ static int nfs4_stat_to_errno(int);
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_putfh_maxsz + \
@@ -441,6 +547,7 @@ static int nfs4_stat_to_errno(int);
encode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_putfh_maxsz + \
@@ -449,16 +556,19 @@ static int nfs4_stat_to_errno(int);
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_symlink_maxsz + \
encode_getattr_maxsz + \
encode_getfh_maxsz)
#define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_symlink_maxsz + \
decode_getattr_maxsz + \
decode_getfh_maxsz)
#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_savefh_maxsz + \
encode_create_maxsz + \
@@ -467,6 +577,7 @@ static int nfs4_stat_to_errno(int);
encode_restorefh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_savefh_maxsz + \
decode_create_maxsz + \
@@ -475,52 +586,98 @@ static int nfs4_stat_to_errno(int);
decode_restorefh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_pathconf_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_statfs_maxsz)
#define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_statfs_maxsz)
#define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_delegreturn_maxsz + \
encode_getattr_maxsz)
#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_delegreturn_maxsz + \
decode_getattr_maxsz)
#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_getacl_maxsz)
#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_getacl_maxsz)
#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_setacl_maxsz)
#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_setacl_maxsz)
#define NFS4_enc_fs_locations_sz \
(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
encode_putfh_maxsz + \
encode_lookup_maxsz + \
encode_fs_locations_maxsz)
#define NFS4_dec_fs_locations_sz \
(compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_lookup_maxsz + \
decode_fs_locations_maxsz)
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_exchange_id_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_exchange_id_maxsz)
+#define NFS4_dec_exchange_id_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_exchange_id_maxsz)
+#define NFS4_enc_create_session_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_create_session_maxsz)
+#define NFS4_dec_create_session_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_create_session_maxsz)
+#define NFS4_enc_destroy_session_sz (compound_encode_hdr_maxsz + \
+ encode_destroy_session_maxsz)
+#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
+ decode_destroy_session_maxsz)
+#define NFS4_enc_sequence_sz \
+ (compound_decode_hdr_maxsz + \
+ encode_sequence_maxsz)
+#define NFS4_dec_sequence_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz)
+#define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putrootfh_maxsz + \
+ encode_fsinfo_maxsz)
+#define NFS4_dec_get_lease_time_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putrootfh_maxsz + \
+ decode_fsinfo_maxsz)
+#endif /* CONFIG_NFS_V4_1 */
static const umode_t nfs_type2fmt[] = {
[NF4BAD] = 0,
@@ -541,6 +698,8 @@ struct compound_hdr {
__be32 * nops_p;
uint32_t taglen;
char * tag;
+ uint32_t replen; /* expected reply words */
+ u32 minorversion;
};
/*
@@ -576,22 +735,31 @@ static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *
xdr_encode_opaque(p, str, len);
}
-static void encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
+static void encode_compound_hdr(struct xdr_stream *xdr,
+ struct rpc_rqst *req,
+ struct compound_hdr *hdr)
{
__be32 *p;
+ struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
+
+ /* initialize running count of expected reply words.
+ * NOTE: the replied tag SHOULD be the same as the one sent,
+ * but the server is not REQUIRED to do so. */
+ hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;
dprintk("encode_compound: tag=%.*s\n", (int)hdr->taglen, hdr->tag);
BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
RESERVE_SPACE(12+(XDR_QUADLEN(hdr->taglen)<<2));
WRITE32(hdr->taglen);
WRITEMEM(hdr->tag, hdr->taglen);
- WRITE32(NFS4_MINOR_VERSION);
+ WRITE32(hdr->minorversion);
hdr->nops_p = p;
WRITE32(hdr->nops);
}
static void encode_nops(struct compound_hdr *hdr)
{
+ BUG_ON(hdr->nops > NFS4_MAX_OPS);
*hdr->nops_p = htonl(hdr->nops);
}
@@ -736,6 +904,7 @@ static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hd
WRITE32(OP_ACCESS);
WRITE32(access);
hdr->nops++;
+ hdr->replen += decode_access_maxsz;
}
static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
@@ -747,6 +916,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
WRITE32(arg->seqid->sequence->counter);
WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_close_maxsz;
}
static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
@@ -758,6 +928,7 @@ static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *ar
WRITE64(args->offset);
WRITE32(args->count);
hdr->nops++;
+ hdr->replen += decode_commit_maxsz;
}
static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr)
@@ -789,6 +960,7 @@ static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *
WRITE32(create->name->len);
WRITEMEM(create->name->name, create->name->len);
hdr->nops++;
+ hdr->replen += decode_create_maxsz;
encode_attrs(xdr, create->attrs, create->server);
}
@@ -802,6 +974,7 @@ static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct c
WRITE32(1);
WRITE32(bitmap);
hdr->nops++;
+ hdr->replen += decode_getattr_maxsz;
}
static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr)
@@ -814,6 +987,7 @@ static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm
WRITE32(bm0);
WRITE32(bm1);
hdr->nops++;
+ hdr->replen += decode_getattr_maxsz;
}
static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
@@ -841,6 +1015,7 @@ static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
RESERVE_SPACE(4);
WRITE32(OP_GETFH);
hdr->nops++;
+ hdr->replen += decode_getfh_maxsz;
}
static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -852,6 +1027,7 @@ static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct
WRITE32(name->len);
WRITEMEM(name->name, name->len);
hdr->nops++;
+ hdr->replen += decode_link_maxsz;
}
static inline int nfs4_lock_type(struct file_lock *fl, int block)
@@ -899,6 +1075,7 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
WRITE32(args->lock_seqid->sequence->counter);
}
hdr->nops++;
+ hdr->replen += decode_lock_maxsz;
}
static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr)
@@ -915,6 +1092,7 @@ static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *ar
WRITEMEM("lock id:", 8);
WRITE64(args->lock_owner.id);
hdr->nops++;
+ hdr->replen += decode_lockt_maxsz;
}
static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr)
@@ -929,6 +1107,7 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
WRITE64(args->fl->fl_start);
WRITE64(nfs4_lock_length(args->fl));
hdr->nops++;
+ hdr->replen += decode_locku_maxsz;
}
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -941,6 +1120,7 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc
WRITE32(len);
WRITEMEM(name->name, len);
hdr->nops++;
+ hdr->replen += decode_lookup_maxsz;
}
static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
@@ -1080,6 +1260,7 @@ static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg,
BUG();
}
hdr->nops++;
+ hdr->replen += decode_open_maxsz;
}
static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr)
@@ -1091,6 +1272,7 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co
WRITEMEM(arg->stateid->data, NFS4_STATEID_SIZE);
WRITE32(arg->seqid->sequence->counter);
hdr->nops++;
+ hdr->replen += decode_open_confirm_maxsz;
}
static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
@@ -1103,6 +1285,7 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close
WRITE32(arg->seqid->sequence->counter);
encode_share_access(xdr, arg->fmode);
hdr->nops++;
+ hdr->replen += decode_open_downgrade_maxsz;
}
static void
@@ -1116,6 +1299,7 @@ encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hd
WRITE32(len);
WRITEMEM(fh->data, len);
hdr->nops++;
+ hdr->replen += decode_putfh_maxsz;
}
static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
@@ -1125,6 +1309,7 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
RESERVE_SPACE(4);
WRITE32(OP_PUTROOTFH);
hdr->nops++;
+ hdr->replen += decode_putrootfh_maxsz;
}
static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
@@ -1153,6 +1338,7 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args,
WRITE64(args->offset);
WRITE32(args->count);
hdr->nops++;
+ hdr->replen += decode_read_maxsz;
}
static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
@@ -1178,6 +1364,7 @@ static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
WRITE32(attrs[0] & readdir->bitmask[0]);
WRITE32(attrs[1] & readdir->bitmask[1]);
hdr->nops++;
+ hdr->replen += decode_readdir_maxsz;
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
__func__,
(unsigned long long)readdir->cookie,
@@ -1194,6 +1381,7 @@ static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *
RESERVE_SPACE(4);
WRITE32(OP_READLINK);
hdr->nops++;
+ hdr->replen += decode_readlink_maxsz;
}
static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
@@ -1205,6 +1393,7 @@ static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struc
WRITE32(name->len);
WRITEMEM(name->name, name->len);
hdr->nops++;
+ hdr->replen += decode_remove_maxsz;
}
static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr)
@@ -1220,6 +1409,7 @@ static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, co
WRITE32(newname->len);
WRITEMEM(newname->name, newname->len);
hdr->nops++;
+ hdr->replen += decode_rename_maxsz;
}
static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client_stateid, struct compound_hdr *hdr)
@@ -1230,6 +1420,7 @@ static void encode_renew(struct xdr_stream *xdr, const struct nfs_client *client
WRITE32(OP_RENEW);
WRITE64(client_stateid->cl_clientid);
hdr->nops++;
+ hdr->replen += decode_renew_maxsz;
}
static void
@@ -1240,6 +1431,7 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
RESERVE_SPACE(4);
WRITE32(OP_RESTOREFH);
hdr->nops++;
+ hdr->replen += decode_restorefh_maxsz;
}
static int
@@ -1259,6 +1451,7 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun
WRITE32(arg->acl_len);
xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
hdr->nops++;
+ hdr->replen += decode_setacl_maxsz;
return 0;
}
@@ -1270,6 +1463,7 @@ encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
RESERVE_SPACE(4);
WRITE32(OP_SAVEFH);
hdr->nops++;
+ hdr->replen += decode_savefh_maxsz;
}
static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr)
@@ -1280,6 +1474,7 @@ static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs
WRITE32(OP_SETATTR);
WRITEMEM(arg->stateid.data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_setattr_maxsz;
encode_attrs(xdr, arg->iap, server);
}
@@ -1299,6 +1494,7 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie
RESERVE_SPACE(4);
WRITE32(setclientid->sc_cb_ident);
hdr->nops++;
+ hdr->replen += decode_setclientid_maxsz;
}
static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_client *client_state, struct compound_hdr *hdr)
@@ -1310,6 +1506,7 @@ static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs_
WRITE64(client_state->cl_clientid);
WRITEMEM(client_state->cl_confirm.data, NFS4_VERIFIER_SIZE);
hdr->nops++;
+ hdr->replen += decode_setclientid_confirm_maxsz;
}
static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
@@ -1328,6 +1525,7 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
hdr->nops++;
+ hdr->replen += decode_write_maxsz;
}
static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr)
@@ -1339,11 +1537,163 @@ static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *state
WRITE32(OP_DELEGRETURN);
WRITEMEM(stateid->data, NFS4_STATEID_SIZE);
hdr->nops++;
+ hdr->replen += decode_delegreturn_maxsz;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/* NFSv4.1 operations */
+static void encode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ RESERVE_SPACE(4 + sizeof(args->verifier->data));
+ WRITE32(OP_EXCHANGE_ID);
+ WRITEMEM(args->verifier->data, sizeof(args->verifier->data));
+
+ encode_string(xdr, args->id_len, args->id);
+
+ RESERVE_SPACE(12);
+ WRITE32(args->flags);
+ WRITE32(0); /* zero length state_protect4_a */
+ WRITE32(0); /* zero length implementation id array */
+ hdr->nops++;
+ hdr->replen += decode_exchange_id_maxsz;
+}
+
+static void encode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
+ uint32_t len;
+ struct nfs_client *clp = args->client;
+
+ RESERVE_SPACE(4);
+ WRITE32(OP_CREATE_SESSION);
+
+ RESERVE_SPACE(8);
+ WRITE64(clp->cl_ex_clid);
+
+ RESERVE_SPACE(8);
+ WRITE32(clp->cl_seqid); /* Sequence id */
+ WRITE32(args->flags); /* flags */
+
+ RESERVE_SPACE(2*28); /* 2 channel_attrs */
+ /* Fore Channel */
+ WRITE32(args->fc_attrs.headerpadsz); /* header padding size */
+ WRITE32(args->fc_attrs.max_rqst_sz); /* max req size */
+ WRITE32(args->fc_attrs.max_resp_sz); /* max resp size */
+ WRITE32(args->fc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ WRITE32(args->fc_attrs.max_ops); /* max operations */
+ WRITE32(args->fc_attrs.max_reqs); /* max requests */
+ WRITE32(0); /* rdmachannel_attrs */
+
+ /* Back Channel */
+ WRITE32(args->bc_attrs.headerpadsz); /* header padding size */
+ WRITE32(args->bc_attrs.max_rqst_sz); /* max req size */
+ WRITE32(args->bc_attrs.max_resp_sz); /* max resp size */
+ WRITE32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ WRITE32(args->bc_attrs.max_ops); /* max operations */
+ WRITE32(args->bc_attrs.max_reqs); /* max requests */
+ WRITE32(0); /* rdmachannel_attrs */
+
+ RESERVE_SPACE(4);
+ WRITE32(args->cb_program); /* cb_program */
+
+ RESERVE_SPACE(4); /* # of security flavors */
+ WRITE32(1);
+
+ RESERVE_SPACE(4);
+ WRITE32(RPC_AUTH_UNIX); /* auth_sys */
+
+ /* authsys_parms rfc1831 */
+ RESERVE_SPACE(4);
+ WRITE32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ len = scnprintf(machine_name, sizeof(machine_name), "%s",
+ clp->cl_ipaddr);
+ RESERVE_SPACE(16 + len);
+ WRITE32(len);
+ WRITEMEM(machine_name, len);
+ WRITE32(0); /* UID */
+ WRITE32(0); /* GID */
+ WRITE32(0); /* No more gids */
+ hdr->nops++;
+ hdr->replen += decode_create_session_maxsz;
+}
+
+static void encode_destroy_session(struct xdr_stream *xdr,
+ struct nfs4_session *session,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ RESERVE_SPACE(4 + NFS4_MAX_SESSIONID_LEN);
+ WRITE32(OP_DESTROY_SESSION);
+ WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ hdr->nops++;
+ hdr->replen += decode_destroy_session_maxsz;
}
+#endif /* CONFIG_NFS_V4_1 */
+
+static void encode_sequence(struct xdr_stream *xdr,
+ const struct nfs4_sequence_args *args,
+ struct compound_hdr *hdr)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_session *session = args->sa_session;
+ struct nfs4_slot_table *tp;
+ struct nfs4_slot *slot;
+ __be32 *p;
+
+ if (!session)
+ return;
+
+ tp = &session->fc_slot_table;
+
+ WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
+ slot = tp->slots + args->sa_slotid;
+
+ RESERVE_SPACE(4);
+ WRITE32(OP_SEQUENCE);
+
+ /*
+ * Sessionid + seqid + slotid + max slotid + cache_this
+ */
+ dprintk("%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d "
+ "max_slotid=%d cache_this=%d\n",
+ __func__,
+ ((u32 *)session->sess_id.data)[0],
+ ((u32 *)session->sess_id.data)[1],
+ ((u32 *)session->sess_id.data)[2],
+ ((u32 *)session->sess_id.data)[3],
+ slot->seq_nr, args->sa_slotid,
+ tp->highest_used_slotid, args->sa_cache_this);
+ RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 16);
+ WRITEMEM(session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ WRITE32(slot->seq_nr);
+ WRITE32(args->sa_slotid);
+ WRITE32(tp->highest_used_slotid);
+ WRITE32(args->sa_cache_this);
+ hdr->nops++;
+ hdr->replen += decode_sequence_maxsz;
+#endif /* CONFIG_NFS_V4_1 */
+}
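
Read together, the RESERVE_SPACE()/WRITE*() calls in encode_sequence() put the operation on the wire in a fixed order; the summary below only restates the encode above (the sizes follow from the NFS4_MAX_SESSIONID_LEN + 16 reservation) and is not additional patch content.

/*
 * SEQUENCE argument layout emitted by encode_sequence():
 *
 *	opcode		OP_SEQUENCE			4 bytes
 *	sessionid	session->sess_id.data		NFS4_MAX_SESSIONID_LEN bytes
 *	sequenceid	slot->seq_nr			4 bytes
 *	slotid		args->sa_slotid			4 bytes
 *	highest slotid	tp->highest_used_slotid		4 bytes
 *	cachethis	args->sa_cache_this		4 bytes
 *
 * decode_sequence_maxsz is added to hdr->replen so the reply-size
 * accounting stays in step with the extra operation.
 */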
+
/*
* END OF "GENERIC" ENCODE ROUTINES.
*/
+static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (args->sa_session)
+ return args->sa_session->clp->cl_minorversion;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
/*
* Encode an ACCESS request
*/
@@ -1351,11 +1701,12 @@ static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_access(&xdr, args->access, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1370,11 +1721,12 @@ static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_lookup(&xdr, args->name, &hdr);
encode_getfh(&xdr, &hdr);
@@ -1390,11 +1742,12 @@ static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struc
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_getfh(&xdr, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1409,11 +1762,12 @@ static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_remove(&xdr, &args->name, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1428,11 +1782,12 @@ static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->old_dir, &hdr);
encode_savefh(&xdr, &hdr);
encode_putfh(&xdr, args->new_dir, &hdr);
@@ -1451,11 +1806,12 @@ static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
@@ -1474,11 +1830,12 @@ static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_create(&xdr, args, &hdr);
@@ -1505,11 +1862,12 @@ static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nf
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -1523,11 +1881,12 @@ static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closea
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_close(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1542,11 +1901,12 @@ static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openarg
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_savefh(&xdr, &hdr);
encode_open(&xdr, args, &hdr);
@@ -1569,7 +1929,7 @@ static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open_confirm(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1583,11 +1943,12 @@ static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1602,11 +1963,12 @@ static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct n
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_open_downgrade(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1621,11 +1983,12 @@ static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_ar
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_lock(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1639,11 +2002,12 @@ static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_lockt(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1657,11 +2021,12 @@ static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_locku(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -1675,22 +2040,16 @@ static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct n
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- unsigned int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_readlink(&xdr, args, req, &hdr);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READLINK + status + string length = 8
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readlink_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->pglen);
encode_nops(&hdr);
return 0;
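
The deleted au_rslack arithmetic above is the pattern this patch retires across the file: rather than hand-counting reply words per procedure, every encode_*() adds its decode_*_maxsz to hdr->replen, and procedures that return data in pages simply convert the running total to bytes with hdr.replen << 2. A minimal sketch of the header seeding follows; the actual encode_compound_hdr() body is outside this hunk, so the formula is inferred from the removed computations and should be treated as an assumption.

/* Sketch only -- not part of the patch. */
static void encode_compound_hdr_sketch(struct xdr_stream *xdr,
				       struct rpc_rqst *req,
				       struct compound_hdr *hdr)
{
	struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;

	/* RPC reply header + credential slack + compound status, empty
	 * tag and op count (all counted in 32-bit words, as in the
	 * removed replen computations above) */
	hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3;

	/* ...tag, minorversion and nops are then encoded as before;
	 * each subsequent encode_*() does hdr->replen += decode_*_maxsz,
	 * so hdr.replen << 2 is the byte offset where page data begins. */
}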
@@ -1703,25 +2062,19 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_readdir(&xdr, args, req, &hdr);
- /* set up reply kvec
- * toplevel_status + taglen + rescount + OP_PUTFH + status
- * + OP_READDIR + status + verifer(2) = 9
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_readdir_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
args->pgbase, args->count);
dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
- __func__, replen, args->pages,
+ __func__, hdr.replen << 2, args->pages,
args->pgbase, args->count);
encode_nops(&hdr);
return 0;
@@ -1732,24 +2085,18 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf
*/
static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
{
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- int replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_read(&xdr, args, &hdr);
- /* set up reply kvec
- * toplevel status + taglen=0 + rescount + OP_PUTFH + status
- * + OP_READ + status + eof + datalen = 9
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_read_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
args->pages, args->pgbase, args->count);
req->rq_rcv_buf.flags |= XDRBUF_READ;
encode_nops(&hdr);
@@ -1763,11 +2110,12 @@ static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_seta
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_setattr(&xdr, args, args->server, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1783,20 +2131,19 @@ nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
struct nfs_getaclargs *args)
{
struct xdr_stream xdr;
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- int replen;
+ uint32_t replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
+ replen = hdr.replen + nfs4_fattr_bitmap_maxsz + 1;
encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
- /* set up reply buffer: */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS4_dec_getacl_sz) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen,
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
args->acl_pages, args->acl_pgbase, args->acl_len);
encode_nops(&hdr);
return 0;
@@ -1809,11 +2156,12 @@ static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writea
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_write(&xdr, args, &hdr);
req->rq_snd_buf.flags |= XDRBUF_WRITE;
@@ -1829,11 +2177,12 @@ static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_write
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_commit(&xdr, args, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1848,11 +2197,12 @@ static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsin
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_fsinfo(&xdr, args->bitmask, &hdr);
encode_nops(&hdr);
@@ -1866,11 +2216,12 @@ static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct n
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
&hdr);
@@ -1885,11 +2236,12 @@ static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
@@ -1900,16 +2252,18 @@ static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs
/*
* GETATTR_BITMAP request
*/
-static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p, const struct nfs_fh *fhandle)
+static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_server_caps_arg *args)
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
- encode_putfh(&xdr, fhandle, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
+ encode_putfh(&xdr, args->fhandle, &hdr);
encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
FATTR4_WORD0_LINK_SUPPORT|
FATTR4_WORD0_SYMLINK_SUPPORT|
@@ -1929,7 +2283,7 @@ static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_renew(&xdr, clp, &hdr);
encode_nops(&hdr);
return 0;
@@ -1946,7 +2300,7 @@ static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_setclientid(&xdr, sc, &hdr);
encode_nops(&hdr);
return 0;
@@ -1964,7 +2318,7 @@ static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
encode_setclientid_confirm(&xdr, clp, &hdr);
encode_putrootfh(&xdr, &hdr);
encode_fsinfo(&xdr, lease_bitmap, &hdr);
@@ -1979,11 +2333,12 @@ static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struc
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fhandle, &hdr);
encode_delegreturn(&xdr, args->stateid, &hdr);
encode_getfattr(&xdr, args->bitmask, &hdr);
@@ -1998,28 +2353,119 @@ static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
- struct rpc_auth *auth = req->rq_task->tk_msg.rpc_cred->cr_auth;
- int replen;
+ uint32_t replen;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->dir_fh, &hdr);
encode_lookup(&xdr, args->name, &hdr);
+ replen = hdr.replen; /* get the attribute into args->page */
encode_fs_locations(&xdr, args->bitmask, &hdr);
- /* set up reply
- * toplevel_status + OP_PUTFH + status
- * + OP_LOOKUP + status + OP_GETATTR + status = 7
- */
- replen = (RPC_REPHDRSIZE + auth->au_rslack + 7) << 2;
- xdr_inline_pages(&req->rq_rcv_buf, replen, &args->page,
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
0, PAGE_SIZE);
encode_nops(&hdr);
return 0;
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * EXCHANGE_ID request
+ */
+static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_exchange_id_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_exchange_id(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a CREATE_SESSION request
+ */
+static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
+ struct nfs41_create_session_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_create_session(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a DESTROY_SESSION request
+ */
+static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_session *session)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = session->clp->cl_minorversion,
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_destroy_session(&xdr, session, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a SEQUENCE request
+ */
+static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_sequence_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(args),
+ };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, args, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+
+/*
+ * a GET_LEASE_TIME request
+ */
+static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
+ struct nfs4_get_lease_time_args *args)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
+ };
+ const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
+
+ xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->la_seq_args, &hdr);
+ encode_putrootfh(&xdr, &hdr);
+ encode_fsinfo(&xdr, lease_bitmap, &hdr);
+ encode_nops(&hdr);
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* START OF "GENERIC" DECODE ROUTINES.
* These may look a little ugly since they are imported from a "generic"
@@ -3657,7 +4103,7 @@ decode_savefh(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_SAVEFH);
}
-static int decode_setattr(struct xdr_stream *xdr, struct nfs_setattrres *res)
+static int decode_setattr(struct xdr_stream *xdr)
{
__be32 *p;
uint32_t bmlen;
@@ -3735,6 +4181,169 @@ static int decode_delegreturn(struct xdr_stream *xdr)
return decode_op_hdr(xdr, OP_DELEGRETURN);
}
+#if defined(CONFIG_NFS_V4_1)
+static int decode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_res *res)
+{
+ __be32 *p;
+ uint32_t dummy;
+ int status;
+ struct nfs_client *clp = res->client;
+
+ status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
+ if (status)
+ return status;
+
+ READ_BUF(8);
+ READ64(clp->cl_ex_clid);
+ READ_BUF(12);
+ READ32(clp->cl_seqid);
+ READ32(clp->cl_exchange_flags);
+
+ /* We ask for SP4_NONE */
+ READ32(dummy);
+ if (dummy != SP4_NONE)
+ return -EIO;
+
+ /* Throw away minor_id */
+ READ_BUF(8);
+
+ /* Throw away Major id */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ /* Throw away server_scope */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ /* Throw away Implementation id array */
+ READ_BUF(4);
+ READ32(dummy);
+ READ_BUF(dummy);
+
+ return 0;
+}
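+
For orientation, the reply fields decode_exchange_id() above keeps versus discards (a restatement of the READ_BUF()/READ32() sequence; the field names follow the NFSv4.1 spec):

/*
 * EXCHANGE_ID reply handling, in order:
 *
 *	eir_clientid		-> clp->cl_ex_clid		kept
 *	eir_sequenceid		-> clp->cl_seqid		kept
 *	eir_flags		-> clp->cl_exchange_flags	kept
 *	eir_state_protect	must be SP4_NONE, else -EIO
 *	server_owner minor_id and major_id, server_scope,
 *	implementation id array					skipped
 */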
+
+static int decode_chan_attrs(struct xdr_stream *xdr,
+ struct nfs4_channel_attrs *attrs)
+{
+ __be32 *p;
+ u32 nr_attrs;
+
+ READ_BUF(28);
+ READ32(attrs->headerpadsz);
+ READ32(attrs->max_rqst_sz);
+ READ32(attrs->max_resp_sz);
+ READ32(attrs->max_resp_sz_cached);
+ READ32(attrs->max_ops);
+ READ32(attrs->max_reqs);
+ READ32(nr_attrs);
+ if (unlikely(nr_attrs > 1)) {
+ printk(KERN_WARNING "%s: Invalid rdma channel attrs count %u\n",
+ __func__, nr_attrs);
+ return -EINVAL;
+ }
+ if (nr_attrs == 1)
+ READ_BUF(4); /* skip rdma_attrs */
+ return 0;
+}
+
+static int decode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_res *res)
+{
+ __be32 *p;
+ int status;
+ struct nfs_client *clp = res->client;
+ struct nfs4_session *session = clp->cl_session;
+
+ status = decode_op_hdr(xdr, OP_CREATE_SESSION);
+
+ if (status)
+ return status;
+
+ /* sessionid */
+ READ_BUF(NFS4_MAX_SESSIONID_LEN);
+ COPYMEM(&session->sess_id, NFS4_MAX_SESSIONID_LEN);
+
+ /* seqid, flags */
+ READ_BUF(8);
+ READ32(clp->cl_seqid);
+ READ32(session->flags);
+
+ /* Channel attributes */
+ status = decode_chan_attrs(xdr, &session->fc_attrs);
+ if (!status)
+ status = decode_chan_attrs(xdr, &session->bc_attrs);
+ return status;
+}
+
+static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_DESTROY_SESSION);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static int decode_sequence(struct xdr_stream *xdr,
+ struct nfs4_sequence_res *res,
+ struct rpc_rqst *rqstp)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_slot *slot;
+ struct nfs4_sessionid id;
+ u32 dummy;
+ int status;
+ __be32 *p;
+
+ if (!res->sr_session)
+ return 0;
+
+ status = decode_op_hdr(xdr, OP_SEQUENCE);
+ if (status)
+ goto out_err;
+
+ /*
+ * If the server returns different values for sessionID, slotID or
+ * sequence number, the server is looney tunes.
+ */
+ status = -ESERVERFAULT;
+
+ slot = &res->sr_session->fc_slot_table.slots[res->sr_slotid];
+ READ_BUF(NFS4_MAX_SESSIONID_LEN + 20);
+ COPYMEM(id.data, NFS4_MAX_SESSIONID_LEN);
+ if (memcmp(id.data, res->sr_session->sess_id.data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("%s Invalid session id\n", __func__);
+ goto out_err;
+ }
+ /* seqid */
+ READ32(dummy);
+ if (dummy != slot->seq_nr) {
+ dprintk("%s Invalid sequence number\n", __func__);
+ goto out_err;
+ }
+ /* slot id */
+ READ32(dummy);
+ if (dummy != res->sr_slotid) {
+ dprintk("%s Invalid slot id\n", __func__);
+ goto out_err;
+ }
+ /* highest slot id - currently not processed */
+ READ32(dummy);
+ /* target highest slot id - currently not processed */
+ READ32(dummy);
+ /* result flags - currently not processed */
+ READ32(dummy);
+ status = 0;
+out_err:
+ res->sr_status = status;
+ return status;
+#else /* CONFIG_NFS_V4_1 */
+ return 0;
+#endif /* CONFIG_NFS_V4_1 */
+}
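
decode_sequence() records the outcome in res->sr_status rather than releasing the slot itself; the per-request slot lifecycle implied by this patch looks roughly as follows. The function names appear in the fs/nfs/read.c hunk further down, but the exact division of labour between them is an assumption here.

/*
 * Rough slot lifecycle per request (sketch, not patch content):
 *
 *	rpc_call_prepare	nfs4_setup_sequence()		reserve a slot, fill
 *								seq_args/seq_res
 *	XDR encode		encode_sequence()		SEQUENCE leads the compound
 *	XDR decode		decode_sequence()		verify sessionid, seqid
 *								and slotid; set sr_status
 *	completion		nfs4_sequence_free_slot()	release the slot
 *	retry path		nfs4_restart_rpc()		used instead of
 *								rpc_restart_call()
 */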
+
/*
* END OF "GENERIC" DECODE ROUTINES.
*/
@@ -3752,6 +4361,9 @@ static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -3773,7 +4385,11 @@ static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_ac
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
status = decode_putfh(&xdr);
if (status != 0)
@@ -3796,7 +4412,11 @@ static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lo
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3819,7 +4439,11 @@ static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nf
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putrootfh(&xdr)) != 0)
goto out;
@@ -3839,7 +4463,11 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_rem
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3860,7 +4488,11 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_re
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3890,7 +4522,11 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3923,7 +4559,11 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_cr
int status;
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -3963,6 +4603,9 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_g
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -3979,12 +4622,13 @@ nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args
{
struct xdr_stream xdr;
struct compound_hdr hdr = {
- .nops = 0,
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
};
int status;
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
- encode_compound_hdr(&xdr, &hdr);
+ encode_compound_hdr(&xdr, req, &hdr);
+ encode_sequence(&xdr, &args->seq_args, &hdr);
encode_putfh(&xdr, args->fh, &hdr);
status = encode_setacl(&xdr, args, &hdr);
encode_nops(&hdr);
@@ -3995,7 +4639,8 @@ nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args
* Decode SETACL response
*/
static int
-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p, void *res)
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs_setaclres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4005,10 +4650,13 @@ nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p, void *res)
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_setattr(&xdr, res);
+ status = decode_setattr(&xdr);
out:
return status;
}
@@ -4017,7 +4665,8 @@ out:
* Decode GETACL response
*/
static int
-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p, size_t *acl_len)
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs_getaclres *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4027,10 +4676,13 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p, size_t *acl_len)
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_getacl(&xdr, rqstp, acl_len);
+ status = decode_getacl(&xdr, rqstp, &res->acl_len);
out:
return status;
@@ -4049,6 +4701,9 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4079,6 +4734,9 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openr
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4133,6 +4791,9 @@ static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nf
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4157,10 +4818,13 @@ static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_se
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
- status = decode_setattr(&xdr, res);
+ status = decode_setattr(&xdr);
if (status)
goto out;
decode_getfattr(&xdr, res->fattr, res->server);
@@ -4181,6 +4845,9 @@ static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock_
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4202,6 +4869,9 @@ static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4223,6 +4893,9 @@ static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4234,7 +4907,8 @@ out:
/*
* Decode READLINK response
*/
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p, void *res)
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs4_readlink_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4244,6 +4918,9 @@ static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p, void *res)
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4265,6 +4942,9 @@ static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_r
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4286,6 +4966,9 @@ static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, __be32 *p, struct nfs_readr
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4309,6 +4992,9 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writ
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4335,6 +5021,9 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_wri
status = decode_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
status = decode_putfh(&xdr);
if (status)
goto out;
@@ -4349,7 +5038,8 @@ out:
/*
* FSINFO request
*/
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_fsinfo_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4358,16 +5048,19 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs_fsinf
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_fsinfo(&xdr, fsinfo);
+ status = decode_fsinfo(&xdr, res->fsinfo);
return status;
}
/*
* PATHCONF request
*/
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *pathconf)
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_pathconf_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4376,16 +5069,19 @@ static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p, struct nfs_pat
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_pathconf(&xdr, pathconf);
+ status = decode_pathconf(&xdr, res->pathconf);
return status;
}
/*
* STATFS request
*/
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *fsstat)
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_statfs_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4394,9 +5090,11 @@ static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p, struct nfs_fssta
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
if (!status)
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (!status)
status = decode_putfh(&xdr);
if (!status)
- status = decode_statfs(&xdr, fsstat);
+ status = decode_statfs(&xdr, res->fsstat);
return status;
}
@@ -4410,7 +5108,11 @@ static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4
int status;
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
- if ((status = decode_compound_hdr(&xdr, &hdr)) != 0)
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
@@ -4483,7 +5185,10 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nf
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
- if (status != 0)
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, rqstp);
+ if (status)
goto out;
status = decode_putfh(&xdr);
if (status != 0)
@@ -4497,7 +5202,8 @@ out:
/*
* FS_LOCATIONS request
*/
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations *res)
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_fs_locations_res *res)
{
struct xdr_stream xdr;
struct compound_hdr hdr;
@@ -4505,18 +5211,113 @@ static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs
xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
status = decode_compound_hdr(&xdr, &hdr);
- if (status != 0)
+ if (status)
+ goto out;
+ status = decode_sequence(&xdr, &res->seq_res, req);
+ if (status)
goto out;
if ((status = decode_putfh(&xdr)) != 0)
goto out;
if ((status = decode_lookup(&xdr)) != 0)
goto out;
xdr_enter_page(&xdr, PAGE_SIZE);
- status = decode_getfattr(&xdr, &res->fattr, res->server);
+ status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+ res->fs_locations->server);
out:
return status;
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * EXCHANGE_ID request
+ */
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+ void *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_exchange_id(&xdr, res);
+ return status;
+}
+
+/*
+ * a CREATE_SESSION request
+ */
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs41_create_session_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_create_session(&xdr, res);
+ return status;
+}
+
+/*
+ * a DESTROY_SESSION request
+ */
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
+ void *dummy)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_destroy_session(&xdr, dummy);
+ return status;
+}
+
+/*
+ * a SEQUENCE request
+ */
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_sequence_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, res, rqstp);
+ return status;
+}
+
+/*
+ * a GET_LEASE_TIME request
+ */
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+ struct nfs4_get_lease_time_res *res)
+{
+ struct xdr_stream xdr;
+ struct compound_hdr hdr;
+ int status;
+
+ xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+ status = decode_compound_hdr(&xdr, &hdr);
+ if (!status)
+ status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+ if (!status)
+ status = decode_putrootfh(&xdr);
+ if (!status)
+ status = decode_fsinfo(&xdr, res->lr_fsinfo);
+ return status;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
__be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus)
{
uint32_t bitmap[2] = {0};
@@ -4686,6 +5487,13 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(GETACL, enc_getacl, dec_getacl),
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
+#if defined(CONFIG_NFS_V4_1)
+ PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
+ PROC(CREATE_SESSION, enc_create_session, dec_create_session),
+ PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
+ PROC(SEQUENCE, enc_sequence, dec_sequence),
+ PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+#endif /* CONFIG_NFS_V4_1 */
};
struct rpc_version nfs_version4 = {
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index e3ed5908820b..8c55b27c0de4 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -92,6 +92,9 @@
#undef NFSROOT_DEBUG
#define NFSDBG_FACILITY NFSDBG_ROOT
+/* Default port to use if server is not running a portmapper */
+#define NFS_MNT_PORT 627
+
/* Default path we try to mount. "%s" gets replaced by our IP address */
#define NFS_ROOT "/tftpboot/%s"
@@ -487,6 +490,7 @@ static int __init root_nfs_get_handle(void)
{
struct nfs_fh fh;
struct sockaddr_in sin;
+ unsigned int auth_flav_len = 0;
struct nfs_mount_request request = {
.sap = (struct sockaddr *)&sin,
.salen = sizeof(sin),
@@ -496,6 +500,7 @@ static int __init root_nfs_get_handle(void)
.protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
XPRT_TRANSPORT_TCP : XPRT_TRANSPORT_UDP,
.fh = &fh,
+ .auth_flav_len = &auth_flav_len,
};
int status;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 4ace3c50a8eb..96c4ebfa46f4 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -22,6 +22,7 @@
#include <asm/system.h>
+#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
@@ -46,6 +47,7 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -357,19 +359,25 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
- return;
+ goto out;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
- return;
+ goto out;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
+ return;
+out:
+ nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
+ &data->res.seq_res);
+ return;
+
}
/*
@@ -406,7 +414,23 @@ static void nfs_readpage_release_partial(void *calldata)
nfs_readdata_release(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_read_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
+ if (nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
+ &data->args.seq_args, &data->res.seq_res,
+ 0, task))
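+ /* a nonzero return presumably means no session slot was available
+ * and the task was queued; do not start the RPC in that case */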
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_read_partial_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_partial,
.rpc_release = nfs_readpage_release_partial,
};
@@ -470,6 +494,9 @@ static void nfs_readpage_release_full(void *calldata)
}
static const struct rpc_call_ops nfs_read_full_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_read_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_readpage_result_full,
.rpc_release = nfs_readpage_release_full,
};
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 26127b69a275..0b4cbdc60abd 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -42,6 +42,8 @@
#include <linux/smp_lock.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
+#include <linux/mnt_namespace.h>
+#include <linux/namei.h>
#include <linux/nfs_idmap.h>
#include <linux/vfs.h>
#include <linux/inet.h>
@@ -90,6 +92,7 @@ enum {
Opt_mountport,
Opt_mountvers,
Opt_nfsvers,
+ Opt_minorversion,
/* Mount options that take string arguments */
Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
@@ -139,22 +142,23 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_fscache_uniq, "fsc=%s" },
{ Opt_nofscache, "nofsc" },
- { Opt_port, "port=%u" },
- { Opt_rsize, "rsize=%u" },
- { Opt_wsize, "wsize=%u" },
- { Opt_bsize, "bsize=%u" },
- { Opt_timeo, "timeo=%u" },
- { Opt_retrans, "retrans=%u" },
- { Opt_acregmin, "acregmin=%u" },
- { Opt_acregmax, "acregmax=%u" },
- { Opt_acdirmin, "acdirmin=%u" },
- { Opt_acdirmax, "acdirmax=%u" },
- { Opt_actimeo, "actimeo=%u" },
- { Opt_namelen, "namlen=%u" },
- { Opt_mountport, "mountport=%u" },
- { Opt_mountvers, "mountvers=%u" },
- { Opt_nfsvers, "nfsvers=%u" },
- { Opt_nfsvers, "vers=%u" },
+ { Opt_port, "port=%s" },
+ { Opt_rsize, "rsize=%s" },
+ { Opt_wsize, "wsize=%s" },
+ { Opt_bsize, "bsize=%s" },
+ { Opt_timeo, "timeo=%s" },
+ { Opt_retrans, "retrans=%s" },
+ { Opt_acregmin, "acregmin=%s" },
+ { Opt_acregmax, "acregmax=%s" },
+ { Opt_acdirmin, "acdirmin=%s" },
+ { Opt_acdirmax, "acdirmax=%s" },
+ { Opt_actimeo, "actimeo=%s" },
+ { Opt_namelen, "namlen=%s" },
+ { Opt_mountport, "mountport=%s" },
+ { Opt_mountvers, "mountvers=%s" },
+ { Opt_nfsvers, "nfsvers=%s" },
+ { Opt_nfsvers, "vers=%s" },
+ { Opt_minorversion, "minorversion=%u" },
{ Opt_sec, "sec=%s" },
{ Opt_proto, "proto=%s" },
@@ -270,10 +274,14 @@ static const struct super_operations nfs_sops = {
#ifdef CONFIG_NFS_V4
static int nfs4_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static int nfs4_remote_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static int nfs4_xdev_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static int nfs4_referral_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
static void nfs4_kill_super(struct super_block *sb);
static struct file_system_type nfs4_fs_type = {
@@ -284,6 +292,14 @@ static struct file_system_type nfs4_fs_type = {
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+static struct file_system_type nfs4_remote_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .get_sb = nfs4_remote_get_sb,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
struct file_system_type nfs4_xdev_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
@@ -292,6 +308,14 @@ struct file_system_type nfs4_xdev_fs_type = {
.fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
+static struct file_system_type nfs4_remote_referral_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .get_sb = nfs4_remote_referral_get_sb,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
struct file_system_type nfs4_referral_fs_type = {
.owner = THIS_MODULE,
.name = "nfs4",
@@ -514,7 +538,6 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
const char *nostr;
} nfs_info[] = {
{ NFS_MOUNT_SOFT, ",soft", ",hard" },
- { NFS_MOUNT_INTR, ",intr", ",nointr" },
{ NFS_MOUNT_POSIX, ",posix", "" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
@@ -943,11 +966,6 @@ static int nfs_parse_security_flavors(char *value,
return 1;
}
-static void nfs_parse_invalid_value(const char *option)
-{
- dfprintk(MOUNT, "NFS: bad value specified for %s option\n", option);
-}
-
/*
* Error-check and convert a string of mount options from user space into
* a data structure. The whole mount string is processed; bad options are
@@ -958,7 +976,7 @@ static int nfs_parse_mount_options(char *raw,
struct nfs_parsed_mount_data *mnt)
{
char *p, *string, *secdata;
- int rc, sloppy = 0, errors = 0;
+ int rc, sloppy = 0, invalid_option = 0;
if (!raw) {
dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
@@ -982,7 +1000,9 @@ static int nfs_parse_mount_options(char *raw,
while ((p = strsep(&raw, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
- int option, token;
+ unsigned long option;
+ int int_option;
+ int token;
if (!*p)
continue;
@@ -1091,114 +1111,156 @@ static int nfs_parse_mount_options(char *raw,
* options that take numeric values
*/
case Opt_port:
- if (match_int(args, &option) ||
- option < 0 || option > USHORT_MAX) {
- errors++;
- nfs_parse_invalid_value("port");
- } else
- mnt->nfs_server.port = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option > USHORT_MAX)
+ goto out_invalid_value;
+ mnt->nfs_server.port = option;
break;
case Opt_rsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("rsize");
- } else
- mnt->rsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->rsize = option;
break;
case Opt_wsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("wsize");
- } else
- mnt->wsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->wsize = option;
break;
case Opt_bsize:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("bsize");
- } else
- mnt->bsize = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->bsize = option;
break;
case Opt_timeo:
- if (match_int(args, &option) || option <= 0) {
- errors++;
- nfs_parse_invalid_value("timeo");
- } else
- mnt->timeo = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option == 0)
+ goto out_invalid_value;
+ mnt->timeo = option;
break;
case Opt_retrans:
- if (match_int(args, &option) || option <= 0) {
- errors++;
- nfs_parse_invalid_value("retrans");
- } else
- mnt->retrans = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option == 0)
+ goto out_invalid_value;
+ mnt->retrans = option;
break;
case Opt_acregmin:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acregmin");
- } else
- mnt->acregmin = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmin = option;
break;
case Opt_acregmax:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acregmax");
- } else
- mnt->acregmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmax = option;
break;
case Opt_acdirmin:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acdirmin");
- } else
- mnt->acdirmin = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acdirmin = option;
break;
case Opt_acdirmax:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("acdirmax");
- } else
- mnt->acdirmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acdirmax = option;
break;
case Opt_actimeo:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("actimeo");
- } else
- mnt->acregmin = mnt->acregmax =
- mnt->acdirmin = mnt->acdirmax = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->acregmin = mnt->acregmax =
+ mnt->acdirmin = mnt->acdirmax = option;
break;
case Opt_namelen:
- if (match_int(args, &option) || option < 0) {
- errors++;
- nfs_parse_invalid_value("namlen");
- } else
- mnt->namlen = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
+ mnt->namlen = option;
break;
case Opt_mountport:
- if (match_int(args, &option) ||
- option < 0 || option > USHORT_MAX) {
- errors++;
- nfs_parse_invalid_value("mountport");
- } else
- mnt->mount_server.port = option;
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 || option > USHORT_MAX)
+ goto out_invalid_value;
+ mnt->mount_server.port = option;
break;
case Opt_mountvers:
- if (match_int(args, &option) ||
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0 ||
option < NFS_MNT_VERSION ||
- option > NFS_MNT3_VERSION) {
- errors++;
- nfs_parse_invalid_value("mountvers");
- } else
- mnt->mount_server.version = option;
+ option > NFS_MNT3_VERSION)
+ goto out_invalid_value;
+ mnt->mount_server.version = option;
break;
case Opt_nfsvers:
- if (match_int(args, &option)) {
- errors++;
- nfs_parse_invalid_value("nfsvers");
- break;
- }
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = strict_strtoul(string, 10, &option);
+ kfree(string);
+ if (rc != 0)
+ goto out_invalid_value;
switch (option) {
case NFS2_VERSION:
mnt->flags &= ~NFS_MOUNT_VER3;
@@ -1207,10 +1269,16 @@ static int nfs_parse_mount_options(char *raw,
mnt->flags |= NFS_MOUNT_VER3;
break;
default:
- errors++;
- nfs_parse_invalid_value("nfsvers");
+ goto out_invalid_value;
}
break;
+ case Opt_minorversion:
+ if (match_int(args, &int_option))
+ return 0;
+ if (int_option < 0 || int_option > NFS4_MAX_MINOR_VERSION)
+ return 0;
+ mnt->minorversion = int_option;
+ break;
/*
* options that take text values
@@ -1222,9 +1290,9 @@ static int nfs_parse_mount_options(char *raw,
rc = nfs_parse_security_flavors(string, mnt);
kfree(string);
if (!rc) {
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"security flavor\n");
+ return 0;
}
break;
case Opt_proto:
@@ -1238,23 +1306,25 @@ static int nfs_parse_mount_options(char *raw,
case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ kfree(string);
break;
case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ kfree(string);
break;
case Opt_xprt_rdma:
/* vector side protocols to TCP */
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
xprt_load_transport(string);
+ kfree(string);
break;
default:
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"transport protocol\n");
+ return 0;
}
- kfree(string);
break;
case Opt_mountproto:
string = match_strdup(args);
@@ -1273,9 +1343,9 @@ static int nfs_parse_mount_options(char *raw,
break;
case Opt_xprt_rdma: /* not used for side protocols */
default:
- errors++;
dfprintk(MOUNT, "NFS: unrecognized "
"transport protocol\n");
+ return 0;
}
break;
case Opt_addr:
@@ -1331,9 +1401,9 @@ static int nfs_parse_mount_options(char *raw,
mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
break;
default:
- errors++;
dfprintk(MOUNT, "NFS: invalid "
"lookupcache argument\n");
+ return 0;
};
break;
@@ -1351,20 +1421,20 @@ static int nfs_parse_mount_options(char *raw,
break;
default:
- errors++;
+ invalid_option = 1;
dfprintk(MOUNT, "NFS: unrecognized mount option "
"'%s'\n", p);
}
}
- if (errors > 0) {
- dfprintk(MOUNT, "NFS: parsing encountered %d error%s\n",
- errors, (errors == 1 ? "" : "s"));
- if (!sloppy)
- return 0;
- }
+ if (!sloppy && invalid_option)
+ return 0;
+
return 1;
+out_invalid_value:
+	printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
+ return 0;
out_nomem:
printk(KERN_INFO "NFS: not enough memory to parse option\n");
return 0;
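
The converted cases above all follow one pattern: duplicate the option argument with match_strdup(), convert it with strict_strtoul(), free the copy, and jump to out_invalid_value on any parse failure. A minimal userspace sketch of that parse-then-validate idiom, using the standard strtoul() since strict_strtoul() is kernel-only; the helper name and error returns here are illustrative, not part of the patch:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Return 0 and store the parsed value, or a negative errno on failure. */
static int parse_uint_option(const char *arg, unsigned long *val)
{
	char *dup, *end;
	unsigned long v;

	dup = strdup(arg);			/* mirrors match_strdup() */
	if (dup == NULL)
		return -ENOMEM;
	errno = 0;
	v = strtoul(dup, &end, 10);
	if (errno != 0 || end == dup || *end != '\0') {
		free(dup);			/* reject overflow and trailing junk */
		return -EINVAL;
	}
	free(dup);
	*val = v;
	return 0;
}
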
@@ -1381,6 +1451,7 @@ out_security_failure:
static int nfs_try_mount(struct nfs_parsed_mount_data *args,
struct nfs_fh *root_fh)
{
+ unsigned int auth_flavor_len = 0;
struct nfs_mount_request request = {
.sap = (struct sockaddr *)
&args->mount_server.address,
@@ -1388,6 +1459,7 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
.protocol = args->mount_server.protocol,
.fh = root_fh,
.noresvport = args->flags & NFS_MOUNT_NORESVPORT,
+ .auth_flav_len = &auth_flavor_len,
};
int status;
@@ -2240,6 +2312,11 @@ static void nfs4_fill_super(struct super_block *sb)
nfs_initialise_sb(sb);
}
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
+{
+ args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3);
+}
+
/*
* Validate NFSv4 mount options
*/
@@ -2263,6 +2340,7 @@ static int nfs4_validate_mount_data(void *options,
args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
args->auth_flavors[0] = RPC_AUTH_UNIX;
args->auth_flavor_len = 0;
+ args->minorversion = 0;
switch (data->version) {
case 1:
@@ -2336,6 +2414,8 @@ static int nfs4_validate_mount_data(void *options,
nfs_validate_transport_protocol(args);
+ nfs4_validate_mount_flags(args);
+
if (args->auth_flavor_len > 1)
goto out_inval_auth;
@@ -2375,12 +2455,12 @@ out_no_client_address:
}
/*
- * Get the superblock for an NFS4 mountpoint
+ * Get the superblock for the NFS4 root partition
*/
-static int nfs4_get_sb(struct file_system_type *fs_type,
+static int nfs4_remote_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
{
- struct nfs_parsed_mount_data *data;
+ struct nfs_parsed_mount_data *data = raw_data;
struct super_block *s;
struct nfs_server *server;
struct nfs_fh *mntfh;
@@ -2391,18 +2471,12 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
};
int error = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
mntfh = kzalloc(sizeof(*mntfh), GFP_KERNEL);
if (data == NULL || mntfh == NULL)
goto out_free_fh;
security_init_mnt_opts(&data->lsm_opts);
- /* Validate the mount data */
- error = nfs4_validate_mount_data(raw_data, data, dev_name);
- if (error < 0)
- goto out;
-
/* Get a volume representation */
server = nfs4_create_server(data, mntfh);
if (IS_ERR(server)) {
@@ -2415,7 +2489,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
compare_super = NULL;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto out_free;
@@ -2452,14 +2526,9 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
error = 0;
out:
- kfree(data->client_address);
- kfree(data->nfs_server.export_path);
- kfree(data->nfs_server.hostname);
- kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
kfree(mntfh);
- kfree(data);
return error;
out_free:
@@ -2473,16 +2542,137 @@ error_splat_super:
goto out;
}
+static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
+ int flags, void *data, const char *hostname)
+{
+ struct vfsmount *root_mnt;
+ char *root_devname;
+ size_t len;
+
+ len = strlen(hostname) + 3;
+ root_devname = kmalloc(len, GFP_KERNEL);
+ if (root_devname == NULL)
+ return ERR_PTR(-ENOMEM);
+ snprintf(root_devname, len, "%s:/", hostname);
+ root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
+ kfree(root_devname);
+ return root_mnt;
+}
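
nfs_do_root_mount() mounts the server's root ("hostname:/") first; the +3 in the length accounts for the ':', the '/', and the terminating NUL. A small userspace sketch of just the device-name construction (build_root_devname() is an invented name for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_root_devname(const char *hostname)
{
	size_t len = strlen(hostname) + 3;	/* ':' + '/' + '\0' */
	char *devname = malloc(len);

	if (devname == NULL)
		return NULL;
	snprintf(devname, len, "%s:/", hostname);
	return devname;				/* caller frees */
}
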
+
+static void nfs_fix_devname(const struct path *path, struct vfsmount *mnt)
+{
+ char *page = (char *) __get_free_page(GFP_KERNEL);
+ char *devname, *tmp;
+
+ if (page == NULL)
+ return;
+ devname = nfs_path(path->mnt->mnt_devname,
+ path->mnt->mnt_root, path->dentry,
+ page, PAGE_SIZE);
+ if (devname == NULL)
+ goto out_freepage;
+ tmp = kstrdup(devname, GFP_KERNEL);
+ if (tmp == NULL)
+ goto out_freepage;
+ kfree(mnt->mnt_devname);
+ mnt->mnt_devname = tmp;
+out_freepage:
+ free_page((unsigned long)page);
+}
+
+static int nfs_follow_remote_path(struct vfsmount *root_mnt,
+ const char *export_path, struct vfsmount *mnt_target)
+{
+ struct mnt_namespace *ns_private;
+ struct nameidata nd;
+ struct super_block *s;
+ int ret;
+
+ ns_private = create_mnt_ns(root_mnt);
+ ret = PTR_ERR(ns_private);
+ if (IS_ERR(ns_private))
+ goto out_mntput;
+
+ ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt,
+ export_path, LOOKUP_FOLLOW, &nd);
+
+ put_mnt_ns(ns_private);
+
+ if (ret != 0)
+ goto out_err;
+
+ s = nd.path.mnt->mnt_sb;
+ atomic_inc(&s->s_active);
+ mnt_target->mnt_sb = s;
+ mnt_target->mnt_root = dget(nd.path.dentry);
+
+ /* Correct the device pathname */
+ nfs_fix_devname(&nd.path, mnt_target);
+
+ path_put(&nd.path);
+ down_write(&s->s_umount);
+ return 0;
+out_mntput:
+ mntput(root_mnt);
+out_err:
+ return ret;
+}
+
+/*
+ * Get the superblock for an NFS4 mountpoint
+ */
+static int nfs4_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
+{
+ struct nfs_parsed_mount_data *data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ int error = -ENOMEM;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ goto out_free_data;
+
+ /* Validate the mount data */
+ error = nfs4_validate_mount_data(raw_data, data, dev_name);
+ if (error < 0)
+ goto out;
+
+ export_path = data->nfs_server.export_path;
+ data->nfs_server.export_path = "/";
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+ data->nfs_server.hostname);
+ data->nfs_server.export_path = export_path;
+
+ error = PTR_ERR(root_mnt);
+ if (IS_ERR(root_mnt))
+ goto out;
+
+ error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+
+out:
+ kfree(data->client_address);
+ kfree(data->nfs_server.export_path);
+ kfree(data->nfs_server.hostname);
+ kfree(data->fscache_uniq);
+out_free_data:
+ kfree(data);
+ dprintk("<-- nfs4_get_sb() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return error;
+}
+
static void nfs4_kill_super(struct super_block *sb)
{
struct nfs_server *server = NFS_SB(sb);
+ dprintk("--> %s\n", __func__);
nfs_super_return_all_delegations(sb);
kill_anon_super(sb);
-
nfs4_renewd_prepare_shutdown(server);
nfs_fscache_release_super_cookie(sb);
nfs_free_server(server);
+ dprintk("<-- %s\n", __func__);
}
/*
@@ -2568,12 +2758,9 @@ error_splat_super:
return error;
}
-/*
- * Create an NFS4 server record on referral traversal
- */
-static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *raw_data,
- struct vfsmount *mnt)
+static int nfs4_remote_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data,
+ struct vfsmount *mnt)
{
struct nfs_clone_mount *data = raw_data;
struct super_block *s;
@@ -2652,4 +2839,36 @@ error_splat_super:
return error;
}
+/*
+ * Create an NFS4 server record on referral traversal
+ */
+static int nfs4_referral_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data,
+ struct vfsmount *mnt)
+{
+ struct nfs_clone_mount *data = raw_data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ int error;
+
+ dprintk("--> nfs4_referral_get_sb()\n");
+
+ export_path = data->mnt_path;
+ data->mnt_path = "/";
+
+ root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
+ flags, data, data->hostname);
+ data->mnt_path = export_path;
+
+ error = PTR_ERR(root_mnt);
+ if (IS_ERR(root_mnt))
+ goto out;
+
+ error = nfs_follow_remote_path(root_mnt, export_path, mnt);
+out:
+ dprintk("<-- nfs4_referral_get_sb() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return error;
+}
+
#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index ecc295347775..1064c91ae810 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include "internal.h"
+#include "nfs4_fs.h"
struct nfs_unlinkdata {
struct hlist_node list;
@@ -82,7 +83,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
struct inode *dir = data->dir;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
}
/**
@@ -102,9 +103,25 @@ static void nfs_async_unlink_release(void *calldata)
nfs_sb_deactive(sb);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_unlinkdata *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->dir);
+
+ if (nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_call_done = nfs_async_unlink_done,
.rpc_release = nfs_async_unlink_release,
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_unlink_prepare,
+#endif /* CONFIG_NFS_V4_1 */
};
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
@@ -241,6 +258,7 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
status = PTR_ERR(data->cred);
goto out_free;
}
+ data->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
status = -EBUSY;
spin_lock(&dentry->d_lock);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e560a78995a3..ce728829f79a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -25,6 +25,7 @@
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
+#include "nfs4_fs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -52,6 +53,7 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
}
return p;
}
@@ -71,6 +73,7 @@ struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
p->npages = pagecount;
+ p->res.seq_res.sr_slotid = NFS4_MAX_SLOT_TABLE;
if (pagecount <= ARRAY_SIZE(p->page_array))
p->pagevec = p->page_array;
else {
@@ -1048,7 +1051,23 @@ out:
nfs_writedata_release(calldata);
}
+#if defined(CONFIG_NFS_V4_1)
+void nfs_write_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_client *clp = (NFS_SERVER(data->inode))->nfs_client;
+
+ if (nfs4_setup_sequence(clp, &data->args.seq_args,
+ &data->res.seq_res, 1, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
static const struct rpc_call_ops nfs_write_partial_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_partial,
.rpc_release = nfs_writeback_release_partial,
};
@@ -1111,6 +1130,9 @@ remove_request:
}
static const struct rpc_call_ops nfs_write_full_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_writeback_done_full,
.rpc_release = nfs_writeback_release_full,
};
@@ -1123,6 +1145,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
+ struct nfs_server *server = NFS_SERVER(data->inode);
int status;
dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1155,7 +1178,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
- NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+ server->nfs_client->cl_hostname,
resp->verf->committed, argp->stable);
complain = jiffies + 300 * HZ;
}
@@ -1181,7 +1204,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
@@ -1193,6 +1216,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
+ nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
return 0;
}
@@ -1349,6 +1373,9 @@ static void nfs_commit_release(void *calldata)
}
static const struct rpc_call_ops nfs_commit_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs_write_prepare,
+#endif /* CONFIG_NFS_V4_1 */
.rpc_call_done = nfs_commit_done,
.rpc_release = nfs_commit_release,
};
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8b1f8efb4690..b92a27629fb7 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -464,16 +464,11 @@ static int secinfo_parse(char **mesg, char *buf, struct svc_export *exp)
if (err)
return err;
/*
- * Just a quick sanity check; we could also try to check
- * whether this pseudoflavor is supported, but at worst
- * an unsupported pseudoflavor on the export would just
- * be a pseudoflavor that won't match the flavor of any
- * authenticated request. The administrator will
- * probably discover the problem when someone fails to
- * authenticate.
+ * XXX: It would be nice to also check whether this
+ * pseudoflavor is supported, so we can discover the
+ * problem at export time instead of when a client fails
+ * to authenticate.
*/
- if (f->pseudoflavor < 0)
- return -EINVAL;
err = get_int(mesg, &f->flags);
if (err)
return err;
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 7c9fe838f038..a713c418a922 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -652,8 +652,6 @@ nfsd3_proc_commit(struct svc_rqst * rqstp, struct nfsd3_commitargs *argp,
* NFSv3 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
-#define nfs3svc_decode_voidargs NULL
-#define nfs3svc_release_void NULL
#define nfs3svc_decode_fhandleargs nfs3svc_decode_fhandle
#define nfs3svc_encode_attrstatres nfs3svc_encode_attrstat
#define nfs3svc_encode_wccstatres nfs3svc_encode_wccstat
@@ -686,28 +684,219 @@ struct nfsd3_voidargs { int dummy; };
#define WC (7+pAT) /* WCC attributes */
static struct svc_procedure nfsd_procedures3[22] = {
- PROC(null, void, void, void, RC_NOCACHE, ST),
- PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT),
- PROC(setattr, sattr, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(lookup, dirop, dirop, fhandle2, RC_NOCACHE, ST+FH+pAT+pAT),
- PROC(access, access, access, fhandle, RC_NOCACHE, ST+pAT+1),
- PROC(readlink, readlink, readlink, fhandle, RC_NOCACHE, ST+pAT+1+NFS3_MAXPATHLEN/4),
- PROC(read, read, read, fhandle, RC_NOCACHE, ST+pAT+4+NFSSVC_MAXBLKSIZE/4),
- PROC(write, write, write, fhandle, RC_REPLBUFF, ST+WC+4),
- PROC(create, create, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(mkdir, mkdir, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(symlink, symlink, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(mknod, mknod, create, fhandle2, RC_REPLBUFF, ST+(1+FH+pAT)+WC),
- PROC(remove, dirop, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(rmdir, dirop, wccstat, fhandle, RC_REPLBUFF, ST+WC),
- PROC(rename, rename, rename, fhandle2, RC_REPLBUFF, ST+WC+WC),
- PROC(link, link, link, fhandle2, RC_REPLBUFF, ST+pAT+WC),
- PROC(readdir, readdir, readdir, fhandle, RC_NOCACHE, 0),
- PROC(readdirplus,readdirplus, readdir, fhandle, RC_NOCACHE, 0),
- PROC(fsstat, fhandle, fsstat, void, RC_NOCACHE, ST+pAT+2*6+1),
- PROC(fsinfo, fhandle, fsinfo, void, RC_NOCACHE, ST+pAT+12),
- PROC(pathconf, fhandle, pathconf, void, RC_NOCACHE, ST+pAT+6),
- PROC(commit, commit, commit, fhandle, RC_NOCACHE, ST+WC+2),
+ [NFS3PROC_NULL] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_null,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd3_voidargs),
+ .pc_ressize = sizeof(struct nfsd3_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFS3PROC_GETATTR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_getattr,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_attrstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_attrstatres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFS3PROC_SETATTR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_setattr,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_sattrargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_sattrargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_LOOKUP] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_lookup,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_diropres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+FH+pAT+pAT,
+ },
+ [NFS3PROC_ACCESS] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_access,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_accessargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_accessres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_accessargs),
+ .pc_ressize = sizeof(struct nfsd3_accessres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+1,
+ },
+ [NFS3PROC_READLINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readlink,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readlinkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readlinkres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readlinkargs),
+ .pc_ressize = sizeof(struct nfsd3_readlinkres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4,
+ },
+ [NFS3PROC_READ] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_read,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readargs),
+ .pc_ressize = sizeof(struct nfsd3_readres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4,
+ },
+ [NFS3PROC_WRITE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_write,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_writeargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_writeres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_writeargs),
+ .pc_ressize = sizeof(struct nfsd3_writeres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC+4,
+ },
+ [NFS3PROC_CREATE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_create,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_createargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_MKDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_mkdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_mkdirargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_mkdirargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_SYMLINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_symlink,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_symlinkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_symlinkargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_MKNOD] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_mknod,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_mknodargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_createres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_mknodargs),
+ .pc_ressize = sizeof(struct nfsd3_createres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+(1+FH+pAT)+WC,
+ },
+ [NFS3PROC_REMOVE] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_remove,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_RMDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_rmdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_diropargs),
+ .pc_ressize = sizeof(struct nfsd3_wccstatres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC,
+ },
+ [NFS3PROC_RENAME] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_rename,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_renameargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_renameres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_renameargs),
+ .pc_ressize = sizeof(struct nfsd3_renameres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+WC+WC,
+ },
+ [NFS3PROC_LINK] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_link,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_linkargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_linkres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2,
+ .pc_argsize = sizeof(struct nfsd3_linkargs),
+ .pc_ressize = sizeof(struct nfsd3_linkres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+pAT+WC,
+ },
+ [NFS3PROC_READDIR] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readdir,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readdirargs),
+ .pc_ressize = sizeof(struct nfsd3_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFS3PROC_READDIRPLUS] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_readdirplus,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirplusargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_readdirplusargs),
+ .pc_ressize = sizeof(struct nfsd3_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFS3PROC_FSSTAT] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_fsstat,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_fsstatres,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_fsstatres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+2*6+1,
+ },
+ [NFS3PROC_FSINFO] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_fsinfo,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_fsinfores,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_fsinfores),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+12,
+ },
+ [NFS3PROC_PATHCONF] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_pathconf,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_pathconfres,
+ .pc_argsize = sizeof(struct nfsd3_fhandleargs),
+ .pc_ressize = sizeof(struct nfsd3_pathconfres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+pAT+6,
+ },
+ [NFS3PROC_COMMIT] = {
+ .pc_func = (svc_procfunc) nfsd3_proc_commit,
+ .pc_decode = (kxdrproc_t) nfs3svc_decode_commitargs,
+ .pc_encode = (kxdrproc_t) nfs3svc_encode_commitres,
+ .pc_release = (kxdrproc_t) nfs3svc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd3_commitargs),
+ .pc_ressize = sizeof(struct nfsd3_commitres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+WC+2,
+ },
};
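
The rewritten table above swaps the positional PROC() macro for C99 designated initializers keyed by procedure number, so every field is named explicitly and any unmentioned slot is zero-initialized. A standalone illustration of the idiom (the enum, struct, and handlers below are made up for the example):

enum { DEMO_NULL = 0, DEMO_GETATTR = 1, DEMO_NPROCS = 2 };

struct demo_proc {
	const char *name;
	int (*handler)(void);
	unsigned int cachetype;
};

static int demo_null(void)    { return 0; }
static int demo_getattr(void) { return 0; }

/* Entries are indexed by procedure number; unset fields default to 0/NULL. */
static const struct demo_proc demo_procs[DEMO_NPROCS] = {
	[DEMO_NULL] = {
		.name      = "null",
		.handler   = demo_null,
		.cachetype = 0,
	},
	[DEMO_GETATTR] = {
		.name      = "getattr",
		.handler   = demo_getattr,
		.cachetype = 0,
	},
};
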
struct svc_version nfsd_version3 = {
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 17d0dd997204..01d4ec1c88e0 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -272,6 +272,7 @@ void fill_post_wcc(struct svc_fh *fhp)
err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
&fhp->fh_post_attr);
+ fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
if (err)
fhp->fh_post_saved = 0;
else
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 290289bd44f7..3fd23f7aceca 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -140,8 +140,10 @@ struct nfs4_cb_compound_hdr {
int status;
u32 ident;
u32 nops;
+ __be32 *nops_p;
+ u32 minorversion;
u32 taglen;
- char * tag;
+ char *tag;
};
static struct {
@@ -201,33 +203,39 @@ nfs_cb_stat_to_errno(int stat)
* XDR encode
*/
-static int
+static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
__be32 * p;
RESERVE_SPACE(16);
WRITE32(0); /* tag length is always 0 */
- WRITE32(NFS4_MINOR_VERSION);
+ WRITE32(hdr->minorversion);
WRITE32(hdr->ident);
+ hdr->nops_p = p;
WRITE32(hdr->nops);
- return 0;
}
-static int
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec)
+static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
+{
+ *hdr->nops_p = htonl(hdr->nops);
+}
+
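
encode_cb_compound_hdr() now remembers where the operation count lives (hdr->nops_p) and encode_cb_nops() backfills it once every operation has been encoded, so callers no longer need to know the count up front. A userspace sketch of the same reserve-then-patch trick; the buffer type and helpers are illustrative, not the kernel's XDR API:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct enc {				/* zero-initialize before use: struct enc e = {0}; */
	unsigned char buf[64];		/* no bounds checking; sketch only */
	size_t off;
	unsigned char *nops_p;		/* points at the reserved count word */
	uint32_t nops;
};

static void put32(struct enc *e, uint32_t val)
{
	uint32_t be = htonl(val);

	memcpy(e->buf + e->off, &be, 4);
	e->off += 4;
}

static void encode_hdr(struct enc *e)
{
	put32(e, 0);			/* tag length */
	put32(e, 1);			/* minorversion (example value) */
	e->nops_p = e->buf + e->off;	/* remember the count slot ... */
	put32(e, 0);			/* ... and reserve it */
}

static void encode_one_op(struct enc *e, uint32_t opnum)
{
	put32(e, opnum);
	e->nops++;			/* count ops as they are encoded */
}

static void finish(struct enc *e)
{
	uint32_t be = htonl(e->nops);

	memcpy(e->nops_p, &be, 4);	/* backfill the real op count */
}
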
+static void
+encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
+ struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
- int len = cb_rec->cbr_fh.fh_size;
+ int len = dp->dl_fh.fh_size;
- RESERVE_SPACE(12+sizeof(cb_rec->cbr_stateid) + len);
+ RESERVE_SPACE(12+sizeof(dp->dl_stateid) + len);
WRITE32(OP_CB_RECALL);
- WRITE32(cb_rec->cbr_stateid.si_generation);
- WRITEMEM(&cb_rec->cbr_stateid.si_opaque, sizeof(stateid_opaque_t));
- WRITE32(cb_rec->cbr_trunc);
+ WRITE32(dp->dl_stateid.si_generation);
+ WRITEMEM(&dp->dl_stateid.si_opaque, sizeof(stateid_opaque_t));
+ WRITE32(0); /* truncate optimization not implemented */
WRITE32(len);
- WRITEMEM(&cb_rec->cbr_fh.fh_base, len);
- return 0;
+ WRITEMEM(&dp->dl_fh.fh_base, len);
+ hdr->nops++;
}
static int
@@ -241,17 +249,18 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
}
static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_cb_recall *args)
+nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args)
{
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr = {
- .ident = args->cbr_ident,
- .nops = 1,
+ .ident = args->dl_ident,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
- return (encode_cb_recall(&xdr, args));
+ encode_cb_recall(&xdr, args, &hdr);
+ encode_cb_nops(&hdr);
+ return 0;
}
@@ -358,18 +367,21 @@ static struct rpc_program cb_program = {
.pipe_dir_name = "/nfsd4_cb",
};
+static int max_cb_time(void)
+{
+ return max(NFSD_LEASE_TIME/10, (time_t)1) * HZ;
+}
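
As a rough worked example, and assuming the usual 90-second NFSD_LEASE_TIME, max_cb_time() above evaluates to max(90/10, 1) * HZ = 9 * HZ, i.e. roughly a nine-second callback RPC timeout, paired with to_retries = 0 in the rpc_timeout just below.
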
+
/* Reference counting, callback cleanup, etc., all look racy as heck.
* And why is cb_set an atomic? */
-static struct rpc_clnt *setup_callback_client(struct nfs4_client *clp)
+int setup_callback_client(struct nfs4_client *clp)
{
struct sockaddr_in addr;
- struct nfs4_callback *cb = &clp->cl_callback;
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_timeout timeparms = {
- .to_initval = (NFSD_LEASE_TIME/4) * HZ,
- .to_retries = 5,
- .to_maxval = (NFSD_LEASE_TIME/2) * HZ,
- .to_exponential = 1,
+ .to_initval = max_cb_time(),
+ .to_retries = 0,
};
struct rpc_create_args args = {
.protocol = IPPROTO_TCP,
@@ -386,7 +398,7 @@ static struct rpc_clnt *setup_callback_client(struct nfs4_client *clp)
struct rpc_clnt *client;
if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Initialize address */
memset(&addr, 0, sizeof(addr));
@@ -396,48 +408,77 @@ static struct rpc_clnt *setup_callback_client(struct nfs4_client *clp)
/* Create RPC client */
client = rpc_create(&args);
- if (IS_ERR(client))
+ if (IS_ERR(client)) {
dprintk("NFSD: couldn't create callback client: %ld\n",
PTR_ERR(client));
- return client;
+ return PTR_ERR(client);
+ }
+ cb->cb_client = client;
+ return 0;
+
+}
+
+static void warn_no_callback_path(struct nfs4_client *clp, int reason)
+{
+ dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
+ (int)clp->cl_name.len, clp->cl_name.data, reason);
+}
+
+static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_client *clp = calldata;
+
+ if (task->tk_status)
+ warn_no_callback_path(clp, task->tk_status);
+ else
+ atomic_set(&clp->cl_cb_conn.cb_set, 1);
+ put_nfs4_client(clp);
+}
+
+static const struct rpc_call_ops nfsd4_cb_probe_ops = {
+ .rpc_call_done = nfsd4_cb_probe_done,
+};
+static struct rpc_cred *lookup_cb_cred(struct nfs4_cb_conn *cb)
+{
+ struct auth_cred acred = {
+ .machine_cred = 1
+ };
+
+ /*
+ * Note in the gss case this doesn't actually have to wait for a
+ * gss upcall (or any calls to the client); this just creates a
+ * non-uptodate cred which the rpc state machine will fill in with
+ * a refresh_upcall later.
+ */
+ return rpcauth_lookup_credcache(cb->cb_client->cl_auth, &acred,
+ RPCAUTH_LOOKUP_NEW);
}
-static int do_probe_callback(void *data)
+void do_probe_callback(struct nfs4_client *clp)
{
- struct nfs4_client *clp = data;
- struct nfs4_callback *cb = &clp->cl_callback;
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_NULL],
.rpc_argp = clp,
};
- struct rpc_clnt *client;
+ struct rpc_cred *cred;
int status;
- client = setup_callback_client(clp);
- if (IS_ERR(client)) {
- status = PTR_ERR(client);
- dprintk("NFSD: couldn't create callback client: %d\n",
- status);
- goto out_err;
+ cred = lookup_cb_cred(cb);
+ if (IS_ERR(cred)) {
+ status = PTR_ERR(cred);
+ goto out;
+ }
+ cb->cb_cred = cred;
+ msg.rpc_cred = cb->cb_cred;
+ status = rpc_call_async(cb->cb_client, &msg, RPC_TASK_SOFT,
+ &nfsd4_cb_probe_ops, (void *)clp);
+out:
+ if (status) {
+ warn_no_callback_path(clp, status);
+ put_nfs4_client(clp);
}
-
- status = rpc_call_sync(client, &msg, RPC_TASK_SOFT);
-
- if (status)
- goto out_release_client;
-
- cb->cb_client = client;
- atomic_set(&cb->cb_set, 1);
- put_nfs4_client(clp);
- return 0;
-out_release_client:
- rpc_shutdown_client(client);
-out_err:
- dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
- (int)clp->cl_name.len, clp->cl_name.data, status);
- put_nfs4_client(clp);
- return 0;
}
/*
@@ -446,21 +487,65 @@ out_err:
void
nfsd4_probe_callback(struct nfs4_client *clp)
{
- struct task_struct *t;
+ int status;
- BUG_ON(atomic_read(&clp->cl_callback.cb_set));
+ BUG_ON(atomic_read(&clp->cl_cb_conn.cb_set));
+
+ status = setup_callback_client(clp);
+ if (status) {
+ warn_no_callback_path(clp, status);
+ return;
+ }
/* the task holds a reference to the nfs4_client struct */
atomic_inc(&clp->cl_count);
- t = kthread_run(do_probe_callback, clp, "nfs4_cb_probe");
+ do_probe_callback(clp);
+}
- if (IS_ERR(t))
- atomic_dec(&clp->cl_count);
+static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
- return;
+ switch (task->tk_status) {
+ case -EIO:
+ /* Network partition? */
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ warn_no_callback_path(clp, task->tk_status);
+ case -EBADHANDLE:
+ case -NFS4ERR_BAD_STATEID:
+ /* Race: client probably got cb_recall
+ * before open reply granting delegation */
+ break;
+ default:
+ /* success, or error we can't handle */
+ return;
+ }
+ if (dp->dl_retries--) {
+ rpc_delay(task, 2*HZ);
+ task->tk_status = 0;
+ rpc_restart_call(task);
+ } else {
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ warn_no_callback_path(clp, task->tk_status);
+ }
+}
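
The synchronous retry loop removed further down (ssleep() followed by a second rpc_call_sync()) becomes this asynchronous policy: transient errors re-arm the same task with rpc_delay() while dl_retries lasts, and anything else marks the callback path down. A loose userspace rendering of that bounded-retry shape; all names and the stub failure injector are invented:

#include <unistd.h>

static int fail_count = 2;

/* Stub: fail twice, then succeed. */
static int do_recall_once(void)
{
	return fail_count-- > 0 ? -1 : 0;
}

static int recall_with_retries(int retries)
{
	int status;

	for (;;) {
		status = do_recall_once();
		if (status == 0)
			return 0;		/* recall acknowledged */
		if (retries-- <= 0)
			return status;		/* give up; callback path is down */
		/* treat any failure as transient for this sketch */
		sleep(2);			/* stands in for rpc_delay(task, 2*HZ) */
	}
}
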
+
+static void nfsd4_cb_recall_release(void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+
+ nfs4_put_delegation(dp);
+ put_nfs4_client(clp);
}
+static const struct rpc_call_ops nfsd4_cb_recall_ops = {
+ .rpc_call_done = nfsd4_cb_recall_done,
+ .rpc_release = nfsd4_cb_recall_release,
+};
+
/*
* called with dp->dl_count inc'ed.
*/
@@ -468,41 +553,19 @@ void
nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_client;
- struct rpc_clnt *clnt = clp->cl_callback.cb_client;
- struct nfs4_cb_recall *cbr = &dp->dl_recall;
+ struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
- .rpc_argp = cbr,
+ .rpc_argp = dp,
+ .rpc_cred = clp->cl_cb_conn.cb_cred
};
- int retries = 1;
- int status = 0;
-
- cbr->cbr_trunc = 0; /* XXX need to implement truncate optimization */
- cbr->cbr_dp = dp;
-
- status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
- while (retries--) {
- switch (status) {
- case -EIO:
- /* Network partition? */
- atomic_set(&clp->cl_callback.cb_set, 0);
- case -EBADHANDLE:
- case -NFS4ERR_BAD_STATEID:
- /* Race: client probably got cb_recall
- * before open reply granting delegation */
- break;
- default:
- goto out_put_cred;
- }
- ssleep(2);
- status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
+ int status;
+
+ dp->dl_retries = 1;
+ status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
+ &nfsd4_cb_recall_ops, dp);
+ if (status) {
+ put_nfs4_client(clp);
+ nfs4_put_delegation(dp);
}
-out_put_cred:
- /*
- * Success or failure, now we're either waiting for lease expiration
- * or deleg_return.
- */
- put_nfs4_client(clp);
- nfs4_put_delegation(dp);
- return;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index b2883e9c6381..7c8801769a3c 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -51,6 +51,78 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
+static u32 nfsd_attrmask[] = {
+ NFSD_WRITEABLE_ATTRS_WORD0,
+ NFSD_WRITEABLE_ATTRS_WORD1,
+ NFSD_WRITEABLE_ATTRS_WORD2
+};
+
+static u32 nfsd41_ex_attrmask[] = {
+ NFSD_SUPPATTR_EXCLCREAT_WORD0,
+ NFSD_SUPPATTR_EXCLCREAT_WORD1,
+ NFSD_SUPPATTR_EXCLCREAT_WORD2
+};
+
+static __be32
+check_attr_support(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ u32 *bmval, u32 *writable)
+{
+ struct dentry *dentry = cstate->current_fh.fh_dentry;
+ struct svc_export *exp = cstate->current_fh.fh_export;
+
+ /*
+	 * Check whether the attributes are supported by the NFSv4 server.
+ * According to spec, unsupported attributes return ERR_ATTRNOTSUPP.
+ */
+ if ((bmval[0] & ~nfsd_suppattrs0(cstate->minorversion)) ||
+ (bmval[1] & ~nfsd_suppattrs1(cstate->minorversion)) ||
+ (bmval[2] & ~nfsd_suppattrs2(cstate->minorversion)))
+ return nfserr_attrnotsupp;
+
+ /*
+	 * Check whether FATTR4_WORD0_ACL and FATTR4_WORD0_FS_LOCATIONS can be
+	 * supported in the current environment.
+ */
+ if (bmval[0] & FATTR4_WORD0_ACL) {
+ if (!IS_POSIXACL(dentry->d_inode))
+ return nfserr_attrnotsupp;
+ }
+ if (bmval[0] & FATTR4_WORD0_FS_LOCATIONS) {
+ if (exp->ex_fslocs.locations == NULL)
+ return nfserr_attrnotsupp;
+ }
+
+ /*
+ * According to spec, read-only attributes return ERR_INVAL.
+ */
+ if (writable) {
+ if ((bmval[0] & ~writable[0]) || (bmval[1] & ~writable[1]) ||
+ (bmval[2] & ~writable[2]))
+ return nfserr_inval;
+ }
+
+ return nfs_ok;
+}
+
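
check_attr_support() reduces to two per-word bitmask tests: any bit outside the supported mask yields ERR_ATTRNOTSUPP, and, when a writable mask is supplied, any bit outside the writable mask yields ERR_INVAL. A compact standalone version of just the mask arithmetic; the constants and return codes are placeholders:

#include <stdint.h>

#define ERR_OK          0
#define ERR_ATTRNOTSUPP 1
#define ERR_INVAL       2

static int check_bitmap(const uint32_t bmval[3], const uint32_t supported[3],
			const uint32_t *writable /* may be NULL */)
{
	int i;

	for (i = 0; i < 3; i++)
		if (bmval[i] & ~supported[i])
			return ERR_ATTRNOTSUPP;	/* unsupported attribute requested */
	if (writable)
		for (i = 0; i < 3; i++)
			if (bmval[i] & ~writable[i])
				return ERR_INVAL;	/* read-only attribute requested */
	return ERR_OK;
}
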
+static __be32
+nfsd4_check_open_attributes(struct svc_rqst *rqstp,
+ struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
+{
+ __be32 status = nfs_ok;
+
+ if (open->op_create == NFS4_OPEN_CREATE) {
+ if (open->op_createmode == NFS4_CREATE_UNCHECKED
+ || open->op_createmode == NFS4_CREATE_GUARDED)
+ status = check_attr_support(rqstp, cstate,
+ open->op_bmval, nfsd_attrmask);
+ else if (open->op_createmode == NFS4_CREATE_EXCLUSIVE4_1)
+ status = check_attr_support(rqstp, cstate,
+ open->op_bmval, nfsd41_ex_attrmask);
+ }
+
+ return status;
+}
+
static inline void
fh_dup2(struct svc_fh *dst, struct svc_fh *src)
{
@@ -225,6 +297,10 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
+ status = nfsd4_check_open_attributes(rqstp, cstate, open);
+ if (status)
+ goto out;
+
/* Openowner is now set, so sequence id will get bumped. Now we need
* these checks before we do any creates: */
status = nfserr_grace;
@@ -395,6 +471,11 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
return status;
+ status = check_attr_support(rqstp, cstate, create->cr_bmval,
+ nfsd_attrmask);
+ if (status)
+ return status;
+
switch (create->cr_type) {
case NF4LNK:
/* ugh! we have to null-terminate the linktext, or
@@ -689,6 +770,12 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
return status;
status = nfs_ok;
+
+ status = check_attr_support(rqstp, cstate, setattr->sa_bmval,
+ nfsd_attrmask);
+ if (status)
+ goto out;
+
if (setattr->sa_acl != NULL)
status = nfsd4_set_nfs4_acl(rqstp, &cstate->current_fh,
setattr->sa_acl);
@@ -763,10 +850,10 @@ _nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
return status;
- if ((verify->ve_bmval[0] & ~nfsd_suppattrs0(cstate->minorversion))
- || (verify->ve_bmval[1] & ~nfsd_suppattrs1(cstate->minorversion))
- || (verify->ve_bmval[2] & ~nfsd_suppattrs2(cstate->minorversion)))
- return nfserr_attrnotsupp;
+ status = check_attr_support(rqstp, cstate, verify->ve_bmval, NULL);
+ if (status)
+ return status;
+
if ((verify->ve_bmval[0] & FATTR4_WORD0_RDATTR_ERROR)
|| (verify->ve_bmval[1] & NFSD_WRITEONLY_ATTRS_WORD1))
return nfserr_inval;
@@ -1226,24 +1313,9 @@ static const char *nfsd4_op_name(unsigned opnum)
return "unknown_operation";
}
-#define nfs4svc_decode_voidargs NULL
-#define nfs4svc_release_void NULL
#define nfsd4_voidres nfsd4_voidargs
-#define nfs4svc_release_compound NULL
struct nfsd4_voidargs { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd4_proc_##name, \
- (kxdrproc_t) nfs4svc_decode_##argt##args, \
- (kxdrproc_t) nfs4svc_encode_##rest##res, \
- (kxdrproc_t) nfs4svc_release_##relt, \
- sizeof(struct nfsd4_##argt##args), \
- sizeof(struct nfsd4_##rest##res), \
- 0, \
- cache, \
- respsize, \
- }
-
/*
* TODO: At the present time, the NFSv4 server does not do XID caching
* of requests. Implementing XID caching would not be a serious problem,
@@ -1255,8 +1327,23 @@ struct nfsd4_voidargs { int dummy; };
* better XID's.
*/
static struct svc_procedure nfsd_procedures4[2] = {
- PROC(null, void, void, void, RC_NOCACHE, 1),
- PROC(compound, compound, compound, compound, RC_NOCACHE, NFSD_BUFSIZE/4)
+ [NFSPROC4_NULL] = {
+ .pc_func = (svc_procfunc) nfsd4_proc_null,
+ .pc_encode = (kxdrproc_t) nfs4svc_encode_voidres,
+ .pc_argsize = sizeof(struct nfsd4_voidargs),
+ .pc_ressize = sizeof(struct nfsd4_voidres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = 1,
+ },
+ [NFSPROC4_COMPOUND] = {
+ .pc_func = (svc_procfunc) nfsd4_proc_compound,
+ .pc_decode = (kxdrproc_t) nfs4svc_decode_compoundargs,
+ .pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
+ .pc_argsize = sizeof(struct nfsd4_compoundargs),
+ .pc_ressize = sizeof(struct nfsd4_compoundres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = NFSD_BUFSIZE/4,
+ },
};
struct svc_version nfsd_version4 = {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3b711f5147a7..980a216a48c8 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -182,7 +182,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
{
struct nfs4_delegation *dp;
struct nfs4_file *fp = stp->st_file;
- struct nfs4_callback *cb = &stp->st_stateowner->so_client->cl_callback;
+ struct nfs4_cb_conn *cb = &stp->st_stateowner->so_client->cl_cb_conn;
dprintk("NFSD alloc_init_deleg\n");
if (fp->fi_had_conflict)
@@ -203,10 +203,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
get_file(stp->st_vfs_file);
dp->dl_vfs_file = stp->st_vfs_file;
dp->dl_type = type;
- dp->dl_recall.cbr_dp = NULL;
- dp->dl_recall.cbr_ident = cb->cb_ident;
- dp->dl_recall.cbr_trunc = 0;
- dp->dl_stateid.si_boot = boot_time;
+ dp->dl_ident = cb->cb_ident;
+ dp->dl_stateid.si_boot = get_seconds();
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
dp->dl_stateid.si_generation = 0;
@@ -427,6 +425,11 @@ static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan)
{
int status = 0, np = fchan->maxreqs * NFSD_PAGES_PER_SLOT;
+ if (fchan->maxreqs < 1)
+ return nfserr_inval;
+ else if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
+ fchan->maxreqs = NFSD_MAX_SLOTS_PER_SESSION;
+
spin_lock(&nfsd_serv->sv_lock);
if (np + nfsd_serv->sv_drc_pages_used > nfsd_serv->sv_drc_max_pages)
np = nfsd_serv->sv_drc_max_pages - nfsd_serv->sv_drc_pages_used;
@@ -446,8 +449,8 @@ static int set_forechannel_maxreqs(struct nfsd4_channel_attrs *fchan)
* fchan holds the client values on input, and the server values on output
*/
static int init_forechannel_attrs(struct svc_rqst *rqstp,
- struct nfsd4_session *session,
- struct nfsd4_channel_attrs *fchan)
+ struct nfsd4_channel_attrs *session_fchan,
+ struct nfsd4_channel_attrs *fchan)
{
int status = 0;
__u32 maxcount = svc_max_payload(rqstp);
@@ -457,21 +460,21 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp,
/* Use the client's max request and max response size if possible */
if (fchan->maxreq_sz > maxcount)
fchan->maxreq_sz = maxcount;
- session->se_fmaxreq_sz = fchan->maxreq_sz;
+ session_fchan->maxreq_sz = fchan->maxreq_sz;
if (fchan->maxresp_sz > maxcount)
fchan->maxresp_sz = maxcount;
- session->se_fmaxresp_sz = fchan->maxresp_sz;
+ session_fchan->maxresp_sz = fchan->maxresp_sz;
/* Set the max response cached size our default which is
* a multiple of PAGE_SIZE and small */
- session->se_fmaxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
- fchan->maxresp_cached = session->se_fmaxresp_cached;
+ session_fchan->maxresp_cached = NFSD_PAGES_PER_SLOT * PAGE_SIZE;
+ fchan->maxresp_cached = session_fchan->maxresp_cached;
/* Use the client's maxops if possible */
if (fchan->maxops > NFSD_MAX_OPS_PER_COMPOUND)
fchan->maxops = NFSD_MAX_OPS_PER_COMPOUND;
- session->se_fmaxops = fchan->maxops;
+ session_fchan->maxops = fchan->maxops;
/* try to use the client requested number of slots */
if (fchan->maxreqs > NFSD_MAX_SLOTS_PER_SESSION)
@@ -483,7 +486,7 @@ static int init_forechannel_attrs(struct svc_rqst *rqstp,
*/
status = set_forechannel_maxreqs(fchan);
- session->se_fnumslots = fchan->maxreqs;
+ session_fchan->maxreqs = fchan->maxreqs;
return status;
}
@@ -497,12 +500,14 @@ alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp,
memset(&tmp, 0, sizeof(tmp));
/* FIXME: For now, we just accept the client back channel attributes. */
- status = init_forechannel_attrs(rqstp, &tmp, &cses->fore_channel);
+ tmp.se_bchannel = cses->back_channel;
+ status = init_forechannel_attrs(rqstp, &tmp.se_fchannel,
+ &cses->fore_channel);
if (status)
goto out;
/* allocate struct nfsd4_session and slot table in one piece */
- slotsize = tmp.se_fnumslots * sizeof(struct nfsd4_slot);
+ slotsize = tmp.se_fchannel.maxreqs * sizeof(struct nfsd4_slot);
new = kzalloc(sizeof(*new) + slotsize, GFP_KERNEL);
if (!new)
goto out;
@@ -576,7 +581,7 @@ free_session(struct kref *kref)
int i;
ses = container_of(kref, struct nfsd4_session, se_ref);
- for (i = 0; i < ses->se_fnumslots; i++) {
+ for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
struct nfsd4_cache_entry *e = &ses->se_slots[i].sl_cache_entry;
nfsd4_release_respages(e->ce_respages, e->ce_resused);
}
@@ -632,16 +637,20 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
static void
shutdown_callback_client(struct nfs4_client *clp)
{
- struct rpc_clnt *clnt = clp->cl_callback.cb_client;
+ struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
if (clnt) {
/*
* Callback threads take a reference on the client, so there
* should be no outstanding callbacks at this point.
*/
- clp->cl_callback.cb_client = NULL;
+ clp->cl_cb_conn.cb_client = NULL;
rpc_shutdown_client(clnt);
}
+ if (clp->cl_cb_conn.cb_cred) {
+ put_rpccred(clp->cl_cb_conn.cb_cred);
+ clp->cl_cb_conn.cb_cred = NULL;
+ }
}
static inline void
@@ -714,7 +723,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir)
return NULL;
memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
atomic_set(&clp->cl_count, 1);
- atomic_set(&clp->cl_callback.cb_set, 0);
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
INIT_LIST_HEAD(&clp->cl_idhash);
INIT_LIST_HEAD(&clp->cl_strhash);
INIT_LIST_HEAD(&clp->cl_openowners);
@@ -966,7 +975,7 @@ parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigne
static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
{
- struct nfs4_callback *cb = &clp->cl_callback;
+ struct nfs4_cb_conn *cb = &clp->cl_cb_conn;
/* Currently, we only support tcp for the callback channel */
if ((se->se_callback_netid_len != 3) || memcmp((char *)se->se_callback_netid_val, "tcp", 3))
@@ -975,6 +984,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
&cb->cb_addr, &cb->cb_port)))
goto out_err;
+ cb->cb_minorversion = 0;
cb->cb_prog = se->se_callback_prog;
cb->cb_ident = se->se_callback_ident;
return;
@@ -1128,7 +1138,7 @@ nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
* is sent (lease renewal).
*/
if (seq && nfsd4_not_cached(resp)) {
- seq->maxslots = resp->cstate.session->se_fnumslots;
+ seq->maxslots = resp->cstate.session->se_fchannel.maxreqs;
return nfs_ok;
}
@@ -1238,12 +1248,6 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
expire_client(conf);
goto out_new;
}
- if (ip_addr != conf->cl_addr &&
- !(exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A)) {
- /* Client collision. 18.35.4 case 3 */
- status = nfserr_clid_inuse;
- goto out;
- }
/*
* Set bit when the owner id and verifier map to an already
* confirmed client id (18.35.3).
@@ -1257,12 +1261,12 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
copy_verf(conf, &verf);
new = conf;
goto out_copy;
- } else {
- /* 18.35.4 case 7 */
- if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
- status = nfserr_noent;
- goto out;
- }
+ }
+
+ /* 18.35.4 case 7 */
+ if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
+ status = nfserr_noent;
+ goto out;
}
unconf = find_unconfirmed_client_by_str(dname, strhashval, true);
@@ -1471,7 +1475,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
goto out;
status = nfserr_badslot;
- if (seq->slotid >= session->se_fnumslots)
+ if (seq->slotid >= session->se_fchannel.maxreqs)
goto out;
slot = &session->se_slots[seq->slotid];
@@ -1686,9 +1690,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
else {
/* XXX: We just turn off callbacks until we can handle
* change request correctly. */
- atomic_set(&conf->cl_callback.cb_set, 0);
- gen_confirm(conf);
- nfsd4_remove_clid_dir(unconf);
+ atomic_set(&conf->cl_cb_conn.cb_set, 0);
expire_client(unconf);
status = nfs_ok;
@@ -1882,7 +1884,7 @@ init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
+ stp->st_stateid.si_boot = get_seconds();
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -2059,19 +2061,6 @@ nfs4_file_downgrade(struct file *filp, unsigned int share_access)
}
/*
- * Recall a delegation
- */
-static int
-do_recall(void *__dp)
-{
- struct nfs4_delegation *dp = __dp;
-
- dp->dl_file->fi_had_conflict = true;
- nfsd4_cb_recall(dp);
- return 0;
-}
-
-/*
* Spawn a thread to perform a recall on the delegation represented
* by the lease (file_lock)
*
@@ -2082,8 +2071,7 @@ do_recall(void *__dp)
static
void nfsd_break_deleg_cb(struct file_lock *fl)
{
- struct nfs4_delegation *dp= (struct nfs4_delegation *)fl->fl_owner;
- struct task_struct *t;
+ struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
if (!dp)
@@ -2111,16 +2099,8 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
*/
fl->fl_break_time = 0;
- t = kthread_run(do_recall, dp, "%s", "nfs4_cb_recall");
- if (IS_ERR(t)) {
- struct nfs4_client *clp = dp->dl_client;
-
- printk(KERN_INFO "NFSD: Callback thread failed for "
- "for client (clientid %08x/%08x)\n",
- clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
- put_nfs4_client(dp->dl_client);
- nfs4_put_delegation(dp);
- }
+ dp->dl_file->fi_had_conflict = true;
+ nfsd4_cb_recall(dp);
}
/*
@@ -2422,7 +2402,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
{
struct nfs4_delegation *dp;
struct nfs4_stateowner *sop = stp->st_stateowner;
- struct nfs4_callback *cb = &sop->so_client->cl_callback;
+ struct nfs4_cb_conn *cb = &sop->so_client->cl_cb_conn;
struct file_lock fl, *flp = &fl;
int status, flag = 0;
@@ -2614,7 +2594,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
renew_client(clp);
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
- && !atomic_read(&clp->cl_callback.cb_set))
+ && !atomic_read(&clp->cl_cb_conn.cb_set))
goto out;
status = nfs_ok;
out:
@@ -2738,12 +2718,42 @@ nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stateid *stp)
static int
STALE_STATEID(stateid_t *stateid)
{
- if (stateid->si_boot == boot_time)
- return 0;
- dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
- stateid->si_boot, stateid->si_stateownerid, stateid->si_fileid,
- stateid->si_generation);
- return 1;
+ if (time_after((unsigned long)boot_time,
+ (unsigned long)stateid->si_boot)) {
+ dprintk("NFSD: stale stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+EXPIRED_STATEID(stateid_t *stateid)
+{
+ if (time_before((unsigned long)boot_time,
+ ((unsigned long)stateid->si_boot)) &&
+ time_before((unsigned long)(stateid->si_boot + lease_time), get_seconds())) {
+ dprintk("NFSD: expired stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return 1;
+ }
+ return 0;
+}
+
+static __be32
+stateid_error_map(stateid_t *stateid)
+{
+ if (STALE_STATEID(stateid))
+ return nfserr_stale_stateid;
+ if (EXPIRED_STATEID(stateid))
+ return nfserr_expired;
+
+ dprintk("NFSD: bad stateid (%08x/%08x/%08x/%08x)!\n",
+ stateid->si_boot, stateid->si_stateownerid,
+ stateid->si_fileid, stateid->si_generation);
+ return nfserr_bad_stateid;
}
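
With si_boot now recorded as a get_seconds() timestamp rather than the boot_time cookie, the helpers above classify a failed stateid lookup: a stateid minted before the current boot is reported stale, one minted after boot whose lease interval has elapsed is reported expired, and anything else falls back to bad_stateid. A plain-C restatement of those checks (names are illustrative):

#include <time.h>

enum stateid_verdict { SID_STALE, SID_EXPIRED, SID_BAD };

static enum stateid_verdict classify(time_t si_boot, time_t boot_time,
				     time_t lease_time, time_t now)
{
	if (boot_time > si_boot)			/* STALE_STATEID() test */
		return SID_STALE;
	if (si_boot > boot_time && si_boot + lease_time < now)
		return SID_EXPIRED;			/* EXPIRED_STATEID() test */
	return SID_BAD;					/* neither: plain bad stateid */
}
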
static inline int
@@ -2867,8 +2877,10 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
status = nfserr_bad_stateid;
if (is_delegation_stateid(stateid)) {
dp = find_delegation_stateid(ino, stateid);
- if (!dp)
+ if (!dp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
status = check_stateid_generation(stateid, &dp->dl_stateid,
flags);
if (status)
@@ -2881,8 +2893,10 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
*filpp = dp->dl_vfs_file;
} else { /* open or lock stateid */
stp = find_stateid(stateid, flags);
- if (!stp)
+ if (!stp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
if (nfs4_check_fh(current_fh, stp))
goto out;
if (!stp->st_stateowner->so_confirmed)
@@ -2956,7 +2970,7 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
*/
sop = search_close_lru(stateid->si_stateownerid, flags);
if (sop == NULL)
- return nfserr_bad_stateid;
+ return stateid_error_map(stateid);
*sopp = sop;
goto check_replay;
}
@@ -3227,8 +3241,10 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (!is_delegation_stateid(stateid))
goto out;
dp = find_delegation_stateid(inode, stateid);
- if (!dp)
+ if (!dp) {
+ status = stateid_error_map(stateid);
goto out;
+ }
status = check_stateid_generation(stateid, &dp->dl_stateid, flags);
if (status)
goto out;
@@ -3455,7 +3471,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc
stp->st_stateowner = sop;
get_nfs4_file(fp);
stp->st_file = fp;
- stp->st_stateid.si_boot = boot_time;
+ stp->st_stateid.si_boot = get_seconds();
stp->st_stateid.si_stateownerid = sop->so_id;
stp->st_stateid.si_fileid = fp->fi_id;
stp->st_stateid.si_generation = 0;
@@ -3987,6 +4003,7 @@ nfs4_state_init(void)
INIT_LIST_HEAD(&conf_str_hashtbl[i]);
INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
+ INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
}
for (i = 0; i < SESSION_HASH_SIZE; i++)
INIT_LIST_HEAD(&sessionid_hashtbl[i]);
@@ -4009,8 +4026,6 @@ nfs4_state_init(void)
INIT_LIST_HEAD(&close_lru);
INIT_LIST_HEAD(&client_lru);
INIT_LIST_HEAD(&del_recall_lru);
- for (i = 0; i < CLIENT_HASH_SIZE; i++)
- INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
reclaim_str_hashtbl_size = 0;
return 0;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b73549d293be..2dcc7feaa6ff 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -83,16 +83,6 @@ check_filename(char *str, int len, __be32 err)
return 0;
}
-/*
- * START OF "GENERIC" DECODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
#define DECODE_HEAD \
__be32 *p; \
__be32 status
@@ -254,20 +244,8 @@ nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval)
DECODE_TAIL;
}
-static u32 nfsd_attrmask[] = {
- NFSD_WRITEABLE_ATTRS_WORD0,
- NFSD_WRITEABLE_ATTRS_WORD1,
- NFSD_WRITEABLE_ATTRS_WORD2
-};
-
-static u32 nfsd41_ex_attrmask[] = {
- NFSD_SUPPATTR_EXCLCREAT_WORD0,
- NFSD_SUPPATTR_EXCLCREAT_WORD1,
- NFSD_SUPPATTR_EXCLCREAT_WORD2
-};
-
static __be32
-nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, u32 *writable,
+nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
struct iattr *iattr, struct nfs4_acl **acl)
{
int expected_len, len = 0;
@@ -280,18 +258,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, u32 *writable,
if ((status = nfsd4_decode_bitmap(argp, bmval)))
return status;
- /*
- * According to spec, unsupported attributes return ERR_ATTRNOTSUPP;
- * read-only attributes return ERR_INVAL.
- */
- if ((bmval[0] & ~nfsd_suppattrs0(argp->minorversion)) ||
- (bmval[1] & ~nfsd_suppattrs1(argp->minorversion)) ||
- (bmval[2] & ~nfsd_suppattrs2(argp->minorversion)))
- return nfserr_attrnotsupp;
- if ((bmval[0] & ~writable[0]) || (bmval[1] & ~writable[1]) ||
- (bmval[2] & ~writable[2]))
- return nfserr_inval;
-
READ_BUF(4);
READ32(expected_len);
@@ -424,8 +390,11 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, u32 *writable,
goto xdr_error;
}
}
- BUG_ON(bmval[2]); /* no such writeable attr supported yet */
- if (len != expected_len)
+ if (bmval[0] & ~NFSD_WRITEABLE_ATTRS_WORD0
+ || bmval[1] & ~NFSD_WRITEABLE_ATTRS_WORD1
+ || bmval[2] & ~NFSD_WRITEABLE_ATTRS_WORD2)
+ READ_BUF(expected_len - len);
+ else if (len != expected_len)
goto xdr_error;
DECODE_TAIL;
@@ -518,8 +487,8 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
if ((status = check_filename(create->cr_name, create->cr_namelen, nfserr_inval)))
return status;
- status = nfsd4_decode_fattr(argp, create->cr_bmval, nfsd_attrmask,
- &create->cr_iattr, &create->cr_acl);
+ status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
+ &create->cr_acl);
if (status)
goto out;
@@ -682,7 +651,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
case NFS4_CREATE_UNCHECKED:
case NFS4_CREATE_GUARDED:
status = nfsd4_decode_fattr(argp, open->op_bmval,
- nfsd_attrmask, &open->op_iattr, &open->op_acl);
+ &open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
@@ -696,8 +665,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
READ_BUF(8);
COPYMEM(open->op_verf.data, 8);
status = nfsd4_decode_fattr(argp, open->op_bmval,
- nfsd41_ex_attrmask, &open->op_iattr,
- &open->op_acl);
+ &open->op_iattr, &open->op_acl);
if (status)
goto out;
break;
@@ -893,8 +861,8 @@ nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *seta
status = nfsd4_decode_stateid(argp, &setattr->sa_stateid);
if (status)
return status;
- return nfsd4_decode_fattr(argp, setattr->sa_bmval, nfsd_attrmask,
- &setattr->sa_iattr, &setattr->sa_acl);
+ return nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr,
+ &setattr->sa_acl);
}
static __be32
@@ -1328,64 +1296,64 @@ static nfsd4_dec nfsd4_dec_ops[] = {
};
static nfsd4_dec nfsd41_dec_ops[] = {
- [OP_ACCESS] (nfsd4_dec)nfsd4_decode_access,
- [OP_CLOSE] (nfsd4_dec)nfsd4_decode_close,
- [OP_COMMIT] (nfsd4_dec)nfsd4_decode_commit,
- [OP_CREATE] (nfsd4_dec)nfsd4_decode_create,
- [OP_DELEGPURGE] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DELEGRETURN] (nfsd4_dec)nfsd4_decode_delegreturn,
- [OP_GETATTR] (nfsd4_dec)nfsd4_decode_getattr,
- [OP_GETFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_LINK] (nfsd4_dec)nfsd4_decode_link,
- [OP_LOCK] (nfsd4_dec)nfsd4_decode_lock,
- [OP_LOCKT] (nfsd4_dec)nfsd4_decode_lockt,
- [OP_LOCKU] (nfsd4_dec)nfsd4_decode_locku,
- [OP_LOOKUP] (nfsd4_dec)nfsd4_decode_lookup,
- [OP_LOOKUPP] (nfsd4_dec)nfsd4_decode_noop,
- [OP_NVERIFY] (nfsd4_dec)nfsd4_decode_verify,
- [OP_OPEN] (nfsd4_dec)nfsd4_decode_open,
- [OP_OPENATTR] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_CONFIRM] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_OPEN_DOWNGRADE] (nfsd4_dec)nfsd4_decode_open_downgrade,
- [OP_PUTFH] (nfsd4_dec)nfsd4_decode_putfh,
- [OP_PUTPUBFH] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_PUTROOTFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_READ] (nfsd4_dec)nfsd4_decode_read,
- [OP_READDIR] (nfsd4_dec)nfsd4_decode_readdir,
- [OP_READLINK] (nfsd4_dec)nfsd4_decode_noop,
- [OP_REMOVE] (nfsd4_dec)nfsd4_decode_remove,
- [OP_RENAME] (nfsd4_dec)nfsd4_decode_rename,
- [OP_RENEW] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RESTOREFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_SAVEFH] (nfsd4_dec)nfsd4_decode_noop,
- [OP_SECINFO] (nfsd4_dec)nfsd4_decode_secinfo,
- [OP_SETATTR] (nfsd4_dec)nfsd4_decode_setattr,
- [OP_SETCLIENTID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SETCLIENTID_CONFIRM](nfsd4_dec)nfsd4_decode_notsupp,
- [OP_VERIFY] (nfsd4_dec)nfsd4_decode_verify,
- [OP_WRITE] (nfsd4_dec)nfsd4_decode_write,
- [OP_RELEASE_LOCKOWNER] (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
+ [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
+ [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
+ [OP_CREATE] = (nfsd4_dec)nfsd4_decode_create,
+ [OP_DELEGPURGE] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DELEGRETURN] = (nfsd4_dec)nfsd4_decode_delegreturn,
+ [OP_GETATTR] = (nfsd4_dec)nfsd4_decode_getattr,
+ [OP_GETFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_LINK] = (nfsd4_dec)nfsd4_decode_link,
+ [OP_LOCK] = (nfsd4_dec)nfsd4_decode_lock,
+ [OP_LOCKT] = (nfsd4_dec)nfsd4_decode_lockt,
+ [OP_LOCKU] = (nfsd4_dec)nfsd4_decode_locku,
+ [OP_LOOKUP] = (nfsd4_dec)nfsd4_decode_lookup,
+ [OP_LOOKUPP] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_NVERIFY] = (nfsd4_dec)nfsd4_decode_verify,
+ [OP_OPEN] = (nfsd4_dec)nfsd4_decode_open,
+ [OP_OPENATTR] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OPEN_CONFIRM] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_OPEN_DOWNGRADE] = (nfsd4_dec)nfsd4_decode_open_downgrade,
+ [OP_PUTFH] = (nfsd4_dec)nfsd4_decode_putfh,
+ [OP_PUTPUBFH] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_PUTROOTFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_READ] = (nfsd4_dec)nfsd4_decode_read,
+ [OP_READDIR] = (nfsd4_dec)nfsd4_decode_readdir,
+ [OP_READLINK] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_REMOVE] = (nfsd4_dec)nfsd4_decode_remove,
+ [OP_RENAME] = (nfsd4_dec)nfsd4_decode_rename,
+ [OP_RENEW] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_RESTOREFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_SAVEFH] = (nfsd4_dec)nfsd4_decode_noop,
+ [OP_SECINFO] = (nfsd4_dec)nfsd4_decode_secinfo,
+ [OP_SETATTR] = (nfsd4_dec)nfsd4_decode_setattr,
+ [OP_SETCLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SETCLIENTID_CONFIRM]= (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_VERIFY] = (nfsd4_dec)nfsd4_decode_verify,
+ [OP_WRITE] = (nfsd4_dec)nfsd4_decode_write,
+ [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_notsupp,
/* new operations for NFSv4.1 */
- [OP_BACKCHANNEL_CTL] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_BIND_CONN_TO_SESSION](nfsd4_dec)nfsd4_decode_notsupp,
- [OP_EXCHANGE_ID] (nfsd4_dec)nfsd4_decode_exchange_id,
- [OP_CREATE_SESSION] (nfsd4_dec)nfsd4_decode_create_session,
- [OP_DESTROY_SESSION] (nfsd4_dec)nfsd4_decode_destroy_session,
- [OP_FREE_STATEID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GET_DIR_DELEGATION] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GETDEVICEINFO] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_GETDEVICELIST] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTCOMMIT] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTGET] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_LAYOUTRETURN] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SECINFO_NO_NAME] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_SEQUENCE] (nfsd4_dec)nfsd4_decode_sequence,
- [OP_SET_SSV] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_TEST_STATEID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_WANT_DELEGATION] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_DESTROY_CLIENTID] (nfsd4_dec)nfsd4_decode_notsupp,
- [OP_RECLAIM_COMPLETE] (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
+ [OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
+ [OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
+ [OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
+ [OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
+ [OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_notsupp,
};
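
The hunk above converts the nfsd41_dec_ops table from the obsolete GNU "[index] value" initializer form to standard C99 designated initializers. A tiny standalone illustration of the standard form (the op numbers here are made up):

#include <stdio.h>

enum { OP_READ = 3, OP_WRITE = 7, OP_LAST };

typedef int (*op_fn)(void);

static int do_read(void)  { return 100; }
static int do_write(void) { return 200; }

/* C99 designated initializers: entries may appear in any order and
 * unnamed slots are implicitly zero (NULL). */
static op_fn ops[OP_LAST] = {
	[OP_WRITE] = do_write,
	[OP_READ]  = do_read,
};

int main(void)
{
	printf("%d %d %d\n", ops[OP_READ](), ops[OP_WRITE](), ops[0] == NULL);
	return 0;
}
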
struct nfsd4_minorversion_ops {
@@ -1489,21 +1457,6 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
DECODE_TAIL;
}
-/*
- * END OF "GENERIC" DECODE ROUTINES.
- */
-
-/*
- * START OF "GENERIC" ENCODE ROUTINES.
- * These may look a little ugly since they are imported from a "generic"
- * set of XDR encode/decode routines which are intended to be shared by
- * all of our NFSv4 implementations (OpenBSD, MacOS X...).
- *
- * If the pain of reading these is too great, it should be a straightforward
- * task to translate them into Linux-specific versions which are more
- * consistent with the style used in NFSv2/v3...
- */
-#define ENCODE_HEAD __be32 *p
#define WRITE32(n) *p++ = htonl(n)
#define WRITE64(n) do { \
@@ -1515,13 +1468,41 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
memcpy(p, ptr, nbytes); \
p += XDR_QUADLEN(nbytes); \
}} while (0)
-#define WRITECINFO(c) do { \
- *p++ = htonl(c.atomic); \
- *p++ = htonl(c.before_ctime_sec); \
- *p++ = htonl(c.before_ctime_nsec); \
- *p++ = htonl(c.after_ctime_sec); \
- *p++ = htonl(c.after_ctime_nsec); \
-} while (0)
+
+static void write32(__be32 **p, u32 n)
+{
+ *(*p)++ = n;
+}
+
+static void write64(__be32 **p, u64 n)
+{
+ write32(p, (u32)(n >> 32));
+ write32(p, (u32)n);
+}
+
+static void write_change(__be32 **p, struct kstat *stat, struct inode *inode)
+{
+ if (IS_I_VERSION(inode)) {
+ write64(p, inode->i_version);
+ } else {
+ write32(p, stat->ctime.tv_sec);
+ write32(p, stat->ctime.tv_nsec);
+ }
+}
+
+static void write_cinfo(__be32 **p, struct nfsd4_change_info *c)
+{
+ write32(p, c->atomic);
+ if (c->change_supported) {
+ write64(p, c->before_change);
+ write64(p, c->after_change);
+ } else {
+ write32(p, c->before_ctime_sec);
+ write32(p, c->before_ctime_nsec);
+ write32(p, c->after_ctime_sec);
+ write32(p, c->after_ctime_nsec);
+ }
+}
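
The new write32()/write64()/write_cinfo() helpers replace the WRITECINFO macro with functions that take the output cursor by reference, so every call advances the same pointer. A self-contained sketch of that idiom (byte-order conversion is deliberately omitted here):

#include <stdio.h>
#include <stdint.h>

static void put32(uint32_t **p, uint32_t n)
{
	*(*p)++ = n;                  /* store, then advance the shared cursor */
}

static void put64(uint32_t **p, uint64_t n)
{
	put32(p, (uint32_t)(n >> 32));
	put32(p, (uint32_t)n);
}

int main(void)
{
	uint32_t buf[3], *p = buf;

	put32(&p, 0xdeadbeef);
	put64(&p, 0x1122334455667788ULL);
	printf("wrote %zu words, last = 0x%08x\n",
	       (size_t)(p - buf), buf[2]);    /* 3 words, 0x55667788 */
	return 0;
}
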
#define RESERVE_SPACE(nbytes) do { \
p = resp->p; \
@@ -1874,16 +1855,9 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
WRITE32(NFS4_FH_PERSISTENT|NFS4_FH_VOL_RENAME);
}
if (bmval0 & FATTR4_WORD0_CHANGE) {
- /*
- * Note: This _must_ be consistent with the scheme for writing
- * change_info, so any changes made here must be reflected there
- * as well. (See xdr4.h:set_change_info() and the WRITECINFO()
- * macro above.)
- */
if ((buflen -= 8) < 0)
goto out_resource;
- WRITE32(stat.ctime.tv_sec);
- WRITE32(stat.ctime.tv_nsec);
+ write_change(&p, &stat, dentry->d_inode);
}
if (bmval0 & FATTR4_WORD0_SIZE) {
if ((buflen -= 8) < 0)
@@ -2348,7 +2322,7 @@ fail:
static void
nfsd4_encode_stateid(struct nfsd4_compoundres *resp, stateid_t *sid)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(sizeof(stateid_t));
WRITE32(sid->si_generation);
@@ -2359,7 +2333,7 @@ nfsd4_encode_stateid(struct nfsd4_compoundres *resp, stateid_t *sid)
static __be32
nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_access *access)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8);
@@ -2386,7 +2360,7 @@ nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_c
static __be32
nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_commit *commit)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8);
@@ -2399,11 +2373,11 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
static __be32
nfsd4_encode_create(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_create *create)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(32);
- WRITECINFO(create->cr_cinfo);
+ write_cinfo(&p, &create->cr_cinfo);
WRITE32(2);
WRITE32(create->cr_bmval[0]);
WRITE32(create->cr_bmval[1]);
@@ -2435,7 +2409,7 @@ nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh
{
struct svc_fh *fhp = *fhpp;
unsigned int len;
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
len = fhp->fh_handle.fh_size;
@@ -2454,7 +2428,7 @@ nfsd4_encode_getfh(struct nfsd4_compoundres *resp, __be32 nfserr, struct svc_fh
static void
nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denied *ld)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(32 + XDR_LEN(ld->ld_sop ? ld->ld_sop->so_owner.len : 0));
WRITE64(ld->ld_start);
@@ -2510,11 +2484,11 @@ nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l
static __be32
nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_link *link)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
- WRITECINFO(link->li_cinfo);
+ write_cinfo(&p, &link->li_cinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2524,7 +2498,7 @@ nfsd4_encode_link(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_li
static __be32
nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open)
{
- ENCODE_HEAD;
+ __be32 *p;
ENCODE_SEQID_OP_HEAD;
if (nfserr)
@@ -2532,7 +2506,7 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
nfsd4_encode_stateid(resp, &open->op_stateid);
RESERVE_SPACE(40);
- WRITECINFO(open->op_cinfo);
+ write_cinfo(&p, &open->op_cinfo);
WRITE32(open->op_rflags);
WRITE32(2);
WRITE32(open->op_bmval[0]);
@@ -2619,7 +2593,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
int v, pn;
unsigned long maxcount;
long len;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2681,7 +2655,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
{
int maxcount;
char *page;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2730,7 +2704,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
int maxcount;
loff_t offset;
__be32 *page, *savep, *tailbase;
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -2806,11 +2780,11 @@ err_no_verf:
static __be32
nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_remove *remove)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(20);
- WRITECINFO(remove->rm_cinfo);
+ write_cinfo(&p, &remove->rm_cinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2819,12 +2793,12 @@ nfsd4_encode_remove(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
static __be32
nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_rename *rename)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(40);
- WRITECINFO(rename->rn_sinfo);
- WRITECINFO(rename->rn_tinfo);
+ write_cinfo(&p, &rename->rn_sinfo);
+ write_cinfo(&p, &rename->rn_tinfo);
ADJUST_ARGS();
}
return nfserr;
@@ -2839,7 +2813,7 @@ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
u32 nflavs;
struct exp_flavor_info *flavs;
struct exp_flavor_info def_flavs[2];
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
goto out;
@@ -2904,7 +2878,7 @@ out:
static __be32
nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setattr *setattr)
{
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(12);
if (nfserr) {
@@ -2924,7 +2898,7 @@ nfsd4_encode_setattr(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
static __be32
nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_setclientid *scd)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(8 + sizeof(nfs4_verifier));
@@ -2944,7 +2918,7 @@ nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct n
static __be32
nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_write *write)
{
- ENCODE_HEAD;
+ __be32 *p;
if (!nfserr) {
RESERVE_SPACE(16);
@@ -2960,7 +2934,7 @@ static __be32
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_exchange_id *exid)
{
- ENCODE_HEAD;
+ __be32 *p;
char *major_id;
char *server_scope;
int major_id_sz;
@@ -3015,7 +2989,7 @@ static __be32
nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_create_session *sess)
{
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -3071,7 +3045,7 @@ __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_sequence *seq)
{
- ENCODE_HEAD;
+ __be32 *p;
if (nfserr)
return nfserr;
@@ -3209,7 +3183,7 @@ static int nfsd4_check_drc_limit(struct nfsd4_compoundres *resp)
dprintk("%s length %u, xb->page_len %u tlen %u pad %u\n", __func__,
length, xb->page_len, tlen, pad);
- if (length <= session->se_fmaxresp_cached)
+ if (length <= session->se_fchannel.maxresp_cached)
return status;
else
return nfserr_rep_too_big_to_cache;
@@ -3219,7 +3193,7 @@ void
nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
__be32 *statp;
- ENCODE_HEAD;
+ __be32 *p;
RESERVE_SPACE(8);
WRITE32(op->opnum);
@@ -3253,7 +3227,7 @@ status:
void
nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
{
- ENCODE_HEAD;
+ __be32 *p;
struct nfs4_replay *rp = op->replay;
BUG_ON(!rp);
@@ -3268,10 +3242,6 @@ nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
ADJUST_ARGS();
}
-/*
- * END OF "GENERIC" ENCODE ROUTINES.
- */
-
int
nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 5bfc2ac60d54..4638635c5d87 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -29,15 +29,24 @@
*/
#define CACHESIZE 1024
#define HASHSIZE 64
-#define REQHASH(xid) (((((__force __u32)xid) >> 24) ^ ((__force __u32)xid)) & (HASHSIZE-1))
-static struct hlist_head * hash_list;
+static struct hlist_head * cache_hash;
static struct list_head lru_head;
static int cache_disabled = 1;
+/*
+ * Calculate the hash index from an XID.
+ */
+static inline u32 request_hash(u32 xid)
+{
+ u32 h = xid;
+ h ^= (xid >> 24);
+ return h & (HASHSIZE-1);
+}
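
request_hash() simply folds the XID's high byte into its low bits before masking, which relies on HASHSIZE staying a power of two. The same computation, runnable on its own:

#include <stdio.h>
#include <stdint.h>

#define HASHSIZE 64                        /* must be a power of two */

static uint32_t request_hash(uint32_t xid)
{
	uint32_t h = xid;

	h ^= (xid >> 24);                  /* mix the high byte in */
	return h & (HASHSIZE - 1);
}

int main(void)
{
	/* Two XIDs differing only in the top byte land in different buckets. */
	printf("%u %u\n", request_hash(0x00345678), request_hash(0x41345678));
	return 0;
}
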
+
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
-/*
+/*
* locking for the reply cache:
* A cache entry is "single use" if c_state == RC_INPROG
* Otherwise, it when accessing _prev or _next, the lock must be held.
@@ -62,8 +71,8 @@ int nfsd_reply_cache_init(void)
i--;
}
- hash_list = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
- if (!hash_list)
+ cache_hash = kcalloc (HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
+ if (!cache_hash)
goto out_nomem;
cache_disabled = 0;
@@ -88,8 +97,8 @@ void nfsd_reply_cache_shutdown(void)
cache_disabled = 1;
- kfree (hash_list);
- hash_list = NULL;
+ kfree (cache_hash);
+ cache_hash = NULL;
}
/*
@@ -108,7 +117,7 @@ static void
hash_refile(struct svc_cacherep *rp)
{
hlist_del_init(&rp->c_hash);
- hlist_add_head(&rp->c_hash, hash_list + REQHASH(rp->c_xid));
+ hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}
/*
@@ -138,7 +147,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
spin_lock(&cache_lock);
rtn = RC_DOIT;
- rh = &hash_list[REQHASH(xid)];
+ rh = &cache_hash[request_hash(xid)];
hlist_for_each_entry(rp, hn, rh, c_hash) {
if (rp->c_state != RC_UNUSED &&
xid == rp->c_xid && proc == rp->c_proc &&
@@ -165,8 +174,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
}
}
- /* This should not happen */
- if (rp == NULL) {
+ /* All entries on the LRU are in-progress. This should not happen */
+ if (&rp->c_lru == &lru_head) {
static int complaints;
printk(KERN_WARNING "nfsd: all repcache entries locked!\n");
@@ -264,7 +273,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
len >>= 2;
-
+
/* Don't cache excessive amounts of data and XDR failures */
if (!statp || len > (256 >> 2)) {
rp->c_state = RC_UNUSED;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index af16849d243a..1250fb978ac1 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -207,10 +207,14 @@ static struct file_operations pool_stats_operations = {
static ssize_t write_svc(struct file *file, char *buf, size_t size)
{
struct nfsctl_svc *data;
+ int err;
if (size < sizeof(*data))
return -EINVAL;
data = (struct nfsctl_svc*) buf;
- return nfsd_svc(data->svc_port, data->svc_nthreads);
+ err = nfsd_svc(data->svc_port, data->svc_nthreads);
+ if (err < 0)
+ return err;
+ return 0;
}
/**
@@ -692,11 +696,12 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
if (newthreads < 0)
return -EINVAL;
rv = nfsd_svc(NFS_PORT, newthreads);
- if (rv)
+ if (rv < 0)
return rv;
- }
- sprintf(buf, "%d\n", nfsd_nrthreads());
- return strlen(buf);
+ } else
+ rv = nfsd_nrthreads();
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
}
/**
@@ -793,7 +798,7 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
{
char *mesg = buf;
char *vers, *minorp, sign;
- int len, num;
+ int len, num, remaining;
unsigned minor;
ssize_t tlen = 0;
char *sep;
@@ -840,32 +845,50 @@ static ssize_t __write_versions(struct file *file, char *buf, size_t size)
}
next:
vers += len + 1;
- tlen += len;
} while ((len = qword_get(&mesg, vers, size)) > 0);
/* If all get turned off, turn them back on, as
* having no versions is BAD
*/
nfsd_reset_versions();
}
+
/* Now write current state into reply buffer */
len = 0;
sep = "";
+ remaining = SIMPLE_TRANSACTION_LIMIT;
for (num=2 ; num <= 4 ; num++)
if (nfsd_vers(num, NFSD_AVAIL)) {
- len += sprintf(buf+len, "%s%c%d", sep,
+ len = snprintf(buf, remaining, "%s%c%d", sep,
nfsd_vers(num, NFSD_TEST)?'+':'-',
num);
sep = " ";
+
+ if (len > remaining)
+ break;
+ remaining -= len;
+ buf += len;
+ tlen += len;
}
if (nfsd_vers(4, NFSD_AVAIL))
- for (minor = 1; minor <= NFSD_SUPPORTED_MINOR_VERSION; minor++)
- len += sprintf(buf+len, " %c4.%u",
+ for (minor = 1; minor <= NFSD_SUPPORTED_MINOR_VERSION;
+ minor++) {
+ len = snprintf(buf, remaining, " %c4.%u",
(nfsd_vers(4, NFSD_TEST) &&
nfsd_minorversion(minor, NFSD_TEST)) ?
'+' : '-',
minor);
- len += sprintf(buf+len, "\n");
- return len;
+
+ if (len > remaining)
+ break;
+ remaining -= len;
+ buf += len;
+ tlen += len;
+ }
+
+ len = snprintf(buf, remaining, "\n");
+ if (len > remaining)
+ return -EINVAL;
+ return tlen + len;
}
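
The rewritten __write_versions() switches from unbounded sprintf() into the transaction buffer to a bounded-append loop. A generic userspace sketch of that pattern (the exact overflow check in nfsctl.c may differ slightly from the one below):

#include <stdio.h>

int main(void)
{
	char buf[32];
	char *p = buf;
	size_t remaining = sizeof(buf);
	size_t total = 0;
	int v, len;

	for (v = 2; v <= 4; v++) {
		/* snprintf() reports how much it *wanted* to write, so the
		 * result must be compared with the space actually left. */
		len = snprintf(p, remaining, "%s+%d", total ? " " : "", v);
		if (len < 0 || (size_t)len >= remaining)
			break;
		p += len;
		remaining -= len;
		total += len;
	}
	printf("%s (%zu bytes)\n", buf, total);
	return 0;
}
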
/**
@@ -910,104 +933,143 @@ static ssize_t write_versions(struct file *file, char *buf, size_t size)
return rv;
}
-static ssize_t __write_ports(struct file *file, char *buf, size_t size)
+/*
+ * Zero-length write. Return a list of NFSD's current listener
+ * transports.
+ */
+static ssize_t __write_ports_names(char *buf)
{
- if (size == 0) {
- int len = 0;
+ if (nfsd_serv == NULL)
+ return 0;
+ return svc_xprt_names(nfsd_serv, buf, SIMPLE_TRANSACTION_LIMIT);
+}
- if (nfsd_serv)
- len = svc_xprt_names(nfsd_serv, buf, 0);
- return len;
- }
- /* Either a single 'fd' number is written, in which
- * case it must be for a socket of a supported family/protocol,
- * and we use it as an nfsd socket, or
- * A '-' followed by the 'name' of a socket in which case
- * we close the socket.
- */
- if (isdigit(buf[0])) {
- char *mesg = buf;
- int fd;
- int err;
- err = get_int(&mesg, &fd);
- if (err)
- return -EINVAL;
- if (fd < 0)
- return -EINVAL;
- err = nfsd_create_serv();
- if (!err) {
- err = svc_addsock(nfsd_serv, fd, buf);
- if (err >= 0) {
- err = lockd_up();
- if (err < 0)
- svc_sock_names(buf+strlen(buf)+1, nfsd_serv, buf);
- }
- /* Decrease the count, but don't shutdown the
- * the service
- */
- nfsd_serv->sv_nrthreads--;
- }
- return err < 0 ? err : 0;
- }
- if (buf[0] == '-' && isdigit(buf[1])) {
- char *toclose = kstrdup(buf+1, GFP_KERNEL);
- int len = 0;
- if (!toclose)
- return -ENOMEM;
- if (nfsd_serv)
- len = svc_sock_names(buf, nfsd_serv, toclose);
- if (len >= 0)
- lockd_down();
- kfree(toclose);
- return len;
- }
- /*
- * Add a transport listener by writing it's transport name
- */
- if (isalpha(buf[0])) {
- int err;
- char transport[16];
- int port;
- if (sscanf(buf, "%15s %4d", transport, &port) == 2) {
- if (port < 1 || port > 65535)
- return -EINVAL;
- err = nfsd_create_serv();
- if (!err) {
- err = svc_create_xprt(nfsd_serv,
- transport, PF_INET, port,
- SVC_SOCK_ANONYMOUS);
- if (err == -ENOENT)
- /* Give a reasonable perror msg for
- * bad transport string */
- err = -EPROTONOSUPPORT;
- }
- return err < 0 ? err : 0;
- }
- }
- /*
- * Remove a transport by writing it's transport name and port number
- */
- if (buf[0] == '-' && isalpha(buf[1])) {
- struct svc_xprt *xprt;
- int err = -EINVAL;
- char transport[16];
- int port;
- if (sscanf(&buf[1], "%15s %4d", transport, &port) == 2) {
- if (port < 1 || port > 65535)
- return -EINVAL;
- if (nfsd_serv) {
- xprt = svc_find_xprt(nfsd_serv, transport,
- AF_UNSPEC, port);
- if (xprt) {
- svc_close_xprt(xprt);
- svc_xprt_put(xprt);
- err = 0;
- } else
- err = -ENOTCONN;
- }
- return err < 0 ? err : 0;
- }
+/*
+ * A single 'fd' number was written, in which case it must be for
+ * a socket of a supported family/protocol, and we use it as an
+ * nfsd listener.
+ */
+static ssize_t __write_ports_addfd(char *buf)
+{
+ char *mesg = buf;
+ int fd, err;
+
+ err = get_int(&mesg, &fd);
+ if (err != 0 || fd < 0)
+ return -EINVAL;
+
+ err = nfsd_create_serv();
+ if (err != 0)
+ return err;
+
+ err = lockd_up();
+ if (err != 0)
+ goto out;
+
+ err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
+ if (err < 0)
+ lockd_down();
+
+out:
+ /* Decrease the count, but don't shut down the service */
+ nfsd_serv->sv_nrthreads--;
+ return err;
+}
+
+/*
+ * A '-' followed by the 'name' of a socket means we close the socket.
+ */
+static ssize_t __write_ports_delfd(char *buf)
+{
+ char *toclose;
+ int len = 0;
+
+ toclose = kstrdup(buf + 1, GFP_KERNEL);
+ if (toclose == NULL)
+ return -ENOMEM;
+
+ if (nfsd_serv != NULL)
+ len = svc_sock_names(nfsd_serv, buf,
+ SIMPLE_TRANSACTION_LIMIT, toclose);
+ if (len >= 0)
+ lockd_down();
+
+ kfree(toclose);
+ return len;
+}
+
+/*
+ * A transport listener is added by writing its transport name and
+ * a port number.
+ */
+static ssize_t __write_ports_addxprt(char *buf)
+{
+ char transport[16];
+ int port, err;
+
+ if (sscanf(buf, "%15s %4u", transport, &port) != 2)
+ return -EINVAL;
+
+ if (port < 1 || port > USHORT_MAX)
+ return -EINVAL;
+
+ err = nfsd_create_serv();
+ if (err != 0)
+ return err;
+
+ err = svc_create_xprt(nfsd_serv, transport,
+ PF_INET, port, SVC_SOCK_ANONYMOUS);
+ if (err < 0) {
+ /* Give a reasonable perror msg for bad transport string */
+ if (err == -ENOENT)
+ err = -EPROTONOSUPPORT;
+ return err;
}
+ return 0;
+}
+
+/*
+ * A transport listener is removed by writing a "-", its transport
+ * name, and its port number.
+ */
+static ssize_t __write_ports_delxprt(char *buf)
+{
+ struct svc_xprt *xprt;
+ char transport[16];
+ int port;
+
+ if (sscanf(&buf[1], "%15s %4u", transport, &port) != 2)
+ return -EINVAL;
+
+ if (port < 1 || port > USHORT_MAX || nfsd_serv == NULL)
+ return -EINVAL;
+
+ xprt = svc_find_xprt(nfsd_serv, transport, AF_UNSPEC, port);
+ if (xprt == NULL)
+ return -ENOTCONN;
+
+ svc_close_xprt(xprt);
+ svc_xprt_put(xprt);
+ return 0;
+}
+
+static ssize_t __write_ports(struct file *file, char *buf, size_t size)
+{
+ if (size == 0)
+ return __write_ports_names(buf);
+
+ if (isdigit(buf[0]))
+ return __write_ports_addfd(buf);
+
+ if (buf[0] == '-' && isdigit(buf[1]))
+ return __write_ports_delfd(buf);
+
+ if (isalpha(buf[0]))
+ return __write_ports_addxprt(buf);
+
+ if (buf[0] == '-' && isalpha(buf[1]))
+ return __write_ports_delxprt(buf);
+
return -EINVAL;
}
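
With the dispatcher above, each helper maps the first character of the written string onto an action. For illustration, a userspace program could request a new listener by writing a transport name and port to the nfsd control file; the /proc/fs/nfsd/portlist path below is the conventional mount point and is an assumption, not part of this patch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *ctl = "/proc/fs/nfsd/portlist";   /* assumed mount point */
	const char *req = "tcp 2049";                 /* "<transport> <port>" */
	FILE *f = fopen(ctl, "w");

	if (!f) {
		perror(ctl);
		return 1;
	}
	if (fwrite(req, 1, strlen(req), f) != strlen(req))
		perror("write");
	if (fclose(f) != 0)
		perror("close");
	return 0;
}
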
@@ -1030,7 +1092,9 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size)
* buf: C string containing an unsigned
* integer value representing a bound
* but unconnected socket that is to be
- * used as an NFSD listener
+ * used as an NFSD listener; listen(3)
+ * must be called for a SOCK_STREAM
+ * socket, otherwise it is ignored
* size: non-zero length of C string in @buf
* Output:
* On success: NFS service is started;
@@ -1138,7 +1202,9 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
nfsd_max_blksize = bsize;
mutex_unlock(&nfsd_mutex);
}
- return sprintf(buf, "%d\n", nfsd_max_blksize);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n",
+ nfsd_max_blksize);
}
#ifdef CONFIG_NFSD_V4
@@ -1162,8 +1228,9 @@ static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
return -EINVAL;
nfs4_reset_lease(lease);
}
- sprintf(buf, "%ld\n", nfs4_lease_time());
- return strlen(buf);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%ld\n",
+ nfs4_lease_time());
}
/**
@@ -1219,8 +1286,9 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size)
status = nfs4_reset_recoverydir(recdir);
}
- sprintf(buf, "%s\n", nfs4_recoverydir());
- return strlen(buf);
+
+ return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%s\n",
+ nfs4_recoverydir());
}
/**
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 9f1ca17293d3..8847f3fbfc1e 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -27,9 +27,6 @@
#define NFSDDBG_FACILITY NFSDDBG_FH
-static int nfsd_nr_verified;
-static int nfsd_nr_put;
-
/*
* our acceptability function.
* if NOSUBTREECHECK, accept anything
@@ -251,7 +248,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
fhp->fh_dentry = dentry;
fhp->fh_export = exp;
- nfsd_nr_verified++;
return 0;
out:
exp_put(exp);
@@ -552,7 +548,6 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
return nfserr_opnotsupp;
}
- nfsd_nr_verified++;
return 0;
}
@@ -609,7 +604,6 @@ fh_put(struct svc_fh *fhp)
fhp->fh_pre_saved = 0;
fhp->fh_post_saved = 0;
#endif
- nfsd_nr_put++;
}
if (exp) {
cache_put(&exp->h, &svc_export_cache);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index e298e260b5f1..0eb9c820b7a6 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -533,45 +533,179 @@ nfsd_proc_statfs(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
* NFSv2 Server procedures.
* Only the results of non-idempotent operations are cached.
*/
-#define nfsd_proc_none NULL
-#define nfssvc_release_none NULL
struct nfsd_void { int dummy; };
-#define PROC(name, argt, rest, relt, cache, respsize) \
- { (svc_procfunc) nfsd_proc_##name, \
- (kxdrproc_t) nfssvc_decode_##argt, \
- (kxdrproc_t) nfssvc_encode_##rest, \
- (kxdrproc_t) nfssvc_release_##relt, \
- sizeof(struct nfsd_##argt), \
- sizeof(struct nfsd_##rest), \
- 0, \
- cache, \
- respsize, \
- }
-
#define ST 1 /* status */
#define FH 8 /* filehandle */
#define AT 18 /* attributes */
static struct svc_procedure nfsd_procedures2[18] = {
- PROC(null, void, void, none, RC_NOCACHE, ST),
- PROC(getattr, fhandle, attrstat, fhandle, RC_NOCACHE, ST+AT),
- PROC(setattr, sattrargs, attrstat, fhandle, RC_REPLBUFF, ST+AT),
- PROC(none, void, void, none, RC_NOCACHE, ST),
- PROC(lookup, diropargs, diropres, fhandle, RC_NOCACHE, ST+FH+AT),
- PROC(readlink, readlinkargs, readlinkres, none, RC_NOCACHE, ST+1+NFS_MAXPATHLEN/4),
- PROC(read, readargs, readres, fhandle, RC_NOCACHE, ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4),
- PROC(none, void, void, none, RC_NOCACHE, ST),
- PROC(write, writeargs, attrstat, fhandle, RC_REPLBUFF, ST+AT),
- PROC(create, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT),
- PROC(remove, diropargs, void, none, RC_REPLSTAT, ST),
- PROC(rename, renameargs, void, none, RC_REPLSTAT, ST),
- PROC(link, linkargs, void, none, RC_REPLSTAT, ST),
- PROC(symlink, symlinkargs, void, none, RC_REPLSTAT, ST),
- PROC(mkdir, createargs, diropres, fhandle, RC_REPLBUFF, ST+FH+AT),
- PROC(rmdir, diropargs, void, none, RC_REPLSTAT, ST),
- PROC(readdir, readdirargs, readdirres, none, RC_NOCACHE, 0),
- PROC(statfs, fhandle, statfsres, none, RC_NOCACHE, ST+5),
+ [NFSPROC_NULL] = {
+ .pc_func = (svc_procfunc) nfsd_proc_null,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_GETATTR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_getattr,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_SETATTR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_setattr,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_sattrargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_sattrargs),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_ROOT] = {
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_LOOKUP] = {
+ .pc_func = (svc_procfunc) nfsd_proc_lookup,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_READLINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_readlink,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readlinkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readlinkres,
+ .pc_argsize = sizeof(struct nfsd_readlinkargs),
+ .pc_ressize = sizeof(struct nfsd_readlinkres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
+ },
+ [NFSPROC_READ] = {
+ .pc_func = (svc_procfunc) nfsd_proc_read,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_readargs),
+ .pc_ressize = sizeof(struct nfsd_readres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
+ },
+ [NFSPROC_WRITECACHE] = {
+ .pc_decode = (kxdrproc_t) nfssvc_decode_void,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_void),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_WRITE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_write,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_writeargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_attrstat,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_writeargs),
+ .pc_ressize = sizeof(struct nfsd_attrstat),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+AT,
+ },
+ [NFSPROC_CREATE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_create,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_REMOVE] = {
+ .pc_func = (svc_procfunc) nfsd_proc_remove,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_RENAME] = {
+ .pc_func = (svc_procfunc) nfsd_proc_rename,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_renameargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_renameargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_LINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_link,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_linkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_linkargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_SYMLINK] = {
+ .pc_func = (svc_procfunc) nfsd_proc_symlink,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_symlinkargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_symlinkargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_MKDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_mkdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_createargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_diropres,
+ .pc_release = (kxdrproc_t) nfssvc_release_fhandle,
+ .pc_argsize = sizeof(struct nfsd_createargs),
+ .pc_ressize = sizeof(struct nfsd_diropres),
+ .pc_cachetype = RC_REPLBUFF,
+ .pc_xdrressize = ST+FH+AT,
+ },
+ [NFSPROC_RMDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_rmdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_diropargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_void,
+ .pc_argsize = sizeof(struct nfsd_diropargs),
+ .pc_ressize = sizeof(struct nfsd_void),
+ .pc_cachetype = RC_REPLSTAT,
+ .pc_xdrressize = ST,
+ },
+ [NFSPROC_READDIR] = {
+ .pc_func = (svc_procfunc) nfsd_proc_readdir,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_readdirargs,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_readdirres,
+ .pc_argsize = sizeof(struct nfsd_readdirargs),
+ .pc_ressize = sizeof(struct nfsd_readdirres),
+ .pc_cachetype = RC_NOCACHE,
+ },
+ [NFSPROC_STATFS] = {
+ .pc_func = (svc_procfunc) nfsd_proc_statfs,
+ .pc_decode = (kxdrproc_t) nfssvc_decode_fhandle,
+ .pc_encode = (kxdrproc_t) nfssvc_encode_statfsres,
+ .pc_argsize = sizeof(struct nfsd_fhandle),
+ .pc_ressize = sizeof(struct nfsd_statfsres),
+ .pc_cachetype = RC_NOCACHE,
+ .pc_xdrressize = ST+5,
+ },
};
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index cbba4a935786..d4c9884cd54b 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -390,12 +390,14 @@ nfsd_svc(unsigned short port, int nrservs)
mutex_lock(&nfsd_mutex);
dprintk("nfsd: creating service\n");
- error = -EINVAL;
if (nrservs <= 0)
nrservs = 0;
if (nrservs > NFSD_MAXSERVS)
nrservs = NFSD_MAXSERVS;
-
+ error = 0;
+ if (nrservs == 0 && nfsd_serv == NULL)
+ goto out;
+
/* Readahead param cache - will no-op if it already exists */
error = nfsd_racache_init(2*nrservs);
if (error<0)
@@ -413,6 +415,12 @@ nfsd_svc(unsigned short port, int nrservs)
goto failure;
error = svc_set_num_threads(nfsd_serv, NULL, nrservs);
+ if (error == 0)
+ /* We are holding a reference to nfsd_serv which
+ * we don't want to count in the return value,
+ * so subtract 1
+ */
+ error = nfsd_serv->sv_nrthreads - 1;
failure:
svc_destroy(nfsd_serv); /* Release server */
out:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 99f835753596..4145083dcf88 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -966,6 +966,43 @@ static void kill_suid(struct dentry *dentry)
mutex_unlock(&dentry->d_inode->i_mutex);
}
+/*
+ * Gathered writes: If another process is currently writing to the file,
+ * there's a high chance this is another nfsd (triggered by a bulk write
+ * from a client's biod). Rather than syncing the file with each write
+ * request, we sleep for 10 msec.
+ *
+ * I don't know if this roughly approximates C. Juszak's idea of
+ * gathered writes, but it's a nice and simple solution (IMHO), and it
+ * seems to work:-)
+ *
+ * Note: we do this only in the NFSv2 case, since v3 and higher have a
+ * better tool (separate unstable writes and commits) for solving this
+ * problem.
+ */
+static int wait_for_concurrent_writes(struct file *file)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ static ino_t last_ino;
+ static dev_t last_dev;
+ int err = 0;
+
+ if (atomic_read(&inode->i_writecount) > 1
+ || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
+ dprintk("nfsd: write defer %d\n", task_pid_nr(current));
+ msleep(10);
+ dprintk("nfsd: write resume %d\n", task_pid_nr(current));
+ }
+
+ if (inode->i_state & I_DIRTY) {
+ dprintk("nfsd: write sync %d\n", task_pid_nr(current));
+ err = nfsd_sync(file);
+ }
+ last_ino = inode->i_ino;
+ last_dev = inode->i_sb->s_dev;
+ return err;
+}
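
wait_for_concurrent_writes() is now a named helper with the same behaviour the old inline block had: defer briefly when other writers are active, then sync only if the inode is still dirty. A loose userspace analogue of that defer-then-flush idea (illustration only, not the kernel path):

#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

/* If other writers are believed to be active, wait ~10 ms before flushing,
 * hoping several writes can share one fsync(). */
static int lazy_sync(int fd, int other_writers_likely)
{
	if (other_writers_likely)
		usleep(10 * 1000);
	return fsync(fd);
}

int main(void)
{
	int fd = open("/tmp/wgather-demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "hello\n", 6) != 6)
		perror("write");
	if (lazy_sync(fd, 1) != 0)
		perror("fsync");
	close(fd);
	return 0;
}
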
+
static __be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
loff_t offset, struct kvec *vec, int vlen,
@@ -978,6 +1015,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
__be32 err = 0;
int host_err;
int stable = *stablep;
+ int use_wgather;
#ifdef MSNFS
err = nfserr_perm;
@@ -996,9 +1034,10 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
* - the sync export option has been set, or
* - the client requested O_SYNC behavior (NFSv3 feature).
* - The file system doesn't support fsync().
- * When gathered writes have been configured for this volume,
+ * When NFSv2 gathered writes have been configured for this volume,
* flushing the data to disk is handled separately below.
*/
+ use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
if (!file->f_op->fsync) {/* COMMIT3 cannot work */
stable = 2;
@@ -1007,7 +1046,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
if (!EX_ISSYNC(exp))
stable = 0;
- if (stable && !EX_WGATHER(exp)) {
+ if (stable && !use_wgather) {
spin_lock(&file->f_lock);
file->f_flags |= O_SYNC;
spin_unlock(&file->f_lock);
@@ -1017,52 +1056,20 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
oldfs = get_fs(); set_fs(KERNEL_DS);
host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
set_fs(oldfs);
- if (host_err >= 0) {
- *cnt = host_err;
- nfsdstats.io_write += host_err;
- fsnotify_modify(file->f_path.dentry);
- }
+ if (host_err < 0)
+ goto out_nfserr;
+ *cnt = host_err;
+ nfsdstats.io_write += host_err;
+ fsnotify_modify(file->f_path.dentry);
/* clear setuid/setgid flag after write */
- if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
+ if (inode->i_mode & (S_ISUID | S_ISGID))
kill_suid(dentry);
- if (host_err >= 0 && stable) {
- static ino_t last_ino;
- static dev_t last_dev;
-
- /*
- * Gathered writes: If another process is currently
- * writing to the file, there's a high chance
- * this is another nfsd (triggered by a bulk write
- * from a client's biod). Rather than syncing the
- * file with each write request, we sleep for 10 msec.
- *
- * I don't know if this roughly approximates
- * C. Juszak's idea of gathered writes, but it's a
- * nice and simple solution (IMHO), and it seems to
- * work:-)
- */
- if (EX_WGATHER(exp)) {
- if (atomic_read(&inode->i_writecount) > 1
- || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
- dprintk("nfsd: write defer %d\n", task_pid_nr(current));
- msleep(10);
- dprintk("nfsd: write resume %d\n", task_pid_nr(current));
- }
-
- if (inode->i_state & I_DIRTY) {
- dprintk("nfsd: write sync %d\n", task_pid_nr(current));
- host_err=nfsd_sync(file);
- }
-#if 0
- wake_up(&inode->i_wait);
-#endif
- }
- last_ino = inode->i_ino;
- last_dev = inode->i_sb->s_dev;
- }
+ if (stable && use_wgather)
+ host_err = wait_for_concurrent_writes(file);
+out_nfserr:
dprintk("nfsd: write complete host_err=%d\n", host_err);
if (host_err >= 0)
err = 0;
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 55413e568f07..92b73b6140ff 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -625,7 +625,7 @@
*(.init.ramfs) \
VMLINUX_SYMBOL(__initramfs_end) = .;
#else
-#define INITRAMFS
+#define INIT_RAM_FS
#endif
/**
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 2643d848df90..4d668e05d458 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -69,7 +69,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
-extern void cpu_hotplug_init(void);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
@@ -84,10 +83,6 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
-static inline void cpu_hotplug_init(void)
-{
-}
-
static inline void cpu_maps_update_begin(void)
{
}
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 1a455f1f86d7..5619f8522738 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -13,6 +13,10 @@
#define DMA_PTE_WRITE (2)
#define DMA_PTE_SNP (1 << 11)
+#define CONTEXT_TT_MULTI_LEVEL 0
+#define CONTEXT_TT_DEV_IOTLB 1
+#define CONTEXT_TT_PASS_THROUGH 2
+
struct intel_iommu;
struct dmar_domain;
struct root_entry;
@@ -21,11 +25,16 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
#ifdef CONFIG_DMAR
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
return 0;
}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+ return 0;
+}
#endif
extern int dmar_disabled;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 10ff5c498824..1731fb5fd775 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -188,6 +188,15 @@ struct dmar_rmrr_unit {
#define for_each_rmrr_units(rmrr) \
list_for_each_entry(rmrr, &dmar_rmrr_units, list)
+
+struct dmar_atsr_unit {
+ struct list_head list; /* list of ATSR units */
+ struct acpi_dmar_header *hdr; /* ACPI header */
+ struct pci_dev **devices; /* target devices */
+ int devices_cnt; /* target device count */
+ u8 include_all:1; /* include all ports */
+};
+
/* Intel DMAR initialization functions */
extern int intel_iommu_init(void);
#else
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index e584b7215e8b..9823946adbc5 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -3,6 +3,7 @@
#include <linux/completion.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
@@ -355,4 +356,90 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *payload, size_t length);
+static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
+{
+ return tag << 14 | channel << 8 | sy;
+}
+
+struct fw_descriptor {
+ struct list_head link;
+ size_t length;
+ u32 immediate;
+ u32 key;
+ const u32 *data;
+};
+
+int fw_core_add_descriptor(struct fw_descriptor *desc);
+void fw_core_remove_descriptor(struct fw_descriptor *desc);
+
+/*
+ * The iso packet format allows for an immediate header/payload part
+ * stored in 'header' immediately after the packet info plus an
+ * indirect payload part that is pointed to by the 'payload' field.
+ * Applications can use one or the other or both to implement simple
+ * low-bandwidth streaming (e.g. audio) or more advanced
+ * scatter-gather streaming (e.g. assembling video frames automatically).
+ */
+struct fw_iso_packet {
+ u16 payload_length; /* Length of indirect payload. */
+ u32 interrupt:1; /* Generate interrupt on this packet */
+ u32 skip:1; /* Set to not send packet at all. */
+ u32 tag:2;
+ u32 sy:4;
+ u32 header_length:8; /* Length of immediate header. */
+ u32 header[0];
+};
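
The comment above describes packets that carry an immediate header inline plus an optional indirect payload living in the separate iso buffer. A standalone sketch of how such a trailing-header descriptor is sized and filled; the field names and values here are hypothetical, not the fw_iso_packet layout itself:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_iso_packet {
	uint16_t payload_length;     /* bytes that live in the big DMA buffer */
	uint8_t  header_quadlets;    /* immediate 32-bit words stored inline  */
	uint32_t header[];           /* flexible array: sized at allocation   */
};

int main(void)
{
	unsigned int quads = 2;
	struct demo_iso_packet *pkt =
		malloc(sizeof(*pkt) + quads * sizeof(uint32_t));

	if (!pkt)
		return 1;
	pkt->payload_length  = 480;          /* e.g. one block of audio data */
	pkt->header_quadlets = quads;
	pkt->header[0] = 0x00010203;         /* immediate header words */
	pkt->header[1] = 0x04050607;
	printf("%u inline quadlets, %u payload bytes\n",
	       (unsigned)pkt->header_quadlets, (unsigned)pkt->payload_length);
	free(pkt);
	return 0;
}
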
+
+#define FW_ISO_CONTEXT_TRANSMIT 0
+#define FW_ISO_CONTEXT_RECEIVE 1
+
+#define FW_ISO_CONTEXT_MATCH_TAG0 1
+#define FW_ISO_CONTEXT_MATCH_TAG1 2
+#define FW_ISO_CONTEXT_MATCH_TAG2 4
+#define FW_ISO_CONTEXT_MATCH_TAG3 8
+#define FW_ISO_CONTEXT_MATCH_ALL_TAGS 15
+
+/*
+ * An iso buffer is just a set of pages mapped for DMA in the
+ * specified direction. Since the pages are to be used for DMA, they
+ * are not mapped into the kernel virtual address space. We store the
+ * DMA address in the page private. The helper function
+ * fw_iso_buffer_map() will map the pages into a given vma.
+ */
+struct fw_iso_buffer {
+ enum dma_data_direction direction;
+ struct page **pages;
+ int page_count;
+};
+
+int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
+ int page_count, enum dma_data_direction direction);
+void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
+
+struct fw_iso_context;
+typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
+ u32 cycle, size_t header_length,
+ void *header, void *data);
+struct fw_iso_context {
+ struct fw_card *card;
+ int type;
+ int channel;
+ int speed;
+ size_t header_size;
+ fw_iso_callback_t callback;
+ void *callback_data;
+};
+
+struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
+ int type, int channel, int speed, size_t header_size,
+ fw_iso_callback_t callback, void *callback_data);
+int fw_iso_context_queue(struct fw_iso_context *ctx,
+ struct fw_iso_packet *packet,
+ struct fw_iso_buffer *buffer,
+ unsigned long payload);
+int fw_iso_context_start(struct fw_iso_context *ctx,
+ int cycle, int sync, int tags);
+int fw_iso_context_stop(struct fw_iso_context *ctx);
+void fw_iso_context_destroy(struct fw_iso_context *ctx);
+
#endif /* _LINUX_FIREWIRE_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 74a57938c880..1ff5e4e01952 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1107,6 +1107,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *);
extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_flock(struct file *);
+extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index aa8c53171233..482dc91fd53a 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -53,6 +53,7 @@
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
@@ -120,8 +121,10 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
+#define ecap_pass_through(e) ((e >> 6) & 0x1)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
@@ -197,6 +200,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
+#define DMA_FSTS_ICE (1 << 5)
+#define DMA_FSTS_ITE (1 << 6)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
@@ -225,7 +230,8 @@ do { \
enum {
QI_FREE,
QI_IN_USE,
- QI_DONE
+ QI_DONE,
+ QI_ABORT
};
#define QI_CC_TYPE 0x1
@@ -254,6 +260,12 @@ enum {
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_SIZE 1
+#define QI_DEV_IOTLB_MAX_INVS 32
+
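
The QI_DEV_IOTLB_* additions follow the usual pattern of building a 64-bit queued-invalidation descriptor word from shift-and-mask macros. A small illustration of composing such a word; the macro names and field positions below are made-up stand-ins, not the VT-d layout:

#include <stdio.h>
#include <stdint.h>

#define DEMO_SID(sid)   ((uint64_t)((sid) & 0xffff) << 32)  /* source id   */
#define DEMO_QDEP(qdep) ((uint64_t)((qdep) & 0x1f) << 16)   /* queue depth */
#define DEMO_TYPE       0x3ULL                              /* descriptor type */

int main(void)
{
	uint64_t low = DEMO_SID(0x1234) | DEMO_QDEP(8) | DEMO_TYPE;

	printf("descriptor low = 0x%016llx\n", (unsigned long long)low);
	return 0;
}
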
struct qi_desc {
u64 low, high;
};
@@ -280,10 +292,10 @@ struct ir_table {
#endif
struct iommu_flush {
- int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
- u64 type, int non_present_entry_flush);
- int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type, int non_present_entry_flush);
+ void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+ void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
};
enum {
@@ -302,6 +314,7 @@ struct intel_iommu {
spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
+ int msagaw; /* max sagaw of this iommu */
unsigned int irq;
unsigned char name[13]; /* Device Name */
@@ -329,6 +342,7 @@ static inline void __iommu_flush_cache(
}
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
extern int alloc_iommu(struct dmar_drhd_unit *drhd);
extern void free_iommu(struct intel_iommu *iommu);
@@ -337,11 +351,12 @@ extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
-extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
- u8 fm, u64 type, int non_present_entry_flush);
-extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
- unsigned int size_order, u64 type,
- int non_present_entry_flush);
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+ u8 fm, u64 type);
+extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 32e4b2f72294..786e7b8cece9 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -49,6 +49,8 @@ struct resource_list {
#define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */
#define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */
+#define IORESOURCE_MEM_64 0x00100000
+
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 51855dfd8adb..c325b187966b 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -195,7 +195,7 @@ extern struct svc_procedure nlmsvc_procedures4[];
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
extern int nsm_use_hostnames;
-extern int nsm_local_state;
+extern u32 nsm_local_state;
/*
* Lockd client functions
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 3a059298cc19..3beb2592b03f 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -24,16 +24,10 @@ struct proc_mounts {
struct fs_struct;
+extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct fs_struct *);
-extern void __put_mnt_ns(struct mnt_namespace *ns);
-
-static inline void put_mnt_ns(struct mnt_namespace *ns)
-{
- if (atomic_dec_and_lock(&ns->count, &vfsmount_lock))
- /* releases vfsmount_lock */
- __put_mnt_ns(ns);
-}
+extern void put_mnt_ns(struct mnt_namespace *ns);
static inline void exit_mnt_ns(struct task_struct *p)
{
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7efb9be34662..4030ebada49e 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -563,6 +563,7 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
* @options: Option flags, e.g. 16bit buswidth
* @ecclayout: ecc layout info structure
* @part_probe_types: NULL-terminated array of probe types
+ * @set_parts: platform specific function to set partitions
* @priv: hardware controller specific settings
*/
struct platform_nand_chip {
@@ -574,26 +575,41 @@ struct platform_nand_chip {
int chip_delay;
unsigned int options;
const char **part_probe_types;
+ void (*set_parts)(uint64_t size,
+ struct platform_nand_chip *chip);
void *priv;
};
+/* Keep gcc happy */
+struct platform_device;
+
/**
* struct platform_nand_ctrl - controller level device structure
+ * @probe: platform specific function to probe/setup hardware
+ * @remove: platform specific function to remove/teardown hardware
* @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
* @cmd_ctrl: platform specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
+ * @write_buf: platform specific function for write buffer
+ * @read_buf: platform specific function for read buffer
* @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
*/
struct platform_nand_ctrl {
+ int (*probe)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
void (*hwcontrol)(struct mtd_info *mtd, int cmd);
int (*dev_ready)(struct mtd_info *mtd);
void (*select_chip)(struct mtd_info *mtd, int chip);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat,
unsigned int ctrl);
+ void (*write_buf)(struct mtd_info *mtd,
+ const uint8_t *buf, int len);
+ void (*read_buf)(struct mtd_info *mtd,
+ uint8_t *buf, int len);
void *priv;
};
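For board code, the new probe/remove and buffer hooks slot into the existing platform data. A hedged sketch, assuming the usual struct platform_nand_data wrapper; the mydev_* names are hypothetical and only the hooks a board actually needs have to be filled in:

#include <linux/mtd/nand.h>
#include <linux/platform_device.h>

static int mydev_nand_probe(struct platform_device *pdev)
{
	/* enable controller clocks, claim GPIOs, ... */
	return 0;
}

static void mydev_nand_remove(struct platform_device *pdev)
{
	/* undo whatever probe set up */
}

static const char *mydev_part_probes[] = { "cmdlinepart", NULL };

static struct platform_nand_data mydev_nand_data = {
	.chip = {
		.nr_chips		= 1,
		.part_probe_types	= mydev_part_probes,
	},
	.ctrl = {
		.probe	= mydev_nand_probe,
		.remove	= mydev_nand_remove,
		/* .write_buf and .read_buf only if the bus needs special access */
	},
};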
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 9aa2a9149b58..8ed873374381 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -17,6 +17,7 @@
#include <linux/mtd/onenand_regs.h>
#include <linux/mtd/bbm.h>
+#define MAX_DIES 2
#define MAX_BUFFERRAM 2
/* Scan and identify a OneNAND device */
@@ -51,7 +52,12 @@ struct onenand_bufferram {
/**
* struct onenand_chip - OneNAND Private Flash Chip Data
* @base: [BOARDSPECIFIC] address to access OneNAND
+ * @dies: [INTERN][FLEX-ONENAND] number of dies on chip
+ * @boundary: [INTERN][FLEX-ONENAND] Boundary of the dies
+ * @diesize: [INTERN][FLEX-ONENAND] Size of the dies
* @chipsize: [INTERN] the size of one chip for multichip arrays
+ * FIXME For Flex-OneNAND, chipsize holds maximum possible
+ * device size, i.e. when all blocks are considered MLC
* @device_id: [INTERN] device ID
* @density_mask: chip density, used for DDP devices
* @version_id: [INTERN] version ID
@@ -68,6 +74,8 @@ struct onenand_bufferram {
* @command: [REPLACEABLE] hardware specific function for writing
* commands to the chip
* @wait: [REPLACEABLE] hardware specific function for wait on ready
+ * @bbt_wait: [REPLACEABLE] hardware specific function for bbt wait on ready
+ * @unlock_all: [REPLACEABLE] hardware specific function for unlock all
* @read_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @write_bufferram: [REPLACEABLE] hardware specific function for BufferRAM Area
* @read_word: [REPLACEABLE] hardware specific function for read
@@ -92,9 +100,13 @@ struct onenand_bufferram {
*/
struct onenand_chip {
void __iomem *base;
+ unsigned dies;
+ unsigned boundary[MAX_DIES];
+ loff_t diesize[MAX_DIES];
unsigned int chipsize;
unsigned int device_id;
unsigned int version_id;
+ unsigned int technology;
unsigned int density_mask;
unsigned int options;
@@ -108,6 +120,8 @@ struct onenand_chip {
int (*command)(struct mtd_info *mtd, int cmd, loff_t address, size_t len);
int (*wait)(struct mtd_info *mtd, int state);
+ int (*bbt_wait)(struct mtd_info *mtd, int state);
+ void (*unlock_all)(struct mtd_info *mtd);
int (*read_bufferram)(struct mtd_info *mtd, int area,
unsigned char *buffer, int offset, size_t count);
int (*write_bufferram)(struct mtd_info *mtd, int area,
@@ -145,6 +159,8 @@ struct onenand_chip {
#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0)
#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1)
+#define FLEXONENAND(this) \
+ (this->device_id & DEVICE_IS_FLEXONENAND)
#define ONENAND_GET_SYS_CFG1(this) \
(this->read_word(this->base + ONENAND_REG_SYS_CFG1))
#define ONENAND_SET_SYS_CFG1(v, this) \
@@ -153,6 +169,9 @@ struct onenand_chip {
#define ONENAND_IS_DDP(this) \
(this->device_id & ONENAND_DEVICE_IS_DDP)
+#define ONENAND_IS_MLC(this) \
+ (this->technology & ONENAND_TECHNOLOGY_IS_MLC)
+
#ifdef CONFIG_MTD_ONENAND_2X_PROGRAM
#define ONENAND_IS_2PLANE(this) \
(this->options & ONENAND_HAS_2PLANE)
@@ -169,6 +188,7 @@ struct onenand_chip {
#define ONENAND_HAS_CONT_LOCK (0x0001)
#define ONENAND_HAS_UNLOCK_ALL (0x0002)
#define ONENAND_HAS_2PLANE (0x0004)
+#define ONENAND_SKIP_UNLOCK_CHECK (0x0100)
#define ONENAND_PAGEBUF_ALLOC (0x1000)
#define ONENAND_OOBBUF_ALLOC (0x2000)
@@ -176,6 +196,7 @@ struct onenand_chip {
* OneNAND Flash Manufacturer ID Codes
*/
#define ONENAND_MFR_SAMSUNG 0xec
+#define ONENAND_MFR_NUMONYX 0x20
/**
* struct onenand_manufacturers - NAND Flash Manufacturer ID Structure
@@ -189,5 +210,8 @@ struct onenand_manufacturers {
int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
+unsigned onenand_block(struct onenand_chip *this, loff_t addr);
+loff_t onenand_addr(struct onenand_chip *this, int block);
+int flexonenand_region(struct mtd_info *mtd, loff_t addr);
#endif /* __LINUX_MTD_ONENAND_H */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index 0c6bbe28f38c..86a6bbef6465 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -67,6 +67,9 @@
/*
* Device ID Register F001h (R)
*/
+#define DEVICE_IS_FLEXONENAND (1 << 9)
+#define FLEXONENAND_PI_MASK (0x3ff)
+#define FLEXONENAND_PI_UNLOCK_SHIFT (14)
#define ONENAND_DEVICE_DENSITY_MASK (0xf)
#define ONENAND_DEVICE_DENSITY_SHIFT (4)
#define ONENAND_DEVICE_IS_DDP (1 << 3)
@@ -84,6 +87,11 @@
#define ONENAND_VERSION_PROCESS_SHIFT (8)
/*
+ * Technology Register F006h (R)
+ */
+#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0)
+
+/*
* Start Address 1 F100h (R/W) & Start Address 2 F101h (R/W)
*/
#define ONENAND_DDP_SHIFT (15)
@@ -93,7 +101,8 @@
/*
* Start Address 8 F107h (R/W)
*/
-#define ONENAND_FPA_MASK (0x3f)
+/* Note: It's actually 0x3f in case of SLC */
+#define ONENAND_FPA_MASK (0x7f)
#define ONENAND_FPA_SHIFT (2)
#define ONENAND_FSA_MASK (0x03)
@@ -105,7 +114,8 @@
#define ONENAND_BSA_BOOTRAM (0 << 2)
#define ONENAND_BSA_DATARAM0 (2 << 2)
#define ONENAND_BSA_DATARAM1 (3 << 2)
-#define ONENAND_BSC_MASK (0x03)
+/* Note: It's actually 0x03 in case of SLC */
+#define ONENAND_BSC_MASK (0x07)
/*
* Command Register F220h (R/W)
@@ -124,9 +134,13 @@
#define ONENAND_CMD_RESET (0xF0)
#define ONENAND_CMD_OTP_ACCESS (0x65)
#define ONENAND_CMD_READID (0x90)
+#define FLEXONENAND_CMD_PI_UPDATE (0x05)
+#define FLEXONENAND_CMD_PI_ACCESS (0x66)
+#define FLEXONENAND_CMD_RECOVER_LSB (0x05)
/* NOTE: Those are not *REAL* commands */
#define ONENAND_CMD_BUFFERRAM (0x1978)
+#define FLEXONENAND_CMD_READ_PI (0x1985)
/*
* System Configuration 1 Register F221h (R, R/W)
@@ -192,10 +206,12 @@
#define ONENAND_ECC_1BIT_ALL (0x5555)
#define ONENAND_ECC_2BIT (1 << 1)
#define ONENAND_ECC_2BIT_ALL (0xAAAA)
+#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010)
/*
* One-Time Programmable (OTP)
*/
+#define FLEXONENAND_OTP_LOCK_OFFSET (2048)
#define ONENAND_OTP_LOCK_OFFSET (14)
#endif /* __ONENAND_REG_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 7535a74083b9..af6dcb992bc3 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -40,7 +40,6 @@ struct mtd_partition {
uint64_t offset; /* offset within the master MTD space */
uint32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
- struct mtd_info **mtdp; /* pointer to store the MTD object */
};
#define MTDPART_OFS_NXTBLK (-2)
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 214d499718f7..f387919bbc59 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -25,8 +25,9 @@
#define NFSMODE_SOCK 0140000
#define NFSMODE_FIFO 0010000
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_PORT 627
+#define NFS_MNT_PROGRAM 100005
+#define NFS_MNT_VERSION 1
+#define NFS_MNT3_VERSION 3
/*
* NFS stats. The good thing with these values is that NFSv3 errors are
diff --git a/include/linux/nfs2.h b/include/linux/nfs2.h
index 0ed9517138fc..fde24b30cc9e 100644
--- a/include/linux/nfs2.h
+++ b/include/linux/nfs2.h
@@ -64,11 +64,4 @@ struct nfs2_fh {
#define NFSPROC_READDIR 16
#define NFSPROC_STATFS 17
-#define NFS_MNT_PROGRAM 100005
-#define NFS_MNT_VERSION 1
-#define MNTPROC_NULL 0
-#define MNTPROC_MNT 1
-#define MNTPROC_UMNT 3
-#define MNTPROC_UMNTALL 4
-
#endif /* _LINUX_NFS2_H */
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index 539f3b550eab..ac33806ec7f9 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -88,12 +88,7 @@ struct nfs3_fh {
#define NFS3PROC_PATHCONF 20
#define NFS3PROC_COMMIT 21
-#define NFS_MNT3_PROGRAM 100005
#define NFS_MNT3_VERSION 3
-#define MOUNTPROC3_NULL 0
-#define MOUNTPROC3_MNT 1
-#define MOUNTPROC3_UMNT 3
-#define MOUNTPROC3_UMNTALL 4
#if defined(__KERNEL__)
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index e3f0cbcbd0db..bd2eba530667 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -21,6 +21,7 @@
#define NFS4_FHSIZE 128
#define NFS4_MAXPATHLEN PATH_MAX
#define NFS4_MAXNAMLEN NAME_MAX
+#define NFS4_OPAQUE_LIMIT 1024
#define NFS4_MAX_SESSIONID_LEN 16
#define NFS4_ACCESS_READ 0x0001
@@ -130,6 +131,16 @@
#define NFS4_MAX_UINT64 (~(u64)0)
+/* An NFS4 sessions server must support at least NFS4_MAX_OPS operations.
+ * If a compound requires more operations, adjust NFS4_MAX_OPS accordingly.
+ */
+#define NFS4_MAX_OPS 8
+
+/* Our NFS4 client back channel server only wants the cb_sequence and the
+ * actual operation per compound
+ */
+#define NFS4_MAX_BACK_CHANNEL_OPS 2
+
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
NFS4_ACL_WHO_OWNER,
@@ -462,6 +473,13 @@ enum lock_type4 {
#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
#define NFS4_MINOR_VERSION 0
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MINOR_VERSION 1
+#else
+#define NFS4_MAX_MINOR_VERSION 0
+#endif /* CONFIG_NFS_V4_1 */
+
#define NFS4_DEBUG 1
/* Index of predefined Linux client operations */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 6ad75948cbf7..19fe15d12042 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -4,11 +4,17 @@
#include <linux/list.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
+#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/xprt.h>
#include <asm/atomic.h>
+struct nfs4_session;
struct nfs_iostats;
struct nlm_host;
+struct nfs4_sequence_args;
+struct nfs4_sequence_res;
+struct nfs_server;
/*
* The nfs_client identifies our client state to the server.
@@ -18,6 +24,7 @@ struct nfs_client {
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
+#define NFS_CS_SESSION_INITING 2 /* busy initialising session */
unsigned long cl_res_state; /* NFS resources state */
#define NFS_CS_CALLBACK 1 /* - callback started */
#define NFS_CS_IDMAP 2 /* - idmap started */
@@ -32,6 +39,7 @@ struct nfs_client {
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
int cl_proto; /* Network transport protocol */
+ u32 cl_minorversion;/* NFSv4 minorversion */
struct rpc_cred *cl_machine_cred;
#ifdef CONFIG_NFS_V4
@@ -63,7 +71,22 @@ struct nfs_client {
*/
char cl_ipaddr[48];
unsigned char cl_id_uniquifier;
-#endif
+ int (* cl_call_sync)(struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+#endif /* CONFIG_NFS_V4 */
+
+#ifdef CONFIG_NFS_V4_1
+ /* clientid returned from EXCHANGE_ID, used by session operations */
+ u64 cl_ex_clid;
+ /* The sequence id to use for the next CREATE_SESSION */
+ u32 cl_seqid;
+ /* The flags used for obtaining the clientid during EXCHANGE_ID */
+ u32 cl_exchange_flags;
+ struct nfs4_session *cl_session; /* shared session */
+#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */
@@ -145,4 +168,46 @@ struct nfs_server {
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
+
+/* maximum number of slots to use */
+#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
+
+#if defined(CONFIG_NFS_V4_1)
+
+/* Sessions */
+#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
+struct nfs4_slot_table {
+ struct nfs4_slot *slots; /* seqid per slot */
+ unsigned long used_slots[SLOT_TABLE_SZ]; /* used/unused bitmap */
+ spinlock_t slot_tbl_lock;
+ struct rpc_wait_queue slot_tbl_waitq; /* allocators may wait here */
+ int max_slots; /* # slots in table */
+ int highest_used_slotid; /* sent to server on each SEQ.
+ * op for dynamic resizing */
+};
+
+static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
+{
+ return sp - tbl->slots;
+}
+
+/*
+ * Session related parameters
+ */
+struct nfs4_session {
+ struct nfs4_sessionid sess_id;
+ u32 flags;
+ unsigned long session_state;
+ u32 hash_alg;
+ u32 ssv_len;
+
+ /* The fore and back channel */
+ struct nfs4_channel_attrs fc_attrs;
+ struct nfs4_slot_table fc_slot_table;
+ struct nfs4_channel_attrs bc_attrs;
+ struct nfs4_slot_table bc_slot_table;
+ struct nfs_client *clp;
+};
+
+#endif /* CONFIG_NFS_V4_1 */
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b89c34e40bc2..62f63fb0c4c8 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -145,6 +145,44 @@ struct nfs4_change_info {
};
struct nfs_seqid;
+
+/* nfs41 sessions channel attributes */
+struct nfs4_channel_attrs {
+ u32 headerpadsz;
+ u32 max_rqst_sz;
+ u32 max_resp_sz;
+ u32 max_resp_sz_cached;
+ u32 max_ops;
+ u32 max_reqs;
+};
+
+/* nfs41 sessions slot seqid */
+struct nfs4_slot {
+ u32 seq_nr;
+};
+
+struct nfs4_sequence_args {
+ struct nfs4_session *sa_session;
+ u8 sa_slotid;
+ u8 sa_cache_this;
+};
+
+struct nfs4_sequence_res {
+ struct nfs4_session *sr_session;
+ u8 sr_slotid; /* slot used to send request */
+ unsigned long sr_renewal_time;
+ int sr_status; /* sequence operation status */
+};
+
+struct nfs4_get_lease_time_args {
+ struct nfs4_sequence_args la_seq_args;
+};
+
+struct nfs4_get_lease_time_res {
+ struct nfs_fsinfo *lr_fsinfo;
+ struct nfs4_sequence_res lr_seq_res;
+};
+
/*
* Arguments to the open call.
*/
@@ -165,6 +203,7 @@ struct nfs_openargs {
const struct nfs_server *server; /* Needed for ID mapping */
const u32 * bitmask;
__u32 claim;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_openres {
@@ -181,6 +220,7 @@ struct nfs_openres {
__u32 do_recall;
__u64 maxsize;
__u32 attrset[NFS4_BITMAP_SIZE];
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -206,6 +246,7 @@ struct nfs_closeargs {
struct nfs_seqid * seqid;
fmode_t fmode;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_closeres {
@@ -213,6 +254,7 @@ struct nfs_closeres {
struct nfs_fattr * fattr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
* * Arguments to the lock,lockt, and locku call.
@@ -233,12 +275,14 @@ struct nfs_lock_args {
unsigned char block : 1;
unsigned char reclaim : 1;
unsigned char new_lock_owner : 1;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lock_res {
nfs4_stateid stateid;
struct nfs_seqid * lock_seqid;
struct nfs_seqid * open_seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_locku_args {
@@ -246,32 +290,38 @@ struct nfs_locku_args {
struct file_lock * fl;
struct nfs_seqid * seqid;
nfs4_stateid * stateid;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_locku_res {
nfs4_stateid stateid;
struct nfs_seqid * seqid;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_lockt_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_lowner lock_owner;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_lockt_res {
struct file_lock * denied; /* LOCK, LOCKT failed */
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_delegreturnargs {
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_delegreturnres {
struct nfs_fattr * fattr;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -284,12 +334,14 @@ struct nfs_readargs {
__u32 count;
unsigned int pgbase;
struct page ** pages;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_readres {
struct nfs_fattr * fattr;
__u32 count;
int eof;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -304,6 +356,7 @@ struct nfs_writeargs {
unsigned int pgbase;
struct page ** pages;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_writeverf {
@@ -316,6 +369,7 @@ struct nfs_writeres {
struct nfs_writeverf * verf;
__u32 count;
const struct nfs_server *server;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -325,12 +379,14 @@ struct nfs_removeargs {
const struct nfs_fh *fh;
struct qstr name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_removeres {
const struct nfs_server *server;
struct nfs4_change_info cinfo;
struct nfs_fattr dir_attr;
+ struct nfs4_sequence_res seq_res;
};
/*
@@ -383,6 +439,7 @@ struct nfs_setattrargs {
struct iattr * iap;
const struct nfs_server * server; /* Needed for name mapping */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs_setaclargs {
@@ -390,6 +447,11 @@ struct nfs_setaclargs {
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_setaclres {
+ struct nfs4_sequence_res seq_res;
};
struct nfs_getaclargs {
@@ -397,11 +459,18 @@ struct nfs_getaclargs {
size_t acl_len;
unsigned int acl_pgbase;
struct page ** acl_pages;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs_getaclres {
+ size_t acl_len;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_setattrres {
struct nfs_fattr * fattr;
const struct nfs_server * server;
+ struct nfs4_sequence_res seq_res;
};
struct nfs_linkargs {
@@ -583,6 +652,7 @@ struct nfs4_accessargs {
const struct nfs_fh * fh;
const u32 * bitmask;
u32 access;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_accessres {
@@ -590,6 +660,7 @@ struct nfs4_accessres {
struct nfs_fattr * fattr;
u32 supported;
u32 access;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_create_arg {
@@ -609,6 +680,7 @@ struct nfs4_create_arg {
const struct iattr * attrs;
const struct nfs_fh * dir_fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_create_res {
@@ -617,21 +689,30 @@ struct nfs4_create_res {
struct nfs_fattr * fattr;
struct nfs4_change_info dir_cinfo;
struct nfs_fattr * dir_fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_fsinfo_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fsinfo_res {
+ struct nfs_fsinfo *fsinfo;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_getattr_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_getattr_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_link_arg {
@@ -639,6 +720,7 @@ struct nfs4_link_arg {
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_link_res {
@@ -646,6 +728,7 @@ struct nfs4_link_res {
struct nfs_fattr * fattr;
struct nfs4_change_info cinfo;
struct nfs_fattr * dir_attr;
+ struct nfs4_sequence_res seq_res;
};
@@ -653,21 +736,30 @@ struct nfs4_lookup_arg {
const struct nfs_fh * dir_fh;
const struct qstr * name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_lookup_res {
const struct nfs_server * server;
struct nfs_fattr * fattr;
struct nfs_fh * fh;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_lookup_root_arg {
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_pathconf_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_pathconf_res {
+ struct nfs_pathconf *pathconf;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readdir_arg {
@@ -678,11 +770,13 @@ struct nfs4_readdir_arg {
struct page ** pages; /* zero-copy data */
unsigned int pgbase; /* zero-copy data */
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_readdir_res {
nfs4_verifier verifier;
unsigned int pgbase;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_readlink {
@@ -690,6 +784,11 @@ struct nfs4_readlink {
unsigned int pgbase;
unsigned int pglen; /* zero-copy data */
struct page ** pages; /* zero-copy data */
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_readlink_res {
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_rename_arg {
@@ -698,6 +797,7 @@ struct nfs4_rename_arg {
const struct qstr * old_name;
const struct qstr * new_name;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_rename_res {
@@ -706,6 +806,7 @@ struct nfs4_rename_res {
struct nfs_fattr * old_fattr;
struct nfs4_change_info new_cinfo;
struct nfs_fattr * new_fattr;
+ struct nfs4_sequence_res seq_res;
};
#define NFS4_SETCLIENTID_NAMELEN (127)
@@ -724,6 +825,17 @@ struct nfs4_setclientid {
struct nfs4_statfs_arg {
const struct nfs_fh * fh;
const u32 * bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_statfs_res {
+ struct nfs_fsstat *fsstat;
+ struct nfs4_sequence_res seq_res;
+};
+
+struct nfs4_server_caps_arg {
+ struct nfs_fh *fhandle;
+ struct nfs4_sequence_args seq_args;
};
struct nfs4_server_caps_res {
@@ -731,6 +843,7 @@ struct nfs4_server_caps_res {
u32 acl_bitmask;
u32 has_links;
u32 has_symlinks;
+ struct nfs4_sequence_res seq_res;
};
struct nfs4_string {
@@ -765,10 +878,68 @@ struct nfs4_fs_locations_arg {
const struct qstr *name;
struct page *page;
const u32 *bitmask;
+ struct nfs4_sequence_args seq_args;
+};
+
+struct nfs4_fs_locations_res {
+ struct nfs4_fs_locations *fs_locations;
+ struct nfs4_sequence_res seq_res;
};
#endif /* CONFIG_NFS_V4 */
+struct nfstime4 {
+ u64 seconds;
+ u32 nseconds;
+};
+
+#ifdef CONFIG_NFS_V4_1
+struct nfs_impl_id4 {
+ u32 domain_len;
+ char *domain;
+ u32 name_len;
+ char *name;
+ struct nfstime4 date;
+};
+
+#define NFS4_EXCHANGE_ID_LEN (48)
+struct nfs41_exchange_id_args {
+ struct nfs_client *client;
+ nfs4_verifier *verifier;
+ unsigned int id_len;
+ char id[NFS4_EXCHANGE_ID_LEN];
+ u32 flags;
+};
+
+struct server_owner {
+ uint64_t minor_id;
+ uint32_t major_id_sz;
+ char major_id[NFS4_OPAQUE_LIMIT];
+};
+
+struct server_scope {
+ uint32_t server_scope_sz;
+ char server_scope[NFS4_OPAQUE_LIMIT];
+};
+
+struct nfs41_exchange_id_res {
+ struct nfs_client *client;
+ u32 flags;
+};
+
+struct nfs41_create_session_args {
+ struct nfs_client *client;
+ uint32_t flags;
+ uint32_t cb_program;
+ struct nfs4_channel_attrs fc_attrs; /* Fore Channel */
+ struct nfs4_channel_attrs bc_attrs; /* Back Channel */
+};
+
+struct nfs41_create_session_res {
+ struct nfs_client *client;
+};
+#endif /* CONFIG_NFS_V4_1 */
+
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
index 5bccaab81056..3a3f58934f5e 100644
--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -14,8 +14,7 @@
#include <linux/uio.h>
/*
- * Representation of a reply cache entry. The first two members *must*
- * be hash_next and hash_prev.
+ * Representation of a reply cache entry.
*/
struct svc_cacherep {
struct hlist_node c_hash;
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index afa19016c4a8..8f641c908450 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -151,9 +151,15 @@ typedef struct svc_fh {
__u64 fh_pre_size; /* size before operation */
struct timespec fh_pre_mtime; /* mtime before oper */
struct timespec fh_pre_ctime; /* ctime before oper */
+ /*
+ * pre-op nfsv4 change attr: note, must check IS_I_VERSION(inode)
+ * to find out if it is valid.
+ */
+ u64 fh_pre_change;
/* Post-op attributes saved in fh_unlock */
struct kstat fh_post_attr; /* full attrs after operation */
+ u64 fh_post_change; /* nfsv4 change; see above */
#endif /* CONFIG_NFSD_V3 */
} svc_fh;
@@ -298,6 +304,7 @@ fill_pre_wcc(struct svc_fh *fhp)
fhp->fh_pre_mtime = inode->i_mtime;
fhp->fh_pre_ctime = inode->i_ctime;
fhp->fh_pre_size = inode->i_size;
+ fhp->fh_pre_change = inode->i_version;
fhp->fh_pre_saved = 1;
}
}
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 4d61c873feed..57ab2ed08459 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -41,7 +41,6 @@
#include <linux/kref.h>
#include <linux/sunrpc/clnt.h>
-#define NFS4_OPAQUE_LIMIT 1024
typedef struct {
u32 cl_boot;
u32 cl_id;
@@ -61,15 +60,6 @@ typedef struct {
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
-
-struct nfs4_cb_recall {
- u32 cbr_ident;
- int cbr_trunc;
- stateid_t cbr_stateid;
- struct knfsd_fh cbr_fh;
- struct nfs4_delegation *cbr_dp;
-};
-
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -81,22 +71,25 @@ struct nfs4_delegation {
struct file *dl_vfs_file;
u32 dl_type;
time_t dl_time;
- struct nfs4_cb_recall dl_recall;
+/* For recall: */
+ u32 dl_ident;
+ stateid_t dl_stateid;
+ struct knfsd_fh dl_fh;
+ int dl_retries;
};
-#define dl_stateid dl_recall.cbr_stateid
-#define dl_fh dl_recall.cbr_fh
-
/* client delegation callback info */
-struct nfs4_callback {
+struct nfs4_cb_conn {
/* SETCLIENTID info */
u32 cb_addr;
unsigned short cb_port;
u32 cb_prog;
- u32 cb_ident;
+ u32 cb_minorversion;
+ u32 cb_ident; /* minorversion 0 only */
/* RPC client info */
atomic_t cb_set; /* successful CB_NULL call */
struct rpc_clnt * cb_client;
+ struct rpc_cred * cb_cred;
};
/* Maximum number of slots per session. 128 is useful for long haul TCP */
@@ -122,6 +115,17 @@ struct nfsd4_slot {
struct nfsd4_cache_entry sl_cache_entry;
};
+struct nfsd4_channel_attrs {
+ u32 headerpadsz;
+ u32 maxreq_sz;
+ u32 maxresp_sz;
+ u32 maxresp_cached;
+ u32 maxops;
+ u32 maxreqs;
+ u32 nr_rdma_attrs;
+ u32 rdma_attrs;
+};
+
struct nfsd4_session {
struct kref se_ref;
struct list_head se_hash; /* hash by sessionid */
@@ -129,11 +133,8 @@ struct nfsd4_session {
u32 se_flags;
struct nfs4_client *se_client; /* for expire_client */
struct nfs4_sessionid se_sessionid;
- u32 se_fmaxreq_sz;
- u32 se_fmaxresp_sz;
- u32 se_fmaxresp_cached;
- u32 se_fmaxops;
- u32 se_fnumslots;
+ struct nfsd4_channel_attrs se_fchannel;
+ struct nfsd4_channel_attrs se_bchannel;
struct nfsd4_slot se_slots[]; /* forward channel slots */
};
@@ -185,7 +186,7 @@ struct nfs4_client {
struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */
- struct nfs4_callback cl_callback; /* callback info */
+ struct nfs4_cb_conn cl_cb_conn; /* callback info */
atomic_t cl_count; /* ref count */
u32 cl_firststate; /* recovery dir creation */
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index f80d6013fdc3..2bacf7535069 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -64,10 +64,13 @@ static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
struct nfsd4_change_info {
u32 atomic;
+ bool change_supported;
u32 before_ctime_sec;
u32 before_ctime_nsec;
+ u64 before_change;
u32 after_ctime_sec;
u32 after_ctime_nsec;
+ u64 after_change;
};
struct nfsd4_access {
@@ -363,17 +366,6 @@ struct nfsd4_exchange_id {
int spa_how;
};
-struct nfsd4_channel_attrs {
- u32 headerpadsz;
- u32 maxreq_sz;
- u32 maxresp_sz;
- u32 maxresp_cached;
- u32 maxops;
- u32 maxreqs;
- u32 nr_rdma_attrs;
- u32 rdma_attrs;
-};
-
struct nfsd4_create_session {
clientid_t clientid;
struct nfs4_sessionid sessionid;
@@ -503,10 +495,16 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{
BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
cinfo->atomic = 1;
- cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
- cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
- cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
- cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
+ if (cinfo->change_supported) {
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+ } else {
+ cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
+ cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
+ cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
+ cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
+ }
}
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 092e82e0048c..93a7c08f869d 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -15,7 +15,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
struct pci_bus *pbus = pdev->bus;
/* Find a PCI root bus */
- while (pbus->parent)
+ while (!pci_is_root_bus(pbus))
pbus = pbus->parent;
return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
pbus->number);
@@ -23,7 +23,7 @@ static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
{
- if (pbus->parent)
+ if (!pci_is_root_bus(pbus))
return DEVICE_ACPI_HANDLE(&(pbus->self->dev));
return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
pbus->number);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8e366bb0705f..d304ddf412d0 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -196,6 +196,7 @@ struct pci_cap_saved_state {
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
+struct pci_ats;
/*
* The pci_dev structure is used to describe PCI devices.
@@ -293,6 +294,7 @@ struct pci_dev {
struct pci_sriov *sriov; /* SR-IOV capability related */
struct pci_dev *physfn; /* the PF this VF is associated with */
};
+ struct pci_ats *ats; /* Address Translation Service */
#endif
};
@@ -607,8 +609,6 @@ extern void pci_sort_breadthfirst(void);
struct pci_dev __deprecated *pci_find_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from);
-struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
- unsigned int devfn);
#endif /* CONFIG_PCI_LEGACY */
enum pci_lost_interrupt_reason {
@@ -647,6 +647,7 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn,
int where, u16 val);
int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn,
int where, u32 val);
+struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val)
{
@@ -711,8 +712,8 @@ int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
+int __pci_reset_function(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
-int pci_execute_reset_function(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
@@ -732,7 +733,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable);
int pci_wake_from_d3(struct pci_dev *dev, bool enable);
pci_power_t pci_target_state(struct pci_dev *dev);
int pci_prepare_to_sleep(struct pci_dev *dev);
@@ -798,7 +799,7 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
int pass);
-void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
int pci_cfg_space_size_ext(struct pci_dev *dev);
int pci_cfg_space_size(struct pci_dev *dev);
@@ -888,6 +889,17 @@ static inline int pcie_aspm_enabled(void)
extern int pcie_aspm_enabled(void);
#endif
+#ifndef CONFIG_PCIE_ECRC
+static inline void pcie_set_ecrc_checking(struct pci_dev *dev)
+{
+ return;
+}
+static inline void pcie_ecrc_get_policy(char *str) {};
+#else
+extern void pcie_set_ecrc_checking(struct pci_dev *dev);
+extern void pcie_ecrc_get_policy(char *str);
+#endif
+
#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
#ifdef CONFIG_HT_IRQ
@@ -944,12 +956,6 @@ static inline struct pci_dev *pci_find_device(unsigned int vendor,
return NULL;
}
-static inline struct pci_dev *pci_find_slot(unsigned int bus,
- unsigned int devfn)
-{
- return NULL;
-}
-
static inline struct pci_dev *pci_get_device(unsigned int vendor,
unsigned int device,
struct pci_dev *from)
@@ -1105,6 +1111,10 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
#include <asm/pci.h>
+#ifndef PCIBIOS_MAX_MEM_32
+#define PCIBIOS_MAX_MEM_32 (-1)
+#endif
+
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
@@ -1261,5 +1271,10 @@ static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev)
}
#endif
+#if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE)
+extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
+extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
+#endif
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
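Note that pci_walk_bus() callbacks now return int. As a hedged illustration (names made up), a walker that counts devices might look like this; a non-zero return from the callback is expected to stop the walk early:

#include <linux/pci.h>

static int count_pci_dev(struct pci_dev *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;		/* non-zero would abort the walk */
}

static unsigned int count_devices_on(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, count_pci_dev, &count);
	return count;
}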
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 20998746518e..b3646cd7fd5a 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -66,17 +66,10 @@ enum pcie_link_speed {
PCIE_LNK_SPEED_UNKNOWN = 0xFF,
};
-struct hotplug_slot;
-struct hotplug_slot_attribute {
- struct attribute attr;
- ssize_t (*show)(struct hotplug_slot *, char *);
- ssize_t (*store)(struct hotplug_slot *, const char *, size_t);
-};
-#define to_hotplug_attr(n) container_of(n, struct hotplug_slot_attribute, attr);
-
/**
* struct hotplug_slot_ops -the callbacks that the hotplug pci core can use
* @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -109,6 +102,7 @@ struct hotplug_slot_attribute {
*/
struct hotplug_slot_ops {
struct module *owner;
+ const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -167,12 +161,21 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
return pci_slot_name(slot->pci_slot);
}
-extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
- const char *name);
+extern int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus,
+ int nr, const char *name,
+ struct module *owner, const char *mod_name);
extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info);
+static inline int pci_hp_register(struct hotplug_slot *slot,
+ struct pci_bus *pbus,
+ int devnr, const char *name)
+{
+ return __pci_hp_register(slot, pbus, devnr, name,
+ THIS_MODULE, KBUILD_MODNAME);
+}
+
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
u32 revision;
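With the change above, pci_hp_register() keeps its old four-argument form but the inline wrapper now records THIS_MODULE and KBUILD_MODNAME automatically, so hotplug drivers no longer pass owner information by hand. A hedged caller sketch (slot setup omitted):

#include <linux/pci_hotplug.h>

/* Hedged sketch: register one hotplug slot; module ownership is filled in
 * behind the scenes by the pci_hp_register() wrapper above. */
static int my_register_slot(struct hotplug_slot *slot, struct pci_bus *bus,
			    int devnr)
{
	return pci_hp_register(slot, bus, devnr, "my-slot");
}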
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 616bf8b3c8b5..fcaee42c7ac2 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -295,8 +295,9 @@
#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_BIT 16 /* Mask bits register */
+#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
#define PCI_MSIX_FLAGS 2
@@ -304,7 +305,6 @@
#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
#define PCI_MSIX_FLAGS_MASKALL (1 << 14)
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
-#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
/* CompactPCI Hotswap Register */
@@ -502,6 +502,7 @@
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_ARI 14
+#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
/* Advanced Error Reporting */
@@ -620,6 +621,15 @@
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
+/* Address Translation Service */
+#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
+#define PCI_ATS_CAP_QDEP(x) ((x) & 0x1f) /* Invalidate Queue Depth */
+#define PCI_ATS_MAX_QDEP 32 /* Max Invalidate Queue Depth */
+#define PCI_ATS_CTRL 0x06 /* ATS Control Register */
+#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
+#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
+#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
+
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 16e39c7a67fc..e73e2429a1b1 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -160,8 +160,9 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
* the rfkill structure. Before calling this function the driver needs
* to be ready to service method calls from rfkill.
*
- * If the software blocked state is not set before registration,
- * set_block will be called to initialize it to a default value.
+ * If rfkill_init_sw_state() is not called before registration,
+ * set_block() will be called to initialize the software blocked state
+ * to a default value.
*
* If the hardware blocked state is not set before registration,
* it is assumed to be unblocked.
@@ -234,9 +235,11 @@ bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
* rfkill drivers that get events when the soft-blocked state changes
* (yes, some platforms directly act on input but allow changing again)
* use this function to notify the rfkill core (and through that also
- * userspace) of the current state. It is not necessary to notify on
- * resume; since hibernation can always change the soft-blocked state,
- * the rfkill core will unconditionally restore the previous state.
+ * userspace) of the current state.
+ *
+ * Drivers should also call this function after resume if the state has
+ * been changed by the user. This only makes sense for "persistent"
+ * devices (see rfkill_init_sw_state()).
*
* This function can be called in any context, even from within rfkill
* callbacks.
@@ -247,6 +250,22 @@ bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
/**
+ * rfkill_init_sw_state - Initialize persistent software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @state: the current software block state to set
+ *
+ * rfkill drivers that preserve their software block state over power off
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of their initial state. It should only be used before
+ * registration.
+ *
+ * In addition, it marks the device as "persistent", an attribute which
+ * can be read by userspace. Persistent devices are expected to preserve
+ * their own state when suspended.
+ */
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
+
+/**
* rfkill_set_states - Set the internal rfkill block states
* @rfkill: pointer to the rfkill class to modify.
* @sw: the current software block state to set
@@ -307,6 +326,10 @@ static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
return blocked;
}
+static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+}
+
static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
}
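Putting the new rfkill_init_sw_state() hook in context, here is a hedged driver-side sketch (the my_* names are hypothetical) that seeds a persistent soft-block state before registration:

#include <linux/errno.h>
#include <linux/rfkill.h>

static int my_set_block(void *data, bool blocked)
{
	/* toggle the radio hardware here */
	return 0;
}

static const struct rfkill_ops my_rfkill_ops = {
	.set_block = my_set_block,
};

static int my_register_rfkill(struct device *dev, bool soft_blocked_at_boot)
{
	struct rfkill *rk;
	int err;

	rk = rfkill_alloc("my-wlan", dev, RFKILL_TYPE_WLAN,
			  &my_rfkill_ops, NULL);
	if (!rk)
		return -ENOMEM;

	/* The device keeps its soft-block state across power off, so seed
	 * it and mark the device "persistent" before registering. */
	rfkill_init_sw_state(rk, soft_blocked_at_boot);

	err = rfkill_register(rk);
	if (err)
		rfkill_destroy(rk);
	return err;
}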
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 6fd80c4243f1..23d2fb051f97 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -171,6 +171,9 @@
/* Timberdale UART */
#define PORT_TIMBUART 87
+/* Qualcomm MSM SoCs */
+#define PORT_MSM 88
+
#ifdef __KERNEL__
#include <linux/compiler.h>
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h
index 96c0d93fc2ca..850db2e80510 100644
--- a/include/linux/serial_reg.h
+++ b/include/linux/serial_reg.h
@@ -323,6 +323,7 @@
#define UART_OMAP_MVER 0x14 /* Module version register */
#define UART_OMAP_SYSC 0x15 /* System configuration register */
#define UART_OMAP_SYSS 0x16 /* System status register */
+#define UART_OMAP_WER 0x17 /* Wake-up enable register */
#endif /* _LINUX_SERIAL_REG_H */
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
new file mode 100644
index 000000000000..6508f0dc0eff
--- /dev/null
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -0,0 +1,49 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions to create and manage the backchannel
+ */
+
+#ifndef _LINUX_SUNRPC_BC_XPRT_H
+#define _LINUX_SUNRPC_BC_XPRT_H
+
+#include <linux/sunrpc/svcsock.h>
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+
+#ifdef CONFIG_NFS_V4_1
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt);
+void xprt_free_bc_request(struct rpc_rqst *req);
+int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
+void xprt_destroy_backchannel(struct rpc_xprt *, int max_reqs);
+void bc_release_request(struct rpc_task *);
+int bc_send(struct rpc_rqst *req);
+#else /* CONFIG_NFS_V4_1 */
+static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
+ unsigned int min_reqs)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+#endif /* _LINUX_SUNRPC_BC_XPRT_H */
+
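Since the !CONFIG_NFS_V4_1 stub above simply returns 0, callers can use the new helper unconditionally. A hedged sketch (the function name and the min_reqs value of 1 are assumptions for illustration):

#include <linux/sunrpc/bc_xprt.h>

static int example_setup_backchannel(struct rpc_xprt *xprt)
{
	/* Preallocate one rpc_rqst for NFSv4.1 callback traffic;
	 * compiles down to "return 0" when sessions are not configured. */
	return xprt_setup_backchannel(xprt, 1);
}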
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index c39a21040dcb..37881f1a0bd7 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -143,6 +143,7 @@ int rpc_call_sync(struct rpc_clnt *clnt,
const struct rpc_message *msg, int flags);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int flags);
+void rpc_restart_call_prepare(struct rpc_task *);
void rpc_restart_call(struct rpc_task *);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 64981a2f1cae..401097781fc0 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -210,6 +210,8 @@ struct rpc_wait_queue {
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *ops);
void rpc_put_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
@@ -237,6 +239,7 @@ void rpc_show_tasks(void);
int rpc_init_mempool(void);
void rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;
+void rpc_prepare_task(struct rpc_task *task);
static inline void rpc_exit(struct rpc_task *task, int status)
{
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 2a30775959e9..ea8009695c69 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -96,6 +96,15 @@ struct svc_serv {
svc_thread_fn sv_function; /* main function for threads */
unsigned int sv_drc_max_pages; /* Total pages for DRC */
unsigned int sv_drc_pages_used;/* DRC pages used */
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head sv_cb_list; /* queue for callback requests
+ * that arrive over the same
+ * connection */
+ spinlock_t sv_cb_lock; /* protects the svc_cb_list */
+ wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
+ * entries in the svc_cb_list */
+ struct svc_xprt *bc_xprt;
+#endif /* CONFIG_NFS_V4_1 */
};
/*
@@ -411,6 +420,8 @@ int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_destroy(struct svc_serv *);
int svc_process(struct svc_rqst *);
+int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
+ struct svc_rqst *);
int svc_register(const struct svc_serv *, const int,
const unsigned short, const unsigned short);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0d9cb6ef28b0..2223ae0b5ed5 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -83,7 +83,7 @@ int svc_port_is_privileged(struct sockaddr *sin);
int svc_print_xprts(char *buf, int maxlen);
struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
const sa_family_t af, const unsigned short port);
-int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen);
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen);
static inline void svc_xprt_get(struct svc_xprt *xprt)
{
@@ -118,7 +118,7 @@ static inline unsigned short svc_addr_port(const struct sockaddr *sa)
return 0;
}
-static inline size_t svc_addr_len(struct sockaddr *sa)
+static inline size_t svc_addr_len(const struct sockaddr *sa)
{
switch (sa->sa_family) {
case AF_INET:
@@ -126,7 +126,8 @@ static inline size_t svc_addr_len(struct sockaddr *sa)
case AF_INET6:
return sizeof(struct sockaddr_in6);
}
- return -EAFNOSUPPORT;
+
+ return 0;
}
static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 483e10380aae..04dba23c59f2 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -38,10 +38,15 @@ int svc_recv(struct svc_rqst *, long);
int svc_send(struct svc_rqst *);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
-int svc_sock_names(char *buf, struct svc_serv *serv, char *toclose);
-int svc_addsock(struct svc_serv *serv, int fd, char *name_return);
+int svc_sock_names(struct svc_serv *serv, char *buf,
+ const size_t buflen,
+ const char *toclose);
+int svc_addsock(struct svc_serv *serv, const int fd,
+ char *name_return, const size_t len);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
+void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 08afe43118f4..1175d58efc2e 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -67,7 +67,8 @@ struct rpc_rqst {
struct rpc_task * rq_task; /* RPC task data */
__be32 rq_xid; /* request XID */
int rq_cong; /* has incremented xprt->cong */
- int rq_received; /* receive completed */
+ int rq_reply_bytes_recvd; /* number of reply */
+ /* bytes received */
u32 rq_seqno; /* gss seq no. used on req. */
int rq_enc_pages_num;
struct page **rq_enc_pages; /* scratch pages for use by
@@ -97,6 +98,12 @@ struct rpc_rqst {
unsigned long rq_xtime; /* when transmitted */
int rq_ntrans;
+
+#if defined(CONFIG_NFS_V4_1)
+ struct list_head rq_bc_list; /* Callback service list */
+ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
+ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
+#endif /* CONFIG_NFS_V4_1 */
};
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
@@ -174,6 +181,15 @@ struct rpc_xprt {
spinlock_t reserve_lock; /* lock slot table */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+#if defined(CONFIG_NFS_V4_1)
+ struct svc_serv *bc_serv; /* The RPC service which will */
+ /* process the callback */
+ unsigned int bc_alloc_count; /* Total number of preallocs */
+ spinlock_t bc_pa_lock; /* Protects the preallocated
+ * items */
+ struct list_head bc_pa_list; /* List of preallocated
+ * backchannel rpc_rqst's */
+#endif /* CONFIG_NFS_V4_1 */
struct list_head recv;
struct {
@@ -192,6 +208,26 @@ struct rpc_xprt {
const char *address_strings[RPC_DISPLAY_MAX];
};
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Backchannel flags
+ */
+#define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */
+ /* buffer in use */
+#endif /* CONFIG_NFS_V4_1 */
+
+#if defined(CONFIG_NFS_V4_1)
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+}
+#else
+static inline int bc_prealloc(struct rpc_rqst *req)
+{
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
struct xprt_create {
int ident; /* XPRT_TRANSPORT identifier */
struct sockaddr * srcaddr; /* optional local address */
diff --git a/include/mtd/Kbuild b/include/mtd/Kbuild
index 8eb018f96002..192f8fb7d546 100644
--- a/include/mtd/Kbuild
+++ b/include/mtd/Kbuild
@@ -1,5 +1,4 @@
header-y += inftl-user.h
-header-y += jffs2-user.h
header-y += mtd-abi.h
header-y += mtd-user.h
header-y += nftl-user.h
diff --git a/include/mtd/jffs2-user.h b/include/mtd/jffs2-user.h
deleted file mode 100644
index fa94b0eb67c1..000000000000
--- a/include/mtd/jffs2-user.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * JFFS2 definitions for use in user space only
- */
-
-#ifndef __JFFS2_USER_H__
-#define __JFFS2_USER_H__
-
-/* This file is blessed for inclusion by userspace */
-#include <linux/jffs2.h>
-#include <linux/types.h>
-#include <endian.h>
-#include <byteswap.h>
-
-#undef cpu_to_je16
-#undef cpu_to_je32
-#undef cpu_to_jemode
-#undef je16_to_cpu
-#undef je32_to_cpu
-#undef jemode_to_cpu
-
-extern int target_endian;
-
-#define t16(x) ({ __u16 __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_16(__b); })
-#define t32(x) ({ __u32 __b = (x); (target_endian==__BYTE_ORDER)?__b:bswap_32(__b); })
-
-#define cpu_to_je16(x) ((jint16_t){t16(x)})
-#define cpu_to_je32(x) ((jint32_t){t32(x)})
-#define cpu_to_jemode(x) ((jmode_t){t32(x)})
-
-#define je16_to_cpu(x) (t16((x).v16))
-#define je32_to_cpu(x) (t32((x).v32))
-#define jemode_to_cpu(x) (t32((x).m))
-
-#endif /* __JFFS2_USER_H__ */
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index b6595b3c68b6..be51ae2bd0ff 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -12,12 +12,24 @@ struct erase_info_user {
__u32 length;
};
+struct erase_info_user64 {
+ __u64 start;
+ __u64 length;
+};
+
struct mtd_oob_buf {
__u32 start;
__u32 length;
unsigned char __user *ptr;
};
+struct mtd_oob_buf64 {
+ __u64 start;
+ __u32 pad;
+ __u32 length;
+ __u64 usr_ptr;
+};
+
#define MTD_ABSENT 0
#define MTD_RAM 1
#define MTD_ROM 2
@@ -95,6 +107,9 @@ struct otp_info {
#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout)
#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
#define MTDFILEMODE _IO('M', 19)
+#define MEMERASE64 _IOW('M', 20, struct erase_info_user64)
+#define MEMWRITEOOB64 _IOWR('M', 21, struct mtd_oob_buf64)
+#define MEMREADOOB64 _IOWR('M', 22, struct mtd_oob_buf64)
/*
* Obsolete legacy interface. Keep it in order not to break userspace
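The new 64-bit ioctls let userspace address offsets beyond 4GiB on large devices. A hedged userspace sketch using MEMERASE64 (the device node and eraseblock size are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct erase_info_user64 ei = {
		.start  = 0,
		.length = 128ULL * 1024,	/* assumed eraseblock size */
	};
	int fd = open("/dev/mtd0", O_RDWR);

	if (fd < 0 || ioctl(fd, MEMERASE64, &ei) < 0) {
		perror("MEMERASE64");
		return 1;
	}
	close(fd);
	return 0;
}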
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 21ee49ffcbaf..f82a1e877372 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -94,8 +94,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
- unsigned long timeo);
int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
void iucv_accept_unlink(struct sock *sk);
diff --git a/init/main.c b/init/main.c
index 09131ec090c1..4870dfeb9ee5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -678,7 +678,6 @@ asmlinkage void __init start_kernel(void)
#endif
page_cgroup_init();
enable_debug_pagealloc();
- cpu_hotplug_init();
kmemtrace_init();
kmemleak_init();
debug_objects_mem_init();
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 395b6974dc8d..8ce10043e4ac 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -34,14 +34,11 @@ static struct {
* an ongoing cpu hotplug operation.
*/
int refcount;
-} cpu_hotplug;
-
-void __init cpu_hotplug_init(void)
-{
- cpu_hotplug.active_writer = NULL;
- mutex_init(&cpu_hotplug.lock);
- cpu_hotplug.refcount = 0;
-}
+} cpu_hotplug = {
+ .active_writer = NULL,
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+ .refcount = 0,
+};
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e8fa2d9eb212..54155268dfca 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -932,7 +932,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
continue;
if (__isolate_lru_page(cursor_page, mode, file) == 0) {
list_move(&cursor_page->lru, dst);
- mem_cgroup_del_lru(page);
+ mem_cgroup_del_lru(cursor_page);
nr_taken++;
scan++;
}
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 882a927cefae..3bb6bdb1dac1 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -39,14 +39,6 @@
#include "af802154.h"
-#define DBG_DUMP(data, len) { \
- int i; \
- pr_debug("function: %s: data: len %d:\n", __func__, len); \
- for (i = 0; i < len; i++) {\
- pr_debug("%02x: %02x\n", i, (data)[i]); \
- } \
-}
-
/*
* Utility function for families
*/
@@ -302,10 +294,12 @@ static struct net_proto_family ieee802154_family_ops = {
static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
- DBG_DUMP(skb->data, skb->len);
if (!netif_running(dev))
return -ENODEV;
pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+ print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
if (!net_eq(dev_net(dev), &init_net))
goto drop;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cd76b3cb7092..65b3a8b11a6c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1085,8 +1085,16 @@ restart:
now = jiffies;
if (!rt_caching(dev_net(rt->u.dst.dev))) {
- rt_drop(rt);
- return 0;
+ /*
+ * If we're not caching, just tell the caller we
+ * were successful and don't touch the route. The
+ * caller holds the sole reference to the cache entry, and
+ * it will be released when the caller is done with it.
+ * If we drop it here, the callers have no way to resolve routes
+ * when we're not caching. Instead, just point *rp at rt, so
+ * the caller gets a single use out of the route.
+ */
+ goto report_and_exit;
}
rthp = &rt_hash_table[hash].chain;
@@ -1217,6 +1225,8 @@ restart:
rcu_assign_pointer(rt_hash_table[hash].chain, rt);
spin_unlock_bh(rt_hash_lock_addr(hash));
+
+report_and_exit:
if (rp)
*rp = rt;
else
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 656cbd195825..6be5f92d1094 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -54,6 +54,38 @@ static const u8 iprm_shutdown[8] =
#define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN (TRGCLS_SIZE)
+#define __iucv_sock_wait(sk, condition, timeo, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ long __timeo = timeo; \
+ ret = 0; \
+ while (!(condition)) { \
+ prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+ if (!__timeo) { \
+ ret = -EAGAIN; \
+ break; \
+ } \
+ if (signal_pending(current)) { \
+ ret = sock_intr_errno(__timeo); \
+ break; \
+ } \
+ release_sock(sk); \
+ __timeo = schedule_timeout(__timeo); \
+ lock_sock(sk); \
+ ret = sock_error(sk); \
+ if (ret) \
+ break; \
+ } \
+ finish_wait(sk->sk_sleep, &__wait); \
+} while (0)
+
+#define iucv_sock_wait(sk, condition, timeo) \
+({ \
+ int __ret = 0; \
+ if (!(condition)) \
+ __iucv_sock_wait(sk, condition, timeo, __ret); \
+ __ret; \
+})
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
@@ -238,6 +270,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
return msg->length;
}
+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk: sock structure
+ * @state: first iucv sk state
+ * @state2: second iucv sk state
+ *
+ * Returns true if the socket is in either the first or the second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+ return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk: sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+
+ if (sk->sk_state != IUCV_CONNECTED)
+ return 1;
+ return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ read_unlock(&sk->sk_callback_lock);
+}
+
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
@@ -329,7 +403,9 @@ static void iucv_sock_close(struct sock *sk)
timeo = sk->sk_lingertime;
else
timeo = IUCV_DISCONN_TIMEOUT;
- err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+ err = iucv_sock_wait(sk,
+ iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+ timeo);
}
case IUCV_CLOSING: /* fall through */
@@ -510,39 +586,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
return NULL;
}
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
- unsigned long timeo)
-{
- DECLARE_WAITQUEUE(wait, current);
- int err = 0;
-
- add_wait_queue(sk->sk_sleep, &wait);
- while (sk->sk_state != state && sk->sk_state != state2) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- if (!timeo) {
- err = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- err = sock_intr_errno(timeo);
- break;
- }
-
- release_sock(sk);
- timeo = schedule_timeout(timeo);
- lock_sock(sk);
-
- err = sock_error(sk);
- if (err)
- break;
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(sk->sk_sleep, &wait);
- return err;
-}
-
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
@@ -687,8 +730,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
}
if (sk->sk_state != IUCV_CONNECTED) {
- err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
+ err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+ IUCV_DISCONN),
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
}
if (sk->sk_state == IUCV_DISCONN) {
@@ -842,9 +886,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
struct iucv_message txmsg;
struct cmsghdr *cmsg;
int cmsg_done;
+ long timeo;
char user_id[9];
char appl_id[9];
int err;
+ int noblock = msg->msg_flags & MSG_DONTWAIT;
err = sock_error(sk);
if (err)
@@ -864,108 +910,119 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto out;
}
- if (sk->sk_state == IUCV_CONNECTED) {
- /* initialize defaults */
- cmsg_done = 0; /* check for duplicate headers */
- txmsg.class = 0;
+ /* Return if the socket is not in connected state */
+ if (sk->sk_state != IUCV_CONNECTED) {
+ err = -ENOTCONN;
+ goto out;
+ }
- /* iterate over control messages */
- for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
- cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ /* initialize defaults */
+ cmsg_done = 0; /* check for duplicate headers */
+ txmsg.class = 0;
- if (!CMSG_OK(msg, cmsg)) {
- err = -EINVAL;
- goto out;
- }
+ /* iterate over control messages */
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+ cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+ if (!CMSG_OK(msg, cmsg)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (cmsg->cmsg_level != SOL_IUCV)
+ continue;
- if (cmsg->cmsg_level != SOL_IUCV)
- continue;
+ if (cmsg->cmsg_type & cmsg_done) {
+ err = -EINVAL;
+ goto out;
+ }
+ cmsg_done |= cmsg->cmsg_type;
- if (cmsg->cmsg_type & cmsg_done) {
+ switch (cmsg->cmsg_type) {
+ case SCM_IUCV_TRGCLS:
+ if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
err = -EINVAL;
goto out;
}
- cmsg_done |= cmsg->cmsg_type;
-
- switch (cmsg->cmsg_type) {
- case SCM_IUCV_TRGCLS:
- if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
- err = -EINVAL;
- goto out;
- }
- /* set iucv message target class */
- memcpy(&txmsg.class,
- (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+ /* set iucv message target class */
+ memcpy(&txmsg.class,
+ (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
- break;
+ break;
- default:
- err = -EINVAL;
- goto out;
- break;
- }
+ default:
+ err = -EINVAL;
+ goto out;
+ break;
}
+ }
- /* allocate one skb for each iucv message:
- * this is fine for SOCK_SEQPACKET (unless we want to support
- * segmented records using the MSG_EOR flag), but
- * for SOCK_STREAM we might want to improve it in future */
- if (!(skb = sock_alloc_send_skb(sk, len,
- msg->msg_flags & MSG_DONTWAIT,
- &err)))
- goto out;
+ /* allocate one skb for each iucv message:
+ * this is fine for SOCK_SEQPACKET (unless we want to support
+ * segmented records using the MSG_EOR flag), but
+ * for SOCK_STREAM we might want to improve it in future */
+ skb = sock_alloc_send_skb(sk, len, noblock, &err);
+ if (!skb)
+ goto out;
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+ err = -EFAULT;
+ goto fail;
+ }
- if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
- err = -EFAULT;
- goto fail;
- }
+ /* wait if the iucv path's outstanding-message limit has been reached */
+ timeo = sock_sndtimeo(sk, noblock);
+ err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+ if (err)
+ goto fail;
- /* increment and save iucv message tag for msg_completion cbk */
- txmsg.tag = iucv->send_tag++;
- memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
- skb_queue_tail(&iucv->send_skb_q, skb);
+ /* return -ECONNRESET if the socket is no longer connected */
+ if (sk->sk_state != IUCV_CONNECTED) {
+ err = -ECONNRESET;
+ goto fail;
+ }
- if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
- && skb->len <= 7) {
- err = iucv_send_iprm(iucv->path, &txmsg, skb);
+ /* increment and save iucv message tag for msg_completion cbk */
+ txmsg.tag = iucv->send_tag++;
+ memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+ skb_queue_tail(&iucv->send_skb_q, skb);
- /* on success: there is no message_complete callback
- * for an IPRMDATA msg; remove skb from send queue */
- if (err == 0) {
- skb_unlink(skb, &iucv->send_skb_q);
- kfree_skb(skb);
- }
+ if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+ && skb->len <= 7) {
+ err = iucv_send_iprm(iucv->path, &txmsg, skb);
- /* this error should never happen since the
- * IUCV_IPRMDATA path flag is set... sever path */
- if (err == 0x15) {
- iucv_path_sever(iucv->path, NULL);
- skb_unlink(skb, &iucv->send_skb_q);
- err = -EPIPE;
- goto fail;
- }
- } else
- err = iucv_message_send(iucv->path, &txmsg, 0, 0,
- (void *) skb->data, skb->len);
- if (err) {
- if (err == 3) {
- user_id[8] = 0;
- memcpy(user_id, iucv->dst_user_id, 8);
- appl_id[8] = 0;
- memcpy(appl_id, iucv->dst_name, 8);
- pr_err("Application %s on z/VM guest %s"
- " exceeds message limit\n",
- user_id, appl_id);
- }
+ /* on success: there is no message_complete callback
+ * for an IPRMDATA msg; remove skb from send queue */
+ if (err == 0) {
+ skb_unlink(skb, &iucv->send_skb_q);
+ kfree_skb(skb);
+ }
+
+ /* this error should never happen since the
+ * IUCV_IPRMDATA path flag is set... sever path */
+ if (err == 0x15) {
+ iucv_path_sever(iucv->path, NULL);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
}
-
- } else {
- err = -ENOTCONN;
- goto out;
+ } else
+ err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ (void *) skb->data, skb->len);
+ if (err) {
+ if (err == 3) {
+ user_id[8] = 0;
+ memcpy(user_id, iucv->dst_user_id, 8);
+ appl_id[8] = 0;
+ memcpy(appl_id, iucv->dst_name, 8);
+ pr_err("Application %s on z/VM guest %s"
+ " exceeds message limit\n",
+ appl_id, user_id);
+ err = -EAGAIN;
+ } else
+ err = -EPIPE;
+ skb_unlink(skb, &iucv->send_skb_q);
+ goto fail;
}
release_sock(sk);
@@ -1581,7 +1638,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
spin_unlock_irqrestore(&list->lock, flags);
- kfree_skb(this);
+ if (this) {
+ kfree_skb(this);
+ /* wake up any process waiting for sending */
+ iucv_sock_wake_msglim(sk);
+ }
}
BUG_ON(!this);
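
Condensed sketch of the new flow-control step in the af_iucv send path (example_wait_for_msglim is hypothetical; the macros and helpers are the ones added above): sleep via iucv_sock_wait() until iucv_below_msglim() holds, then re-check the connection, since the peer may have disappeared while we slept.

static int example_wait_for_msglim(struct sock *sk, int noblock)
{
	long timeo = sock_sndtimeo(sk, noblock);
	int err;

	/* Blocks in __iucv_sock_wait() until the send queue is below msglim. */
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		return err;

	/* The state can change while sleeping; mirror the patch's re-check. */
	if (sk->sk_state != IUCV_CONNECTED)
		return -ECONNRESET;

	return 0;
}
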
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 4e68ab439d5d..79693fe2001e 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -56,7 +56,6 @@ struct rfkill {
u32 idx;
bool registered;
- bool suspended;
bool persistent;
const struct rfkill_ops *ops;
@@ -224,7 +223,7 @@ static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
static void rfkill_event(struct rfkill *rfkill)
{
- if (!rfkill->registered || rfkill->suspended)
+ if (!rfkill->registered)
return;
kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);
@@ -270,6 +269,9 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
unsigned long flags;
int err;
+ if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+ return;
+
/*
* Some platforms (...!) generate input events which affect the
* _hard_ kill state -- whenever something tries to change the
@@ -292,9 +294,6 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
spin_unlock_irqrestore(&rfkill->lock, flags);
- if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
- return;
-
err = rfkill->ops->set_block(rfkill->data, blocked);
spin_lock_irqsave(&rfkill->lock, flags);
@@ -508,19 +507,32 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
blocked = blocked || hwblock;
spin_unlock_irqrestore(&rfkill->lock, flags);
- if (!rfkill->registered) {
- rfkill->persistent = true;
- } else {
- if (prev != blocked && !hwblock)
- schedule_work(&rfkill->uevent_work);
+ if (!rfkill->registered)
+ return blocked;
- rfkill_led_trigger_event(rfkill);
- }
+ if (prev != blocked && !hwblock)
+ schedule_work(&rfkill->uevent_work);
+
+ rfkill_led_trigger_event(rfkill);
return blocked;
}
EXPORT_SYMBOL(rfkill_set_sw_state);
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+ unsigned long flags;
+
+ BUG_ON(!rfkill);
+ BUG_ON(rfkill->registered);
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ __rfkill_set_sw_state(rfkill, blocked);
+ rfkill->persistent = true;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+EXPORT_SYMBOL(rfkill_init_sw_state);
+
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
{
unsigned long flags;
@@ -598,6 +610,15 @@ static ssize_t rfkill_idx_show(struct device *dev,
return sprintf(buf, "%d\n", rfkill->idx);
}
+static ssize_t rfkill_persistent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct rfkill *rfkill = to_rfkill(dev);
+
+ return sprintf(buf, "%d\n", rfkill->persistent);
+}
+
static u8 user_state_from_blocked(unsigned long state)
{
if (state & RFKILL_BLOCK_HW)
@@ -656,6 +677,7 @@ static struct device_attribute rfkill_dev_attrs[] = {
__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
+ __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
__ATTR_NULL
@@ -718,8 +740,6 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
rfkill_pause_polling(rfkill);
- rfkill->suspended = true;
-
return 0;
}
@@ -728,10 +748,10 @@ static int rfkill_resume(struct device *dev)
struct rfkill *rfkill = to_rfkill(dev);
bool cur;
- cur = !!(rfkill->state & RFKILL_BLOCK_SW);
- rfkill_set_block(rfkill, cur);
-
- rfkill->suspended = false;
+ if (!rfkill->persistent) {
+ cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+ rfkill_set_block(rfkill, cur);
+ }
rfkill_resume_polling(rfkill);
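
A hedged sketch of how a driver might use the new rfkill_init_sw_state() export (the driver name, ops and parent device below are placeholders, not from this patch): the initial soft-block state is recorded before registration and marked persistent, so the core does not re-apply a possibly stale software state on resume.

#include <linux/rfkill.h>

static int example_register_rfkill(struct device *parent,
				   const struct rfkill_ops *example_ops,
				   void *data, bool fw_blocked)
{
	struct rfkill *rfk;
	int err;

	rfk = rfkill_alloc("example-wlan", parent, RFKILL_TYPE_WLAN,
			   example_ops, data);
	if (!rfk)
		return -ENOMEM;

	/* Must be called before rfkill_register(); sets rfkill->persistent. */
	rfkill_init_sw_state(rfk, fw_blocked);

	err = rfkill_register(rfk);
	if (err)
		rfkill_destroy(rfk);
	return err;
}
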
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 5369aa369b35..db73fd2a3f0e 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -13,5 +13,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
svc_xprt.o
+sunrpc-$(CONFIG_NFS_V4_1) += backchannel_rqst.o bc_svc.o
sunrpc-$(CONFIG_PROC_FS) += stats.o
sunrpc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
new file mode 100644
index 000000000000..553621fb2c41
--- /dev/null
+++ b/net/sunrpc/backchannel_rqst.c
@@ -0,0 +1,281 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc. All Rights Reserved.
+(c) 2009 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <linux/tcp.h>
+#include <linux/sunrpc/xprt.h>
+
+#ifdef RPC_DEBUG
+#define RPCDBG_FACILITY RPCDBG_TRANS
+#endif
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Helper routines that track the number of preallocation elements
+ * on the transport.
+ */
+static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
+{
+ return xprt->bc_alloc_count > 0;
+}
+
+static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+ xprt->bc_alloc_count += n;
+}
+
+static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
+{
+ return xprt->bc_alloc_count -= n;
+}
+
+/*
+ * Free the preallocated rpc_rqst structure and the memory
+ * buffers hanging off of it.
+ */
+static void xprt_free_allocation(struct rpc_rqst *req)
+{
+ struct xdr_buf *xbufp;
+
+ dprintk("RPC: free allocations for req= %p\n", req);
+ BUG_ON(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ xbufp = &req->rq_private_buf;
+ free_page((unsigned long)xbufp->head[0].iov_base);
+ xbufp = &req->rq_snd_buf;
+ free_page((unsigned long)xbufp->head[0].iov_base);
+ list_del(&req->rq_bc_pa_list);
+ kfree(req);
+}
+
+/*
+ * Preallocate up to min_reqs structures and related buffers for use
+ * by the backchannel. This function can be called multiple times
+ * when creating new sessions that use the same rpc_xprt. The
+ * preallocated buffers are added to the pool of resources used by
+ * the rpc_xprt. Any one of these resources may be used by an
+ * incoming callback request. It's up to the higher levels in the
+ * stack to enforce that the maximum number of session slots is not
+ * being exceeded.
+ *
+ * Some callback arguments can be large; for example, a pNFS server
+ * using multiple deviceIDs. The list can be unbounded, but the client
+ * has the ability to tell the server the maximum size of the callback
+ * requests. Each deviceID is 16 bytes, so allocate one page
+ * for the arguments to have enough room to receive a number of these
+ * deviceIDs. The NFS client indicates to the pNFS server that its
+ * callback requests can be up to 4096 bytes in size.
+ */
+int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
+{
+ struct page *page_rcv = NULL, *page_snd = NULL;
+ struct xdr_buf *xbufp = NULL;
+ struct rpc_rqst *req, *tmp;
+ struct list_head tmp_list;
+ int i;
+
+ dprintk("RPC: setup backchannel transport\n");
+
+ /*
+ * We use a temporary list to keep track of the preallocated
+ * buffers. Once we're done building the list we splice it
+ * into the backchannel preallocation list off of the rpc_xprt
+ * struct. This helps minimize the amount of time the list
+ * lock is held on the rpc_xprt struct. It also makes cleanup
+ * easier in case of memory allocation errors.
+ */
+ INIT_LIST_HEAD(&tmp_list);
+ for (i = 0; i < min_reqs; i++) {
+ /* Pre-allocate one backchannel rpc_rqst */
+ req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
+ if (req == NULL) {
+ printk(KERN_ERR "Failed to create bc rpc_rqst\n");
+ goto out_free;
+ }
+
+ /* Add the allocated buffer to the tmp list */
+ dprintk("RPC: adding req= %p\n", req);
+ list_add(&req->rq_bc_pa_list, &tmp_list);
+
+ req->rq_xprt = xprt;
+ INIT_LIST_HEAD(&req->rq_list);
+ INIT_LIST_HEAD(&req->rq_bc_list);
+
+ /* Preallocate one XDR receive buffer */
+ page_rcv = alloc_page(GFP_KERNEL);
+ if (page_rcv == NULL) {
+ printk(KERN_ERR "Failed to create bc receive xbuf\n");
+ goto out_free;
+ }
+ xbufp = &req->rq_rcv_buf;
+ xbufp->head[0].iov_base = page_address(page_rcv);
+ xbufp->head[0].iov_len = PAGE_SIZE;
+ xbufp->tail[0].iov_base = NULL;
+ xbufp->tail[0].iov_len = 0;
+ xbufp->page_len = 0;
+ xbufp->len = PAGE_SIZE;
+ xbufp->buflen = PAGE_SIZE;
+
+ /* Preallocate one XDR send buffer */
+ page_snd = alloc_page(GFP_KERNEL);
+ if (page_snd == NULL) {
+ printk(KERN_ERR "Failed to create bc snd xbuf\n");
+ goto out_free;
+ }
+
+ xbufp = &req->rq_snd_buf;
+ xbufp->head[0].iov_base = page_address(page_snd);
+ xbufp->head[0].iov_len = 0;
+ xbufp->tail[0].iov_base = NULL;
+ xbufp->tail[0].iov_len = 0;
+ xbufp->page_len = 0;
+ xbufp->len = 0;
+ xbufp->buflen = PAGE_SIZE;
+ }
+
+ /*
+ * Add the temporary list to the backchannel preallocation list
+ */
+ spin_lock_bh(&xprt->bc_pa_lock);
+ list_splice(&tmp_list, &xprt->bc_pa_list);
+ xprt_inc_alloc_count(xprt, min_reqs);
+ spin_unlock_bh(&xprt->bc_pa_lock);
+
+ dprintk("RPC: setup backchannel transport done\n");
+ return 0;
+
+out_free:
+ /*
+ * Memory allocation failed, free the temporary list
+ */
+ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+ xprt_free_allocation(req);
+
+ dprintk("RPC: setup backchannel transport failed\n");
+ return -1;
+}
+EXPORT_SYMBOL(xprt_setup_backchannel);
+
+/*
+ * Destroys the backchannel preallocated structures.
+ * Since these structures may have been allocated by multiple calls
+ * to xprt_setup_backchannel, we only destroy up to the maximum number
+ * of reqs specified by the caller.
+ * @xprt: the transport holding the preallocated structures
+ * @max_reqs: the maximum number of preallocated structures to destroy
+ */
+void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
+{
+ struct rpc_rqst *req = NULL, *tmp = NULL;
+
+ dprintk("RPC: destroy backchannel transport\n");
+
+ BUG_ON(max_reqs == 0);
+ spin_lock_bh(&xprt->bc_pa_lock);
+ xprt_dec_alloc_count(xprt, max_reqs);
+ list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
+ dprintk("RPC: req=%p\n", req);
+ xprt_free_allocation(req);
+ if (--max_reqs == 0)
+ break;
+ }
+ spin_unlock_bh(&xprt->bc_pa_lock);
+
+ dprintk("RPC: backchannel list empty= %s\n",
+ list_empty(&xprt->bc_pa_list) ? "true" : "false");
+}
+EXPORT_SYMBOL(xprt_destroy_backchannel);
+
+/*
+ * One or more rpc_rqst structures have been preallocated during the
+ * backchannel setup. Buffer space for the send and private XDR buffers
+ * has been preallocated as well. Use xprt_alloc_bc_request to allocate
+ * one of these preallocated requests; use xprt_free_bc_request to return it.
+ *
+ * We know that we're called in soft interrupt context, so take the plain
+ * spin_lock since there is no need for the bottom-half variant.
+ *
+ * Returns an available rpc_rqst, or NULL if none is available.
+ */
+struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
+{
+ struct rpc_rqst *req;
+
+ dprintk("RPC: allocate a backchannel request\n");
+ spin_lock(&xprt->bc_pa_lock);
+ if (!list_empty(&xprt->bc_pa_list)) {
+ req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
+ rq_bc_pa_list);
+ list_del(&req->rq_bc_pa_list);
+ } else {
+ req = NULL;
+ }
+ spin_unlock(&xprt->bc_pa_lock);
+
+ if (req != NULL) {
+ set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+ req->rq_reply_bytes_recvd = 0;
+ req->rq_bytes_sent = 0;
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+ sizeof(req->rq_private_buf));
+ }
+ dprintk("RPC: backchannel req=%p\n", req);
+ return req;
+}
+
+/*
+ * Return the preallocated rpc_rqst structure and XDR buffers
+ * associated with this rpc_task.
+ */
+void xprt_free_bc_request(struct rpc_rqst *req)
+{
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ dprintk("RPC: free backchannel req=%p\n", req);
+
+ smp_mb__before_clear_bit();
+ BUG_ON(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+ clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
+ smp_mb__after_clear_bit();
+
+ if (!xprt_need_to_requeue(xprt)) {
+ /*
+ * The last remaining session was destroyed while this
+ * entry was in use. Free the entry and don't attempt
+ * to add it back to the list because there is no need to
+ * have any more preallocated entries.
+ */
+ dprintk("RPC: Last session removed req=%p\n", req);
+ xprt_free_allocation(req);
+ return;
+ }
+
+ /*
+ * Return it to the list of preallocations so that it
+ * may be reused by a new callback request.
+ */
+ spin_lock_bh(&xprt->bc_pa_lock);
+ list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+ spin_unlock_bh(&xprt->bc_pa_lock);
+}
+
+#endif /* CONFIG_NFS_V4_1 */
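
An illustrative pairing of the two new entry points (the wrapper names and header choice are assumptions): a session that expects 'slots' concurrent callback requests preallocates them at setup time and releases the same number at teardown.

#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/bc_xprt.h>

static int example_enable_backchannel(struct rpc_xprt *xprt, unsigned int slots)
{
	/* Preallocates 'slots' rpc_rqst entries plus their XDR pages. */
	if (xprt_setup_backchannel(xprt, slots) != 0)
		return -ENOMEM;
	return 0;
}

static void example_disable_backchannel(struct rpc_xprt *xprt, unsigned int slots)
{
	/* Frees up to 'slots' of the preallocated entries. */
	xprt_destroy_backchannel(xprt, slots);
}
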
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
new file mode 100644
index 000000000000..13f214f53120
--- /dev/null
+++ b/net/sunrpc/bc_svc.c
@@ -0,0 +1,81 @@
+/******************************************************************************
+
+(c) 2007 Network Appliance, Inc. All Rights Reserved.
+(c) 2009 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * The NFSv4.1 callback service helper routines.
+ * They implement the transport level processing required to send the
+ * reply over an existing open connection previously established by the client.
+ */
+
+#if defined(CONFIG_NFS_V4_1)
+
+#include <linux/module.h>
+
+#include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/bc_xprt.h>
+
+#define RPCDBG_FACILITY RPCDBG_SVCDSP
+
+void bc_release_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ dprintk("RPC: bc_release_request: task= %p\n", task);
+
+ /*
+ * Release this request only if it's a backchannel
+ * preallocated request
+ */
+ if (!bc_prealloc(req))
+ return;
+ xprt_free_bc_request(req);
+}
+
+/* Empty callback ops */
+static const struct rpc_call_ops nfs41_callback_ops = {
+};
+
+
+/*
+ * Send the callback reply
+ */
+int bc_send(struct rpc_rqst *req)
+{
+ struct rpc_task *task;
+ int ret;
+
+ dprintk("RPC: bc_send req= %p\n", req);
+ task = rpc_run_bc_task(req, &nfs41_callback_ops);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else {
+ BUG_ON(atomic_read(&task->tk_count) != 1);
+ ret = task->tk_status;
+ rpc_put_task(task);
+ }
+	dprintk("RPC: bc_send ret= %d\n", ret);
+	return ret;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 20029a79a5de..ff0c23053d2f 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -488,7 +488,7 @@ static void do_cache_clean(struct work_struct *work)
{
int delay = 5;
if (cache_clean() == -1)
- delay = 30*HZ;
+ delay = round_jiffies_relative(30*HZ);
if (list_empty(&cache_list))
delay = 0;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5abab094441f..5bc2f45bddf0 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -36,7 +36,9 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/metrics.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include "sunrpc.h"
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_CALL
@@ -63,6 +65,9 @@ static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
+#if defined(CONFIG_NFS_V4_1)
+static void call_bc_transmit(struct rpc_task *task);
+#endif /* CONFIG_NFS_V4_1 */
static void call_status(struct rpc_task *task);
static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
@@ -613,6 +618,50 @@ rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
}
EXPORT_SYMBOL_GPL(rpc_call_async);
+#if defined(CONFIG_NFS_V4_1)
+/**
+ * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
+ * rpc_execute against it
+ * @req: RPC request to send
+ * @tk_ops: RPC call ops
+ */
+struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
+ const struct rpc_call_ops *tk_ops)
+{
+ struct rpc_task *task;
+ struct xdr_buf *xbufp = &req->rq_snd_buf;
+ struct rpc_task_setup task_setup_data = {
+ .callback_ops = tk_ops,
+ };
+
+ dprintk("RPC: rpc_run_bc_task req= %p\n", req);
+ /*
+ * Create an rpc_task to send the data
+ */
+ task = rpc_new_task(&task_setup_data);
+ if (!task) {
+ xprt_free_bc_request(req);
+ goto out;
+ }
+ task->tk_rqstp = req;
+
+ /*
+ * Set up the xdr_buf length.
+ * This also indicates that the buffer is XDR encoded already.
+ */
+ xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
+ xbufp->tail[0].iov_len;
+
+ task->tk_action = call_bc_transmit;
+ atomic_inc(&task->tk_count);
+ BUG_ON(atomic_read(&task->tk_count) != 2);
+ rpc_execute(task);
+
+out:
+ dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
+ return task;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
void
rpc_call_start(struct rpc_task *task)
{
@@ -695,6 +744,19 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
EXPORT_SYMBOL_GPL(rpc_force_rebind);
/*
+ * Restart an (async) RPC call from the call_prepare state.
+ * Usually called from within the exit handler.
+ */
+void
+rpc_restart_call_prepare(struct rpc_task *task)
+{
+ if (RPC_ASSASSINATED(task))
+ return;
+ task->tk_action = rpc_prepare_task;
+}
+EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
+
+/*
* Restart an (async) RPC call. Usually called from within the
* exit handler.
*/
@@ -1085,7 +1147,7 @@ call_transmit(struct rpc_task *task)
* in order to allow access to the socket to other RPC requests.
*/
call_transmit_status(task);
- if (task->tk_msg.rpc_proc->p_decode != NULL)
+ if (rpc_reply_expected(task))
return;
task->tk_action = rpc_exit_task;
rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
@@ -1120,6 +1182,72 @@ call_transmit_status(struct rpc_task *task)
}
}
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * 5b. Send the backchannel RPC reply. On error, drop the reply. In
+ * addition, disconnect on connectivity errors.
+ */
+static void
+call_bc_transmit(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ BUG_ON(task->tk_status != 0);
+ task->tk_status = xprt_prepare_transmit(task);
+ if (task->tk_status == -EAGAIN) {
+ /*
+ * Could not reserve the transport. Try again after the
+ * transport is released.
+ */
+ task->tk_status = 0;
+ task->tk_action = call_bc_transmit;
+ return;
+ }
+
+ task->tk_action = rpc_exit_task;
+ if (task->tk_status < 0) {
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ return;
+ }
+
+ xprt_transmit(task);
+ xprt_end_transmit(task);
+ dprint_status(task);
+ switch (task->tk_status) {
+ case 0:
+ /* Success */
+ break;
+ case -EHOSTDOWN:
+ case -EHOSTUNREACH:
+ case -ENETUNREACH:
+ case -ETIMEDOUT:
+ /*
+ * Problem reaching the server. Disconnect and let the
+ * forechannel reestablish the connection. The server will
+ * have to retransmit the backchannel request and we'll
+ * reprocess it. Since these ops are idempotent, there's no
+ * need to cache our reply at this time.
+ */
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ xprt_conditional_disconnect(task->tk_xprt,
+ req->rq_connect_cookie);
+ break;
+ default:
+ /*
+ * We were unable to reply and will have to drop the
+ * request. The server should reconnect and retransmit.
+ */
+ BUG_ON(task->tk_status == -EAGAIN);
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ break;
+ }
+ rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* 6. Sort out the RPC call status
*/
@@ -1130,8 +1258,8 @@ call_status(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
int status;
- if (req->rq_received > 0 && !req->rq_bytes_sent)
- task->tk_status = req->rq_received;
+ if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
+ task->tk_status = req->rq_reply_bytes_recvd;
dprint_status(task);
@@ -1248,7 +1376,7 @@ call_decode(struct rpc_task *task)
/*
* Ensure that we see all writes made by xprt_complete_rqst()
- * before it changed req->rq_received.
+ * before it changed req->rq_reply_bytes_recvd.
*/
smp_rmb();
req->rq_rcv_buf.len = req->rq_private_buf.len;
@@ -1289,7 +1417,7 @@ out_retry:
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
- req->rq_received = req->rq_rcv_buf.len = 0;
+ req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
@@ -1377,13 +1505,14 @@ rpc_verify_header(struct rpc_task *task)
}
if ((len -= 3) < 0)
goto out_overflow;
- p += 1; /* skip XID */
+ p += 1; /* skip XID */
if ((n = ntohl(*p++)) != RPC_REPLY) {
dprintk("RPC: %5u %s: not an RPC reply: %x\n",
- task->tk_pid, __func__, n);
+ task->tk_pid, __func__, n);
goto out_garbage;
}
+
if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
if (--len < 0)
goto out_overflow;
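
A possible consumer of the new rpc_restart_call_prepare() helper (the callback below is illustrative, not part of this patch): an async task's completion handler re-drives the call from the rpc_call_prepare state instead of finishing it.

#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/clnt.h>

static void example_call_done(struct rpc_task *task, void *calldata)
{
	/* Transient failure: run the call again from call_prepare. */
	if (task->tk_status == -EAGAIN) {
		rpc_restart_call_prepare(task);
		return;
	}
	/* Otherwise fall through and let the normal exit path complete. */
}
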
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ff50a0546865..1102ce1251f7 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(rpc_delay);
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
-static void rpc_prepare_task(struct rpc_task *task)
+void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 1ef6e46d9da2..1b4e6791ecf3 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -141,12 +141,14 @@ EXPORT_SYMBOL_GPL(rpc_free_iostats);
void rpc_count_iostats(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_iostats *stats = task->tk_client->cl_metrics;
+ struct rpc_iostats *stats;
struct rpc_iostats *op_metrics;
long rtt, execute, queue;
- if (!stats || !req)
+ if (!task->tk_client || !task->tk_client->cl_metrics || !req)
return;
+
+ stats = task->tk_client->cl_metrics;
op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
op_metrics->om_ops++;
@@ -154,7 +156,7 @@ void rpc_count_iostats(struct rpc_task *task)
op_metrics->om_timeouts += task->tk_timeouts;
op_metrics->om_bytes_sent += task->tk_bytes_sent;
- op_metrics->om_bytes_recv += req->rq_received;
+ op_metrics->om_bytes_recv += req->rq_reply_bytes_recvd;
queue = (long)req->rq_xtime - task->tk_start;
if (queue < 0)
diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
new file mode 100644
index 000000000000..5d9dd742264b
--- /dev/null
+++ b/net/sunrpc/sunrpc.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+
+(c) 2008 NetApp. All Rights Reserved.
+
+NetApp provides this source code under the GPL v2 License.
+The GPL v2 license is available at
+http://opensource.org/licenses/gpl-license.php.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+/*
+ * Functions and macros used internally by RPC
+ */
+
+#ifndef _NET_SUNRPC_SUNRPC_H
+#define _NET_SUNRPC_SUNRPC_H
+
+static inline int rpc_reply_expected(struct rpc_task *task)
+{
+ return (task->tk_msg.rpc_proc != NULL) &&
+ (task->tk_msg.rpc_proc->p_decode != NULL);
+}
+
+#endif /* _NET_SUNRPC_SUNRPC_H */
+
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 5ed8931dfe98..952f206ff307 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -25,6 +25,7 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/bc_xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCDSP
@@ -486,6 +487,10 @@ svc_destroy(struct svc_serv *serv)
if (svc_serv_is_pooled(serv))
svc_pool_map_put();
+#if defined(CONFIG_NFS_V4_1)
+ svc_sock_destroy(serv->bc_xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
svc_unregister(serv);
kfree(serv->sv_pools);
kfree(serv);
@@ -970,20 +975,18 @@ svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
}
/*
- * Process the RPC request.
+ * Common routine for processing the RPC request.
*/
-int
-svc_process(struct svc_rqst *rqstp)
+static int
+svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
{
struct svc_program *progp;
struct svc_version *versp = NULL; /* compiler food */
struct svc_procedure *procp = NULL;
- struct kvec * argv = &rqstp->rq_arg.head[0];
- struct kvec * resv = &rqstp->rq_res.head[0];
struct svc_serv *serv = rqstp->rq_server;
kxdrproc_t xdr;
__be32 *statp;
- u32 dir, prog, vers, proc;
+ u32 prog, vers, proc;
__be32 auth_stat, rpc_stat;
int auth_res;
__be32 *reply_statp;
@@ -993,19 +996,6 @@ svc_process(struct svc_rqst *rqstp)
if (argv->iov_len < 6*4)
goto err_short_len;
- /* setup response xdr_buf.
- * Initially it has just one page
- */
- rqstp->rq_resused = 1;
- resv->iov_base = page_address(rqstp->rq_respages[0]);
- resv->iov_len = 0;
- rqstp->rq_res.pages = rqstp->rq_respages + 1;
- rqstp->rq_res.len = 0;
- rqstp->rq_res.page_base = 0;
- rqstp->rq_res.page_len = 0;
- rqstp->rq_res.buflen = PAGE_SIZE;
- rqstp->rq_res.tail[0].iov_base = NULL;
- rqstp->rq_res.tail[0].iov_len = 0;
/* Will be turned off only in gss privacy case: */
rqstp->rq_splice_ok = 1;
/* Will be turned off only when NFSv4 Sessions are used */
@@ -1014,17 +1004,13 @@ svc_process(struct svc_rqst *rqstp)
/* Setup reply header */
rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
- rqstp->rq_xid = svc_getu32(argv);
svc_putu32(resv, rqstp->rq_xid);
- dir = svc_getnl(argv);
vers = svc_getnl(argv);
/* First words of reply: */
svc_putnl(resv, 1); /* REPLY */
- if (dir != 0) /* direction != CALL */
- goto err_bad_dir;
if (vers != 2) /* RPC version number */
goto err_bad_rpc;
@@ -1147,7 +1133,7 @@ svc_process(struct svc_rqst *rqstp)
sendit:
if (svc_authorise(rqstp))
goto dropit;
- return svc_send(rqstp);
+ return 1; /* Caller can now send it */
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
@@ -1161,12 +1147,6 @@ err_short_len:
goto dropit; /* drop request */
-err_bad_dir:
- svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
-
- serv->sv_stats->rpcbadfmt++;
- goto dropit; /* drop request */
-
err_bad_rpc:
serv->sv_stats->rpcbadfmt++;
svc_putnl(resv, 1); /* REJECT */
@@ -1220,6 +1200,100 @@ err_bad:
EXPORT_SYMBOL_GPL(svc_process);
/*
+ * Process the RPC request.
+ */
+int
+svc_process(struct svc_rqst *rqstp)
+{
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ struct svc_serv *serv = rqstp->rq_server;
+ u32 dir;
+ int error;
+
+ /*
+ * Setup response xdr_buf.
+ * Initially it has just one page
+ */
+ rqstp->rq_resused = 1;
+ resv->iov_base = page_address(rqstp->rq_respages[0]);
+ resv->iov_len = 0;
+ rqstp->rq_res.pages = rqstp->rq_respages + 1;
+ rqstp->rq_res.len = 0;
+ rqstp->rq_res.page_base = 0;
+ rqstp->rq_res.page_len = 0;
+ rqstp->rq_res.buflen = PAGE_SIZE;
+ rqstp->rq_res.tail[0].iov_base = NULL;
+ rqstp->rq_res.tail[0].iov_len = 0;
+
+ rqstp->rq_xid = svc_getu32(argv);
+
+ dir = svc_getnl(argv);
+ if (dir != 0) {
+ /* direction != CALL */
+ svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
+ serv->sv_stats->rpcbadfmt++;
+ svc_drop(rqstp);
+ return 0;
+ }
+
+ error = svc_process_common(rqstp, argv, resv);
+ if (error <= 0)
+ return error;
+
+ return svc_send(rqstp);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Process a backchannel RPC request that arrived over an existing
+ * outbound connection
+ */
+int
+bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
+ struct svc_rqst *rqstp)
+{
+ struct kvec *argv = &rqstp->rq_arg.head[0];
+ struct kvec *resv = &rqstp->rq_res.head[0];
+ int error;
+
+ /* Build the svc_rqst used by the common processing routine */
+ rqstp->rq_xprt = serv->bc_xprt;
+ rqstp->rq_xid = req->rq_xid;
+ rqstp->rq_prot = req->rq_xprt->prot;
+ rqstp->rq_server = serv;
+
+ rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
+ memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
+ memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
+ memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
+
+ /* reset result send buffer "put" position */
+ resv->iov_len = 0;
+
+ if (rqstp->rq_prot != IPPROTO_TCP) {
+ printk(KERN_ERR "No support for Non-TCP transports!\n");
+ BUG();
+ }
+
+ /*
+ * Skip the next two words because they've already been
+ * processed in the transport
+ */
+ svc_getu32(argv); /* XID */
+ svc_getnl(argv); /* CALLDIR */
+
+ error = svc_process_common(rqstp, argv, resv);
+ if (error <= 0)
+ return error;
+
+ memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
+ return bc_send(req);
+}
+EXPORT_SYMBOL(bc_svc_process);
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
* Return (transport-specific) limit on the rpc payload.
*/
u32 svc_max_payload(const struct svc_rqst *rqstp)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index c200d92e57e4..6f33d33cc064 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -11,6 +11,7 @@
#include <net/sock.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svc_xprt.h>
+#include <linux/sunrpc/svcsock.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -1097,36 +1098,58 @@ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
}
EXPORT_SYMBOL_GPL(svc_find_xprt);
-/*
- * Format a buffer with a list of the active transports. A zero for
- * the buflen parameter disables target buffer overflow checking.
+static int svc_one_xprt_name(const struct svc_xprt *xprt,
+ char *pos, int remaining)
+{
+ int len;
+
+ len = snprintf(pos, remaining, "%s %u\n",
+ xprt->xpt_class->xcl_name,
+ svc_xprt_local_port(xprt));
+ if (len >= remaining)
+ return -ENAMETOOLONG;
+ return len;
+}
+
+/**
+ * svc_xprt_names - format a buffer with a list of transport names
+ * @serv: pointer to an RPC service
+ * @buf: pointer to a buffer to be filled in
+ * @buflen: length of buffer to be filled in
+ *
+ * Fills in @buf with a string containing a list of transport names,
+ * each name terminated with '\n'.
+ *
+ * Returns positive length of the filled-in string on success; otherwise
+ * a negative errno value is returned if an error occurs.
*/
-int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen)
+int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen)
{
struct svc_xprt *xprt;
- char xprt_str[64];
- int totlen = 0;
- int len;
+ int len, totlen;
+ char *pos;
/* Sanity check args */
if (!serv)
return 0;
spin_lock_bh(&serv->sv_lock);
+
+ pos = buf;
+ totlen = 0;
list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) {
- len = snprintf(xprt_str, sizeof(xprt_str),
- "%s %d\n", xprt->xpt_class->xcl_name,
- svc_xprt_local_port(xprt));
- /* If the string was truncated, replace with error string */
- if (len >= sizeof(xprt_str))
- strcpy(xprt_str, "name-too-long\n");
- /* Don't overflow buffer */
- len = strlen(xprt_str);
- if (buflen && (len + totlen >= buflen))
+ len = svc_one_xprt_name(xprt, pos, buflen - totlen);
+ if (len < 0) {
+ *buf = '\0';
+ totlen = len;
+ }
+ if (len <= 0)
break;
- strcpy(buf+totlen, xprt_str);
+
+ pos += len;
totlen += len;
}
+
spin_unlock_bh(&serv->sv_lock);
return totlen;
}
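
Minimal illustration (example_format_name is hypothetical) of the truncation check both new name helpers rely on: snprintf() returns the length it would have written, so a result >= the remaining space means the output was cut short and the caller should see -ENAMETOOLONG rather than a silently truncated name.

#include <linux/kernel.h>
#include <linux/errno.h>

static int example_format_name(char *pos, int remaining,
			       const char *class_name, unsigned int port)
{
	int len;

	len = snprintf(pos, remaining, "%s %u\n", class_name, port);
	if (len >= remaining)
		return -ENAMETOOLONG;	/* would not fit; buffer unusable */
	return len;			/* bytes actually written */
}
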
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9d504234af4a..23128ee191ae 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -240,42 +240,76 @@ out:
/*
* Report socket names for nfsdfs
*/
-static int one_sock_name(char *buf, struct svc_sock *svsk)
+static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
{
+ const struct sock *sk = svsk->sk_sk;
+ const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
+ "udp" : "tcp";
int len;
- switch(svsk->sk_sk->sk_family) {
- case AF_INET:
- len = sprintf(buf, "ipv4 %s %pI4 %d\n",
- svsk->sk_sk->sk_protocol == IPPROTO_UDP ?
- "udp" : "tcp",
- &inet_sk(svsk->sk_sk)->rcv_saddr,
- inet_sk(svsk->sk_sk)->num);
+ switch (sk->sk_family) {
+ case PF_INET:
+ len = snprintf(buf, remaining, "ipv4 %s %pI4 %d\n",
+ proto_name,
+ &inet_sk(sk)->rcv_saddr,
+ inet_sk(sk)->num);
+ break;
+ case PF_INET6:
+ len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
+ proto_name,
+ &inet6_sk(sk)->rcv_saddr,
+ inet_sk(sk)->num);
break;
default:
- len = sprintf(buf, "*unknown-%d*\n",
- svsk->sk_sk->sk_family);
+ len = snprintf(buf, remaining, "*unknown-%d*\n",
+ sk->sk_family);
+ }
+
+ if (len >= remaining) {
+ *buf = '\0';
+ return -ENAMETOOLONG;
}
return len;
}
-int
-svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
+/**
+ * svc_sock_names - construct a list of listener names in a string
+ * @serv: pointer to RPC service
+ * @buf: pointer to a buffer to fill in with socket names
+ * @buflen: size of the buffer to be filled
+ * @toclose: pointer to '\0'-terminated C string containing the name
+ * of a listener to be closed
+ *
+ * Fills in @buf with a '\n'-separated list of names of listener
+ * sockets. If @toclose is not NULL, the socket named by @toclose
+ * is closed, and is not included in the output list.
+ *
+ * Returns positive length of the socket name string, or a negative
+ * errno value on error.
+ */
+int svc_sock_names(struct svc_serv *serv, char *buf, const size_t buflen,
+ const char *toclose)
{
struct svc_sock *svsk, *closesk = NULL;
int len = 0;
if (!serv)
return 0;
+
spin_lock_bh(&serv->sv_lock);
list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) {
- int onelen = one_sock_name(buf+len, svsk);
- if (toclose && strcmp(toclose, buf+len) == 0)
+ int onelen = svc_one_sock_name(svsk, buf + len, buflen - len);
+ if (onelen < 0) {
+ len = onelen;
+ break;
+ }
+ if (toclose && strcmp(toclose, buf + len) == 0)
closesk = svsk;
else
len += onelen;
}
spin_unlock_bh(&serv->sv_lock);
+
if (closesk)
/* Should unregister with portmap, but you cannot
* unregister just one protocol...
@@ -346,6 +380,7 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2;
sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
+ sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk);
#endif
}
@@ -387,6 +422,15 @@ static void svc_write_space(struct sock *sk)
}
}
+static void svc_tcp_write_space(struct sock *sk)
+{
+ struct socket *sock = sk->sk_socket;
+
+ if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
+ clear_bit(SOCK_NOSPACE, &sock->flags);
+ svc_write_space(sk);
+}
+
/*
* Copy the UDP datagram's destination address to the rqstp structure.
* The 'destination' address in this case is the address to which the
@@ -427,13 +471,14 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
long all[SVC_PKTINFO_SPACE / sizeof(long)];
} buffer;
struct cmsghdr *cmh = &buffer.hdr;
- int err, len;
struct msghdr msg = {
.msg_name = svc_addr(rqstp),
.msg_control = cmh,
.msg_controllen = sizeof(buffer),
.msg_flags = MSG_DONTWAIT,
};
+ size_t len;
+ int err;
if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
/* udp sockets need large rcvbuf as all pending
@@ -465,8 +510,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
return -EAGAIN;
}
len = svc_addr_len(svc_addr(rqstp));
- if (len < 0)
- return len;
+ if (len == 0)
+ return -EAFNOSUPPORT;
rqstp->rq_addrlen = len;
if (skb->tstamp.tv64 == 0) {
skb->tstamp = ktime_get_real();
@@ -980,25 +1025,16 @@ static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
static int svc_tcp_has_wspace(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
- struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+ struct svc_serv *serv = svsk->sk_xprt.xpt_server;
int required;
- int wspace;
- /*
- * Set the SOCK_NOSPACE flag before checking the available
- * sock space.
- */
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
+ return 1;
+ required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
+ if (sk_stream_wspace(svsk->sk_sk) >= required)
+ return 1;
set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg;
- wspace = sk_stream_wspace(svsk->sk_sk);
-
- if (wspace < sk_stream_min_wspace(svsk->sk_sk))
- return 0;
- if (required * 2 > wspace)
- return 0;
-
- clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
- return 1;
+ return 0;
}
static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
@@ -1054,7 +1090,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
dprintk("setting up TCP socket for reading\n");
sk->sk_state_change = svc_tcp_state_change;
sk->sk_data_ready = svc_tcp_data_ready;
- sk->sk_write_space = svc_write_space;
+ sk->sk_write_space = svc_tcp_write_space;
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -1148,9 +1184,19 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
return svsk;
}
-int svc_addsock(struct svc_serv *serv,
- int fd,
- char *name_return)
+/**
+ * svc_addsock - add a listener socket to an RPC service
+ * @serv: pointer to RPC service to which to add a new listener
+ * @fd: file descriptor of the new listener
+ * @name_return: pointer to buffer to fill in with name of listener
+ * @len: size of the buffer
+ *
+ * Fills in socket name and returns positive length of name if successful.
+ * Name is terminated with '\n'. On error, returns a negative errno
+ * value.
+ */
+int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
+ const size_t len)
{
int err = 0;
struct socket *so = sockfd_lookup(fd, &err);
@@ -1190,7 +1236,7 @@ int svc_addsock(struct svc_serv *serv,
sockfd_put(so);
return err;
}
- return one_sock_name(name_return, svsk);
+ return svc_one_sock_name(svsk, name_return, len);
}
EXPORT_SYMBOL_GPL(svc_addsock);
@@ -1327,3 +1373,42 @@ static void svc_sock_free(struct svc_xprt *xprt)
sock_release(svsk->sk_sock);
kfree(svsk);
}
+
+/*
+ * Create a svc_xprt.
+ *
+ * For internal use only (e.g. nfsv4.1 backchannel).
+ * Callers should typically use the xpo_create() method.
+ */
+struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+{
+ struct svc_sock *svsk;
+ struct svc_xprt *xprt = NULL;
+
+ dprintk("svc: %s\n", __func__);
+ svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
+ if (!svsk)
+ goto out;
+
+ xprt = &svsk->sk_xprt;
+ if (prot == IPPROTO_TCP)
+ svc_xprt_init(&svc_tcp_class, xprt, serv);
+ else if (prot == IPPROTO_UDP)
+ svc_xprt_init(&svc_udp_class, xprt, serv);
+ else
+ BUG();
+out:
+ dprintk("svc: %s return %p\n", __func__, xprt);
+ return xprt;
+}
+EXPORT_SYMBOL_GPL(svc_sock_create);
+
+/*
+ * Destroy a svc_sock.
+ */
+void svc_sock_destroy(struct svc_xprt *xprt)
+{
+ if (xprt)
+ kfree(container_of(xprt, struct svc_sock, sk_xprt));
+}
+EXPORT_SYMBOL_GPL(svc_sock_destroy);
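
A hedged sketch of a caller adapting to the new svc_addsock() signature above (example_add_listener is hypothetical): the buffer length is now passed explicitly, so an over-long listener name comes back as -ENAMETOOLONG instead of overflowing name_return.

#include <linux/sunrpc/svcsock.h>

static int example_add_listener(struct svc_serv *serv, int fd)
{
	char name[64];
	int len;

	len = svc_addsock(serv, fd, name, sizeof(name));
	if (len < 0)
		return len;	/* negative errno, e.g. -ENAMETOOLONG */

	/* 'name' now holds the '\n'-terminated listener description. */
	return 0;
}
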
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 06ca058572f2..f412a852bc73 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -12,8 +12,9 @@
* - Next, the caller puts together the RPC message, stuffs it into
* the request struct, and calls xprt_transmit().
* - xprt_transmit sends the message and installs the caller on the
- * transport's wait list. At the same time, it installs a timer that
- * is run after the packet's timeout has expired.
+ * transport's wait list. At the same time, if a reply is expected,
+ * it installs a timer that is run after the packet's timeout has
+ * expired.
* - When a packet arrives, the data_ready handler walks the list of
* pending requests for that transport. If a matching XID is found, the
* caller is woken up, and the timer removed.
@@ -46,6 +47,8 @@
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
+#include "sunrpc.h"
+
/*
* Local variables
*/
@@ -192,8 +195,8 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
*/
int xprt_reserve_xprt(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
@@ -803,9 +806,10 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
list_del_init(&req->rq_list);
req->rq_private_buf.len = copied;
- /* Ensure all writes are done before we update req->rq_received */
+ /* Ensure all writes are done before we update */
+ /* req->rq_reply_bytes_recvd */
smp_wmb();
- req->rq_received = copied;
+ req->rq_reply_bytes_recvd = copied;
rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
@@ -820,7 +824,7 @@ static void xprt_timer(struct rpc_task *task)
dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
spin_lock_bh(&xprt->transport_lock);
- if (!req->rq_received) {
+ if (!req->rq_reply_bytes_recvd) {
if (xprt->ops->timer)
xprt->ops->timer(task);
} else
@@ -842,8 +846,8 @@ int xprt_prepare_transmit(struct rpc_task *task)
dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
spin_lock_bh(&xprt->transport_lock);
- if (req->rq_received && !req->rq_bytes_sent) {
- err = req->rq_received;
+ if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
+ err = req->rq_reply_bytes_recvd;
goto out_unlock;
}
if (!xprt->ops->reserve_xprt(task))
@@ -855,7 +859,7 @@ out_unlock:
void xprt_end_transmit(struct rpc_task *task)
{
- xprt_release_write(task->tk_xprt, task);
+ xprt_release_write(task->tk_rqstp->rq_xprt, task);
}
/**
@@ -872,8 +876,11 @@ void xprt_transmit(struct rpc_task *task)
dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
- if (!req->rq_received) {
- if (list_empty(&req->rq_list)) {
+ if (!req->rq_reply_bytes_recvd) {
+ if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
+ /*
+ * Add to the list only if we're expecting a reply
+ */
spin_lock_bh(&xprt->transport_lock);
/* Update the softirq receive buffer */
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
@@ -908,8 +915,13 @@ void xprt_transmit(struct rpc_task *task)
/* Don't race with disconnect */
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
- else if (!req->rq_received)
+ else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
+ /*
+ * Sleep on the pending queue since
+ * we're expecting a reply.
+ */
rpc_sleep_on(&xprt->pending, task, xprt_timer);
+ }
spin_unlock_bh(&xprt->transport_lock);
}
@@ -982,11 +994,17 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
*/
void xprt_release(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_xprt *xprt;
struct rpc_rqst *req;
+ int is_bc_request;
if (!(req = task->tk_rqstp))
return;
+
+ /* Preallocated backchannel request? */
+ is_bc_request = bc_prealloc(req);
+
+ xprt = req->rq_xprt;
rpc_count_iostats(task);
spin_lock_bh(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
@@ -999,10 +1017,19 @@ void xprt_release(struct rpc_task *task)
mod_timer(&xprt->timer,
xprt->last_used + xprt->idle_timeout);
spin_unlock_bh(&xprt->transport_lock);
- xprt->ops->buf_free(req->rq_buffer);
+ if (!bc_prealloc(req))
+ xprt->ops->buf_free(req->rq_buffer);
task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
+
+ /*
+ * Early exit if this is a backchannel preallocated request.
+ * There is no need to have it added to the RPC slot list.
+ */
+ if (is_bc_request)
+ return;
+
memset(req, 0, sizeof(*req)); /* mark unused */
dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
@@ -1049,6 +1076,11 @@ found:
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
+#if defined(CONFIG_NFS_V4_1)
+ spin_lock_init(&xprt->bc_pa_lock);
+ INIT_LIST_HEAD(&xprt->bc_pa_list);
+#endif /* CONFIG_NFS_V4_1 */
+
INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
setup_timer(&xprt->timer, xprt_init_autodisconnect,
(unsigned long)xprt);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 42a6f9f20285..9e884383134f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -397,14 +397,14 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
if (!ch)
return 0;
- /* Allocate temporary reply and chunk maps */
- rpl_map = svc_rdma_get_req_map();
- chl_map = svc_rdma_get_req_map();
-
svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
if (ch_count > RPCSVC_MAXPAGES)
return -EINVAL;
+ /* Allocate temporary reply and chunk maps */
+ rpl_map = svc_rdma_get_req_map();
+ chl_map = svc_rdma_get_req_map();
+
if (!xprt->sc_frmr_pg_list_len)
sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
rpl_map, chl_map, ch_count,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6c2d61586551..83c73c4d017a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -34,6 +34,9 @@
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
+#ifdef CONFIG_NFS_V4_1
+#include <linux/sunrpc/bc_xprt.h>
+#endif
#include <net/sock.h>
#include <net/checksum.h>
@@ -270,6 +273,13 @@ struct sock_xprt {
#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
#define TCP_RCV_COPY_XID (1UL << 2)
#define TCP_RCV_COPY_DATA (1UL << 3)
+#define TCP_RCV_READ_CALLDIR (1UL << 4)
+#define TCP_RCV_COPY_CALLDIR (1UL << 5)
+
+/*
+ * TCP RPC flags
+ */
+#define TCP_RPC_REPLY (1UL << 6)
static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
@@ -956,7 +966,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea
transport->tcp_offset = 0;
/* Sanity check of the record length */
- if (unlikely(transport->tcp_reclen < 4)) {
+ if (unlikely(transport->tcp_reclen < 8)) {
dprintk("RPC: invalid TCP record fragment length\n");
xprt_force_disconnect(xprt);
return;
@@ -991,33 +1001,77 @@ static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_r
if (used != len)
return;
transport->tcp_flags &= ~TCP_RCV_COPY_XID;
- transport->tcp_flags |= TCP_RCV_COPY_DATA;
+ transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
transport->tcp_copied = 4;
- dprintk("RPC: reading reply for XID %08x\n",
+ dprintk("RPC: reading %s XID %08x\n",
+ (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
+ : "request with",
ntohl(transport->tcp_xid));
xs_tcp_check_fraghdr(transport);
}
-static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
+static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
+ struct xdr_skb_reader *desc)
{
- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- struct rpc_rqst *req;
+ size_t len, used;
+ u32 offset;
+ __be32 calldir;
+
+ /*
+ * We want transport->tcp_offset to be 8 at the end of this routine
+ * (4 bytes for the xid and 4 bytes for the call/reply flag).
+ * When this function is called for the first time,
+ * transport->tcp_offset is 4 (after having already read the xid).
+ */
+ offset = transport->tcp_offset - sizeof(transport->tcp_xid);
+ len = sizeof(calldir) - offset;
+ dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
+ used = xdr_skb_read_bits(desc, &calldir, len);
+ transport->tcp_offset += used;
+ if (used != len)
+ return;
+ transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
+ transport->tcp_flags |= TCP_RCV_COPY_DATA;
+ /*
+ * We don't yet have the XDR buffer, so we will write the calldir
+ * out after we get the buffer from the 'struct rpc_rqst'
+ */
+ if (ntohl(calldir) == RPC_REPLY)
+ transport->tcp_flags |= TCP_RPC_REPLY;
+ else
+ transport->tcp_flags &= ~TCP_RPC_REPLY;
+ dprintk("RPC: reading %s CALL/REPLY flag %08x\n",
+ (transport->tcp_flags & TCP_RPC_REPLY) ?
+ "reply for" : "request with", calldir);
+ xs_tcp_check_fraghdr(transport);
+}
+
+static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc,
+ struct rpc_rqst *req)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *rcvbuf;
size_t len;
ssize_t r;
- /* Find and lock the request corresponding to this xid */
- spin_lock(&xprt->transport_lock);
- req = xprt_lookup_rqst(xprt, transport->tcp_xid);
- if (!req) {
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- dprintk("RPC: XID %08x request not found!\n",
- ntohl(transport->tcp_xid));
- spin_unlock(&xprt->transport_lock);
- return;
+ rcvbuf = &req->rq_private_buf;
+
+ if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
+ /*
+ * Save the RPC direction in the XDR buffer
+ */
+ __be32 calldir = transport->tcp_flags & TCP_RPC_REPLY ?
+ htonl(RPC_REPLY) : 0;
+
+ memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
+ &calldir, sizeof(calldir));
+ transport->tcp_copied += sizeof(calldir);
+ transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
}
- rcvbuf = &req->rq_private_buf;
len = desc->count;
if (len > transport->tcp_reclen - transport->tcp_offset) {
struct xdr_skb_reader my_desc;
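
The xs_tcp_read_calldir() logic above extends the per-record header from 4 to 8 bytes: each record now starts with a 4-byte XID followed by a 4-byte call/reply word, which is also why the fragment-length sanity check changes from "< 4" to "< 8". The offset arithmetic exists because the call-direction word may itself straddle TCP segments. A standalone userspace illustration of that arithmetic (all names local to this example):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		size_t xid_len = sizeof(uint32_t);      /* 4-byte XID              */
		size_t calldir_len = sizeof(uint32_t);  /* 4-byte call/reply word  */
		size_t tcp_offset = 6;                  /* example: xid + 2 bytes  */

		/* How far into the call-direction word are we? */
		size_t offset = tcp_offset - xid_len;   /* -> 2 */
		/* How many bytes of it remain to be read? */
		size_t len = calldir_len - offset;      /* -> 2 */

		printf("offset into calldir = %zu, bytes still needed = %zu\n",
		       offset, len);
		/* Once the word is complete, tcp_offset == 8:
		 * 4 bytes of XID plus 4 bytes of call/reply flag. */
		return 0;
	}
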
@@ -1054,7 +1108,7 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea
"tcp_offset = %u, tcp_reclen = %u\n",
xprt, transport->tcp_copied,
transport->tcp_offset, transport->tcp_reclen);
- goto out;
+ return;
}
dprintk("RPC: XID %08x read %Zd bytes\n",
@@ -1070,11 +1124,125 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_rea
transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
}
-out:
+ return;
+}
+
+/*
+ * Finds the request corresponding to the RPC xid and invokes the common
+ * tcp read code to read the data.
+ */
+static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct rpc_rqst *req;
+
+ dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
+
+ /* Find and lock the request corresponding to this xid */
+ spin_lock(&xprt->transport_lock);
+ req = xprt_lookup_rqst(xprt, transport->tcp_xid);
+ if (!req) {
+ dprintk("RPC: XID %08x request not found!\n",
+ ntohl(transport->tcp_xid));
+ spin_unlock(&xprt->transport_lock);
+ return -1;
+ }
+
+ xs_tcp_read_common(xprt, desc, req);
+
if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
xprt_complete_rqst(req->rq_task, transport->tcp_copied);
+
spin_unlock(&xprt->transport_lock);
- xs_tcp_check_fraghdr(transport);
+ return 0;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Obtains an rpc_rqst previously allocated and invokes the common
+ * tcp read code to read the data. The result is placed in the callback
+ * queue.
+ * If we're unable to obtain the rpc_rqst, we schedule the closing of the
+ * connection and return -1.
+ */
+static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct rpc_rqst *req;
+
+ req = xprt_alloc_bc_request(xprt);
+ if (req == NULL) {
+ printk(KERN_WARNING "Callback slot table overflowed\n");
+ xprt_force_disconnect(xprt);
+ return -1;
+ }
+
+ req->rq_xid = transport->tcp_xid;
+ dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
+ xs_tcp_read_common(xprt, desc, req);
+
+ if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
+ struct svc_serv *bc_serv = xprt->bc_serv;
+
+ /*
+ * Add callback request to callback list. The callback
+ * service sleeps on the sv_cb_waitq waiting for new
+ * requests. Wake it up after enqueuing the
+ * request.
+ */
+ dprintk("RPC: add callback request to list\n");
+ spin_lock(&bc_serv->sv_cb_lock);
+ list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
+ spin_unlock(&bc_serv->sv_cb_lock);
+ wake_up(&bc_serv->sv_cb_waitq);
+ }
+
+ req->rq_private_buf.len = transport->tcp_copied;
+
+ return 0;
+}
+
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+
+ return (transport->tcp_flags & TCP_RPC_REPLY) ?
+ xs_tcp_read_reply(xprt, desc) :
+ xs_tcp_read_callback(xprt, desc);
+}
+#else
+static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ return xs_tcp_read_reply(xprt, desc);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Read data off the transport. This can be either an RPC_CALL or an
+ * RPC_REPLY. Relay the processing to helper functions.
+ */
+static void xs_tcp_read_data(struct rpc_xprt *xprt,
+ struct xdr_skb_reader *desc)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+
+ if (_xs_tcp_read_data(xprt, desc) == 0)
+ xs_tcp_check_fraghdr(transport);
+ else {
+ /*
+ * The transport_lock protects the request handling.
+ * There's no need to hold it to update the tcp_flags.
+ */
+ transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
+ }
}
static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
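
xs_tcp_read_callback() above is only the producer side of the hand-off: it queues the completed backchannel request on bc_serv->sv_cb_list and wakes sv_cb_waitq. The consumer is the NFSv4.1 callback service, which is not part of this diff; purely as an illustration of the hand-off, a simplified single-consumer sketch might look like:

	/* Hypothetical consumer sketch (not from this patch): wait for a
	 * queued backchannel request and take it off the list. Assumes a
	 * single consumer, so the list cannot be drained between the wakeup
	 * and taking sv_cb_lock. */
	static struct rpc_rqst *bc_dequeue_sketch(struct svc_serv *bc_serv)
	{
		struct rpc_rqst *req;

		wait_event(bc_serv->sv_cb_waitq,
			   !list_empty(&bc_serv->sv_cb_list));

		spin_lock(&bc_serv->sv_cb_lock);
		req = list_first_entry(&bc_serv->sv_cb_list,
				       struct rpc_rqst, rq_bc_list);
		list_del(&req->rq_bc_list);
		spin_unlock(&bc_serv->sv_cb_lock);

		return req;
	}
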
@@ -1114,9 +1282,14 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns
xs_tcp_read_xid(transport, &desc);
continue;
}
+ /* Read in the call/reply flag */
+ if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
+ xs_tcp_read_calldir(transport, &desc);
+ continue;
+ }
/* Read in the request data */
if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
- xs_tcp_read_request(xprt, &desc);
+ xs_tcp_read_data(xprt, &desc);
continue;
}
/* Skip over any trailing bytes on short reads */
@@ -1792,6 +1965,7 @@ static void xs_tcp_setup_socket(struct rpc_xprt *xprt,
*/
set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
xprt_force_disconnect(xprt);
+ break;
case -ECONNREFUSED:
case -ECONNRESET:
case -ENETUNREACH:
@@ -2010,6 +2184,9 @@ static struct rpc_xprt_ops xs_tcp_ops = {
.buf_free = rpc_free,
.send_request = xs_tcp_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
+#if defined(CONFIG_NFS_V4_1)
+ .release_request = bc_release_request,
+#endif /* CONFIG_NFS_V4_1 */
.close = xs_tcp_close,
.destroy = xs_destroy,
.print_stats = xs_tcp_print_stats,
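
Taken together, the xprtsock.c hunks add one stage to the TCP receive state machine: record marker, then XID, then the new call/reply word, then payload (delivered either to a matched reply slot or to a preallocated backchannel request), with any trailing bytes discarded. A condensed, illustrative view of the per-record dispatch in xs_tcp_data_recv() after this patch (argument handling simplified):

	/* Illustrative summary only; the real code runs this inside a loop. */
	if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
		xs_tcp_read_fraghdr(xprt, desc);      /* 4-byte record marker */
	} else if (transport->tcp_flags & TCP_RCV_COPY_XID) {
		xs_tcp_read_xid(transport, desc);     /* 4-byte XID           */
	} else if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
		xs_tcp_read_calldir(transport, desc); /* call or reply?       */
	} else if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
		xs_tcp_read_data(xprt, desc);         /* payload              */
	} else {
		xs_tcp_read_discard(transport, desc); /* trailing bytes       */
	}
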
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 24168560ebae..241bddd0b4f1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1687,13 +1687,52 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
- err = -EINVAL;
+ err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ if (err)
goto out;
+
+ /* validate settings */
+ err = 0;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* disallow mesh-specific things */
+ if (params.plink_action)
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* disallow everything but AUTHORIZED flag */
+ if (params.plink_action)
+ err = -EINVAL;
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* disallow things mesh doesn't support */
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.sta_flags_mask)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
}
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
goto out;
@@ -1728,9 +1767,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL80211_ATTR_MAC])
return -EINVAL;
- if (!info->attrs[NL80211_ATTR_STA_AID])
- return -EINVAL;
-
if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
return -EINVAL;
@@ -1745,9 +1781,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
params.listen_interval =
nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
- params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
- if (!params.aid || params.aid > IEEE80211_MAX_AID)
- return -EINVAL;
+ if (info->attrs[NL80211_ATTR_STA_AID]) {
+ params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+ if (!params.aid || params.aid > IEEE80211_MAX_AID)
+ return -EINVAL;
+ }
if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
params.ht_capa =
@@ -1762,13 +1800,39 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out_rtnl;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
- err = -EINVAL;
+ err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ if (err)
goto out;
+
+ /* validate settings */
+ err = 0;
+
+ switch (dev->ieee80211_ptr->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ /* all ok but must have AID */
+ if (!params.aid)
+ err = -EINVAL;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* disallow things mesh doesn't support */
+ if (params.vlan)
+ err = -EINVAL;
+ if (params.aid)
+ err = -EINVAL;
+ if (params.ht_capa)
+ err = -EINVAL;
+ if (params.listen_interval >= 0)
+ err = -EINVAL;
+ if (params.supported_rates)
+ err = -EINVAL;
+ if (params.sta_flags_mask)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
}
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
if (err)
goto out;
@@ -1812,7 +1876,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
goto out_rtnl;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
err = -EINVAL;
goto out;
}
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index d5be2b30cda5..fefe1a57f31a 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -320,38 +320,6 @@ static struct snd_soc_device corgi_snd_devdata = {
.codec_dev = &soc_codec_dev_wm8731,
};
-/*
- * FIXME: This is a temporary bodge to avoid cross-tree merge issues.
- * New drivers should register the wm8731 I2C device in the machine
- * setup code (under arch/arm for ARM systems).
- */
-static int wm8731_i2c_register(void)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = 0x1b;
- strlcpy(info.type, "wm8731", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(0);
- if (!adapter) {
- printk(KERN_ERR "can't get i2c adapter 0\n");
- return -ENODEV;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- printk(KERN_ERR "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- return -ENODEV;
- }
-
- return 0;
-}
-
static struct platform_device *corgi_snd_device;
static int __init corgi_init(void)
@@ -362,10 +330,6 @@ static int __init corgi_init(void)
machine_is_husky()))
return -ENODEV;
- ret = wm8731_i2c_register();
- if (ret != 0)
- return ret;
-
corgi_snd_device = platform_device_alloc("soc-audio", -1);
if (!corgi_snd_device)
return -ENOMEM;
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index a51058f66747..c5f36e0eab58 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -280,38 +280,6 @@ static struct snd_soc_card snd_soc_poodle = {
.num_links = 1,
};
-/*
- * FIXME: This is a temporary bodge to avoid cross-tree merge issues.
- * New drivers should register the wm8731 I2C device in the machine
- * setup code (under arch/arm for ARM systems).
- */
-static int wm8731_i2c_register(void)
-{
- struct i2c_board_info info;
- struct i2c_adapter *adapter;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- info.addr = 0x1b;
- strlcpy(info.type, "wm8731", I2C_NAME_SIZE);
-
- adapter = i2c_get_adapter(0);
- if (!adapter) {
- printk(KERN_ERR "can't get i2c adapter 0\n");
- return -ENODEV;
- }
-
- client = i2c_new_device(adapter, &info);
- i2c_put_adapter(adapter);
- if (!client) {
- printk(KERN_ERR "can't add i2c device at 0x%x\n",
- (unsigned int)info.addr);
- return -ENODEV;
- }
-
- return 0;
-}
-
/* poodle audio subsystem */
static struct snd_soc_device poodle_snd_devdata = {
.card = &snd_soc_poodle,
@@ -327,10 +295,6 @@ static int __init poodle_init(void)
if (!machine_is_poodle())
return -ENODEV;
- ret = wm8731_i2c_register();
- if (ret != 0)
- return ret;
-
locomo_gpio_set_dir(&poodle_locomo_device.dev,
POODLE_LOCOMO_GPIO_AMP_ON, 0);
/* should we mute HP at startup - burning power ?*/
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index bccb529dac08..ceb68aa51f7f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -13,6 +13,12 @@
#define cpu_relax() asm volatile ("" ::: "memory");
#endif
+#ifdef __s390__
+#include "../../arch/s390/include/asm/unistd.h"
+#define rmb() asm volatile("bcr 15,0" ::: "memory")
+#define cpu_relax() asm volatile("" ::: "memory");
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>