-rw-r--r--Documentation/cpu-freq/governors.txt11
-rw-r--r--Documentation/filesystems/nfs/pnfs.txt7
-rw-r--r--Documentation/hwmon/f71882fg16
-rw-r--r--Documentation/hwmon/lineage-pem77
-rw-r--r--Documentation/hwmon/lm8512
-rw-r--r--Documentation/hwmon/ltc415147
-rw-r--r--Documentation/hwmon/max663949
-rw-r--r--Documentation/hwmon/pmbus215
-rw-r--r--Documentation/hwmon/sysfs-interface11
-rw-r--r--Documentation/hwmon/w83627ehf60
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas23
-rw-r--r--Documentation/scsi/hpsa.txt23
-rw-r--r--Documentation/scsi/scsi_mid_low_api.txt14
-rw-r--r--MAINTAINERS19
-rw-r--r--arch/arm/mach-s5pv210/cpufreq.c3
-rw-r--r--arch/arm/mach-s5pv310/cpufreq.c3
-rw-r--r--arch/arm/plat-s3c24xx/cpu-freq.c2
-rw-r--r--arch/mips/include/asm/mach-jz4740/platform.h1
-rw-r--r--arch/mips/jz4740/platform.c16
-rw-r--r--arch/powerpc/platforms/powermac/cpufreq_32.c2
-rw-r--r--arch/s390/Kconfig21
-rw-r--r--arch/s390/Kconfig.debug3
-rw-r--r--arch/s390/Makefile3
-rw-r--r--arch/s390/boot/compressed/Makefile6
-rw-r--r--arch/s390/boot/compressed/misc.c5
-rw-r--r--arch/s390/include/asm/cacheflush.h4
-rw-r--r--arch/s390/kernel/machine_kexec.c2
-rw-r--r--arch/s390/mm/Makefile1
-rw-r--r--arch/s390/mm/pageattr.c55
-rw-r--r--arch/sh/Kconfig3
-rw-r--r--arch/sh/boards/board-espt.c7
-rw-r--r--arch/sh/boards/board-sh7757lcr.c185
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c2
-rw-r--r--arch/sh/boards/mach-sh7763rdp/setup.c6
-rw-r--r--arch/sh/boot/compressed/Makefile2
-rw-r--r--arch/sh/configs/sh7757lcr_defconfig19
-rw-r--r--arch/sh/drivers/pci/pcie-sh7786.c46
-rw-r--r--arch/sh/include/asm/unistd_32.h5
-rw-r--r--arch/sh/include/asm/unistd_64.h5
-rw-r--r--arch/sh/include/cpu-sh4/cpu/dma-register.h5
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7757.h32
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7757.c7
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7757.c542
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c6
-rw-r--r--arch/sh/kernel/irq.c61
-rw-r--r--arch/sh/kernel/syscalls_32.S3
-rw-r--r--arch/sh/kernel/syscalls_64.S3
-rw-r--r--arch/sh/mm/Makefile2
-rw-r--r--arch/sparc/Kconfig34
-rw-r--r--arch/sparc/Makefile3
-rw-r--r--arch/sparc/boot/Makefile31
-rw-r--r--arch/sparc/include/asm/irq_64.h18
-rw-r--r--arch/sparc/include/asm/leon.h3
-rw-r--r--arch/sparc/include/asm/leon_amba.h6
-rw-r--r--arch/sparc/include/asm/mmu_32.h3
-rw-r--r--arch/sparc/include/asm/smp_32.h6
-rw-r--r--arch/sparc/kernel/Makefile2
-rw-r--r--arch/sparc/kernel/cpu.c2
-rw-r--r--arch/sparc/kernel/entry.h4
-rw-r--r--arch/sparc/kernel/iommu.c3
-rw-r--r--arch/sparc/kernel/ioport.c108
-rw-r--r--arch/sparc/kernel/irq.h42
-rw-r--r--arch/sparc/kernel/irq_32.c260
-rw-r--r--arch/sparc/kernel/irq_64.c312
-rw-r--r--arch/sparc/kernel/kernel.h49
-rw-r--r--arch/sparc/kernel/ldc.c28
-rw-r--r--arch/sparc/kernel/leon_kernel.c11
-rw-r--r--arch/sparc/kernel/leon_pmc.c82
-rw-r--r--arch/sparc/kernel/leon_smp.c36
-rw-r--r--arch/sparc/kernel/of_device_32.c59
-rw-r--r--arch/sparc/kernel/pci.c11
-rw-r--r--arch/sparc/kernel/pci_common.c11
-rw-r--r--arch/sparc/kernel/pci_fire.c10
-rw-r--r--arch/sparc/kernel/pci_impl.h4
-rw-r--r--arch/sparc/kernel/pci_msi.c44
-rw-r--r--arch/sparc/kernel/pci_schizo.c4
-rw-r--r--arch/sparc/kernel/pci_sun4v.c9
-rw-r--r--arch/sparc/kernel/pcr.c2
-rw-r--r--arch/sparc/kernel/prom_irqtrans.c16
-rw-r--r--arch/sparc/kernel/ptrace_64.c3
-rw-r--r--arch/sparc/kernel/setup_32.c19
-rw-r--r--arch/sparc/kernel/smp_64.c11
-rw-r--r--arch/sparc/kernel/sun4c_irq.c85
-rw-r--r--arch/sparc/kernel/sun4d_irq.c298
-rw-r--r--arch/sparc/kernel/sun4d_smp.c176
-rw-r--r--arch/sparc/kernel/sun4m_irq.c202
-rw-r--r--arch/sparc/kernel/sun4m_smp.c91
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c21
-rw-r--r--arch/sparc/kernel/tick14.c39
-rw-r--r--arch/sparc/kernel/time_32.c2
-rw-r--r--arch/sparc/kernel/time_64.c4
-rw-r--r--arch/sparc/kernel/traps_64.c3
-rw-r--r--arch/sparc/kernel/una_asm_64.S2
-rw-r--r--arch/sparc/mm/fault_32.c3
-rw-r--r--arch/sparc/prom/misc_32.c4
-rw-r--r--arch/unicore32/.gitignore21
-rw-r--r--arch/unicore32/Kconfig275
-rw-r--r--arch/unicore32/Kconfig.debug68
-rw-r--r--arch/unicore32/Makefile95
-rw-r--r--arch/unicore32/boot/Makefile47
-rw-r--r--arch/unicore32/boot/compressed/Makefile68
-rw-r--r--arch/unicore32/boot/compressed/head.S204
-rw-r--r--arch/unicore32/boot/compressed/misc.c126
-rw-r--r--arch/unicore32/boot/compressed/piggy.S.in6
-rw-r--r--arch/unicore32/boot/compressed/vmlinux.lds.in61
-rw-r--r--arch/unicore32/configs/debug_defconfig215
-rw-r--r--arch/unicore32/include/asm/Kbuild2
-rw-r--r--arch/unicore32/include/asm/assembler.h131
-rw-r--r--arch/unicore32/include/asm/bitops.h47
-rw-r--r--arch/unicore32/include/asm/byteorder.h24
-rw-r--r--arch/unicore32/include/asm/cache.h27
-rw-r--r--arch/unicore32/include/asm/cacheflush.h211
-rw-r--r--arch/unicore32/include/asm/checksum.h41
-rw-r--r--arch/unicore32/include/asm/cpu-single.h45
-rw-r--r--arch/unicore32/include/asm/cputype.h33
-rw-r--r--arch/unicore32/include/asm/delay.h52
-rw-r--r--arch/unicore32/include/asm/dma-mapping.h124
-rw-r--r--arch/unicore32/include/asm/dma.h23
-rw-r--r--arch/unicore32/include/asm/elf.h94
-rw-r--r--arch/unicore32/include/asm/fpstate.h26
-rw-r--r--arch/unicore32/include/asm/fpu-ucf64.h53
-rw-r--r--arch/unicore32/include/asm/futex.h143
-rw-r--r--arch/unicore32/include/asm/gpio.h104
-rw-r--r--arch/unicore32/include/asm/hwcap.h32
-rw-r--r--arch/unicore32/include/asm/io.h55
-rw-r--r--arch/unicore32/include/asm/irq.h105
-rw-r--r--arch/unicore32/include/asm/irqflags.h53
-rw-r--r--arch/unicore32/include/asm/linkage.h22
-rw-r--r--arch/unicore32/include/asm/memblock.h46
-rw-r--r--arch/unicore32/include/asm/memory.h123
-rw-r--r--arch/unicore32/include/asm/mmu.h17
-rw-r--r--arch/unicore32/include/asm/mmu_context.h87
-rw-r--r--arch/unicore32/include/asm/mutex.h20
-rw-r--r--arch/unicore32/include/asm/page.h80
-rw-r--r--arch/unicore32/include/asm/pci.h46
-rw-r--r--arch/unicore32/include/asm/pgalloc.h110
-rw-r--r--arch/unicore32/include/asm/pgtable-hwdef.h55
-rw-r--r--arch/unicore32/include/asm/pgtable.h317
-rw-r--r--arch/unicore32/include/asm/processor.h92
-rw-r--r--arch/unicore32/include/asm/ptrace.h133
-rw-r--r--arch/unicore32/include/asm/sigcontext.h29
-rw-r--r--arch/unicore32/include/asm/stacktrace.h31
-rw-r--r--arch/unicore32/include/asm/string.h38
-rw-r--r--arch/unicore32/include/asm/suspend.h30
-rw-r--r--arch/unicore32/include/asm/system.h161
-rw-r--r--arch/unicore32/include/asm/thread_info.h154
-rw-r--r--arch/unicore32/include/asm/timex.h34
-rw-r--r--arch/unicore32/include/asm/tlb.h28
-rw-r--r--arch/unicore32/include/asm/tlbflush.h195
-rw-r--r--arch/unicore32/include/asm/traps.h21
-rw-r--r--arch/unicore32/include/asm/uaccess.h47
-rw-r--r--arch/unicore32/include/asm/unistd.h18
-rw-r--r--arch/unicore32/include/mach/PKUnity.h108
-rw-r--r--arch/unicore32/include/mach/bitfield.h24
-rw-r--r--arch/unicore32/include/mach/dma.h48
-rw-r--r--arch/unicore32/include/mach/hardware.h38
-rw-r--r--arch/unicore32/include/mach/map.h20
-rw-r--r--arch/unicore32/include/mach/memory.h58
-rw-r--r--arch/unicore32/include/mach/ocd.h36
-rw-r--r--arch/unicore32/include/mach/pm.h43
-rw-r--r--arch/unicore32/include/mach/regs-ac97.h32
-rw-r--r--arch/unicore32/include/mach/regs-dmac.h81
-rw-r--r--arch/unicore32/include/mach/regs-gpio.h70
-rw-r--r--arch/unicore32/include/mach/regs-i2c.h63
-rw-r--r--arch/unicore32/include/mach/regs-intc.h28
-rw-r--r--arch/unicore32/include/mach/regs-nand.h79
-rw-r--r--arch/unicore32/include/mach/regs-ost.h92
-rw-r--r--arch/unicore32/include/mach/regs-pci.h94
-rw-r--r--arch/unicore32/include/mach/regs-pm.h126
-rw-r--r--arch/unicore32/include/mach/regs-ps2.h20
-rw-r--r--arch/unicore32/include/mach/regs-resetc.h34
-rw-r--r--arch/unicore32/include/mach/regs-rtc.h37
-rw-r--r--arch/unicore32/include/mach/regs-sdc.h156
-rw-r--r--arch/unicore32/include/mach/regs-spi.h98
-rw-r--r--arch/unicore32/include/mach/regs-uart.h3
-rw-r--r--arch/unicore32/include/mach/regs-umal.h229
-rw-r--r--arch/unicore32/include/mach/regs-unigfx.h200
-rw-r--r--arch/unicore32/include/mach/uncompress.h34
-rw-r--r--arch/unicore32/kernel/Makefile33
-rw-r--r--arch/unicore32/kernel/asm-offsets.c112
-rw-r--r--arch/unicore32/kernel/clock.c390
-rw-r--r--arch/unicore32/kernel/cpu-ucv2.c93
-rw-r--r--arch/unicore32/kernel/debug-macro.S89
-rw-r--r--arch/unicore32/kernel/debug.S85
-rw-r--r--arch/unicore32/kernel/dma.c183
-rw-r--r--arch/unicore32/kernel/early_printk.c59
-rw-r--r--arch/unicore32/kernel/elf.c38
-rw-r--r--arch/unicore32/kernel/entry.S824
-rw-r--r--arch/unicore32/kernel/fpu-ucf64.c126
-rw-r--r--arch/unicore32/kernel/gpio.c122
-rw-r--r--arch/unicore32/kernel/head.S252
-rw-r--r--arch/unicore32/kernel/hibernate.c160
-rw-r--r--arch/unicore32/kernel/hibernate_asm.S117
-rw-r--r--arch/unicore32/kernel/init_task.c44
-rw-r--r--arch/unicore32/kernel/irq.c426
-rw-r--r--arch/unicore32/kernel/ksyms.c99
-rw-r--r--arch/unicore32/kernel/ksyms.h15
-rw-r--r--arch/unicore32/kernel/module.c152
-rw-r--r--arch/unicore32/kernel/pci.c404
-rw-r--r--arch/unicore32/kernel/pm.c123
-rw-r--r--arch/unicore32/kernel/process.c389
-rw-r--r--arch/unicore32/kernel/ptrace.c149
-rw-r--r--arch/unicore32/kernel/puv3-core.c285
-rw-r--r--arch/unicore32/kernel/puv3-nb0916.c145
-rw-r--r--arch/unicore32/kernel/pwm.c263
-rw-r--r--arch/unicore32/kernel/rtc.c380
-rw-r--r--arch/unicore32/kernel/setup.c360
-rw-r--r--arch/unicore32/kernel/setup.h30
-rw-r--r--arch/unicore32/kernel/signal.c494
-rw-r--r--arch/unicore32/kernel/sleep.S202
-rw-r--r--arch/unicore32/kernel/stacktrace.c131
-rw-r--r--arch/unicore32/kernel/sys.c126
-rw-r--r--arch/unicore32/kernel/time.c143
-rw-r--r--arch/unicore32/kernel/traps.c333
-rw-r--r--arch/unicore32/kernel/vmlinux.lds.S61
-rw-r--r--arch/unicore32/lib/Makefile27
-rw-r--r--arch/unicore32/lib/backtrace.S163
-rw-r--r--arch/unicore32/lib/clear_user.S57
-rw-r--r--arch/unicore32/lib/copy_from_user.S108
-rw-r--r--arch/unicore32/lib/copy_page.S39
-rw-r--r--arch/unicore32/lib/copy_template.S214
-rw-r--r--arch/unicore32/lib/copy_to_user.S96
-rw-r--r--arch/unicore32/lib/delay.S51
-rw-r--r--arch/unicore32/lib/findbit.S98
-rw-r--r--arch/unicore32/lib/strncpy_from_user.S45
-rw-r--r--arch/unicore32/lib/strnlen_user.S42
-rw-r--r--arch/unicore32/mm/Kconfig50
-rw-r--r--arch/unicore32/mm/Makefile15
-rw-r--r--arch/unicore32/mm/alignment.c523
-rw-r--r--arch/unicore32/mm/cache-ucv2.S212
-rw-r--r--arch/unicore32/mm/dma-swiotlb.c34
-rw-r--r--arch/unicore32/mm/extable.c24
-rw-r--r--arch/unicore32/mm/fault.c479
-rw-r--r--arch/unicore32/mm/flush.c98
-rw-r--r--arch/unicore32/mm/init.c517
-rw-r--r--arch/unicore32/mm/ioremap.c261
-rw-r--r--arch/unicore32/mm/mm.h39
-rw-r--r--arch/unicore32/mm/mmu.c533
-rw-r--r--arch/unicore32/mm/pgd.c102
-rw-r--r--arch/unicore32/mm/proc-macros.S145
-rw-r--r--arch/unicore32/mm/proc-syms.c23
-rw-r--r--arch/unicore32/mm/proc-ucv2.S134
-rw-r--r--arch/unicore32/mm/tlb-ucv2.S89
-rw-r--r--arch/x86/kernel/amd_nb.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c3
-rw-r--r--arch/x86/pci/xen.c41
-rw-r--r--arch/x86/xen/mmu.c3
-rw-r--r--block/blk-core.c23
-rw-r--r--drivers/cpufreq/cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c123
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c122
-rw-r--r--drivers/edac/amd64_edac.c1442
-rw-r--r--drivers/edac/amd64_edac.h369
-rw-r--r--drivers/edac/amd64_edac_inj.c8
-rw-r--r--drivers/edac/edac_mc_sysfs.c26
-rw-r--r--drivers/edac/mce_amd.c8
-rw-r--r--drivers/edac/mce_amd.h28
-rw-r--r--drivers/gpu/drm/Kconfig47
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_crtc.c33
-rw-r--r--drivers/gpu/drm/drm_drv.c49
-rw-r--r--drivers/gpu/drm/drm_edid.c59
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c5
-rw-r--r--drivers/gpu/drm/drm_gem.c5
-rw-r--r--drivers/gpu/drm/drm_hashtab.c27
-rw-r--r--drivers/gpu/drm/drm_info.c27
-rw-r--r--drivers/gpu/drm/drm_ioctl.c134
-rw-r--r--drivers/gpu/drm/drm_irq.c14
-rw-r--r--drivers/gpu/drm/drm_mm.c570
-rw-r--r--drivers/gpu/drm/drm_modes.c6
-rw-r--r--drivers/gpu/drm/drm_pci.c205
-rw-r--r--drivers/gpu/drm/drm_platform.c75
-rw-r--r--drivers/gpu/drm/drm_stub.c21
-rw-r--r--drivers/gpu/drm/drm_sysfs.c7
-rw-r--r--drivers/gpu/drm/drm_usb.c117
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c18
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c20
-rw-r--r--drivers/gpu/drm/i830/Makefile8
-rw-r--r--drivers/gpu/drm/i830/i830_dma.c1560
-rw-r--r--drivers/gpu/drm/i830/i830_drv.c107
-rw-r--r--drivers/gpu/drm/i830/i830_drv.h295
-rw-r--r--drivers/gpu/drm/i830/i830_irq.c186
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c78
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c40
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c33
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h125
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c406
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c176
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c10
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c214
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h484
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c435
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h301
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c53
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c33
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1847
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c157
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h13
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c11
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c30
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c4
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c41
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c25
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c30
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h29
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c61
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c10
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c2
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c41
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c255
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c75
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c207
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c173
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ramht.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c368
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c50
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_util.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h19
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c19
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv_modes.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c59
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c164
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c191
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h42
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c290
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c199
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c149
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vram.c65
-rw-r--r--drivers/gpu/drm/nouveau/nv84_crypt.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fifo.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_instmem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_vram.c62
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c14
-rw-r--r--drivers/gpu/drm/radeon/Makefile7
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c12
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c55
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.h32
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c16
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c130
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h17
-rw-r--r--drivers/gpu/drm/radeon/ni.c1294
-rw-r--r--drivers/gpu/drm/radeon/nid.h495
-rw-r--r--drivers/gpu/drm/radeon/r100.c16
-rw-r--r--drivers/gpu/drm/radeon/r600.c16
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c1
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c427
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c1
-rw-r--r--drivers/gpu/drm/radeon/r600d.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon.h137
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h87
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c52
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c98
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/cayman619
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/evergreen2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c2
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c14
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c13
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c34
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c12
-rw-r--r--drivers/gpu/drm/via/via_drv.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c5
-rw-r--r--drivers/hwmon/Kconfig92
-rw-r--r--drivers/hwmon/Makefile10
-rw-r--r--drivers/hwmon/f71882fg.c522
-rw-r--r--drivers/hwmon/lineage-pem.c586
-rw-r--r--drivers/hwmon/lis3lv02d_spi.c19
-rw-r--r--drivers/hwmon/lm85.c136
-rw-r--r--drivers/hwmon/ltc4151.c256
-rw-r--r--drivers/hwmon/max16064.c91
-rw-r--r--drivers/hwmon/max34440.c199
-rw-r--r--drivers/hwmon/max6639.c653
-rw-r--r--drivers/hwmon/max8688.c158
-rw-r--r--drivers/hwmon/pmbus.c203
-rw-r--r--drivers/hwmon/pmbus.h313
-rw-r--r--drivers/hwmon/pmbus_core.c1658
-rw-r--r--drivers/hwmon/w83627ehf.c1351
-rw-r--r--drivers/i2c/busses/Kconfig30
-rw-r--r--drivers/i2c/busses/Makefile3
-rw-r--r--drivers/i2c/busses/i2c-mxs.c412
-rw-r--r--drivers/i2c/busses/i2c-puv3.c306
-rw-r--r--drivers/i2c/busses/i2c-tegra.c700
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c26
-rw-r--r--drivers/input/serio/i8042-unicore32io.h73
-rw-r--r--drivers/input/serio/i8042.h2
-rw-r--r--drivers/md/dm-mpath.c22
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h1
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/message/fusion/mptsas.c7
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c7
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/s390/block/dasd_eckd.c1
-rw-r--r--drivers/s390/cio/chsc_sch.c17
-rw-r--r--drivers/s390/cio/cio.c43
-rw-r--r--drivers/s390/cio/cio.h11
-rw-r--r--drivers/s390/cio/css.c6
-rw-r--r--drivers/s390/cio/css.h10
-rw-r--r--drivers/s390/cio/device.c23
-rw-r--r--drivers/s390/cio/io_sch.h114
-rw-r--r--drivers/s390/cio/ioasm.h34
-rw-r--r--drivers/s390/cio/orb.h67
-rw-r--r--drivers/s390/scsi/zfcp_aux.c80
-rw-r--r--drivers/s390/scsi/zfcp_def.h15
-rw-r--r--drivers/s390/scsi/zfcp_erp.c4
-rw-r--r--drivers/s390/scsi/zfcp_ext.h9
-rw-r--r--drivers/s390/scsi/zfcp_fc.c333
-rw-r--r--drivers/s390/scsi/zfcp_fc.h124
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c27
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c71
-rw-r--r--drivers/scsi/Kconfig1
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/NCR5380.c3
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c18
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h4
-rw-r--r--drivers/scsi/be2iscsi/be_main.c3
-rw-r--r--drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h1080
-rw-r--r--drivers/scsi/bnx2fc/Kconfig11
-rw-r--r--drivers/scsi/bnx2fc/Makefile3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h511
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_constants.h206
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h70
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c515
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2535
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c1868
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c1833
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c844
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h4
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c125
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c29
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c53
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c56
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.h19
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c7
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c66
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h5
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c112
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c18
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c7
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c26
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c621
-rw-r--r--drivers/scsi/fcoe/fcoe.h50
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c (renamed from drivers/scsi/fcoe/libfcoe.c)40
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c770
-rw-r--r--drivers/scsi/fcoe/libfcoe.h31
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/vnic_dev.c2
-rw-r--r--drivers/scsi/hpsa.c579
-rw-r--r--drivers/scsi/hpsa.h9
-rw-r--r--drivers/scsi/hpsa_cmd.h4
-rw-r--r--drivers/scsi/ipr.c9
-rw-r--r--drivers/scsi/iscsi_tcp.c134
-rw-r--r--drivers/scsi/iscsi_tcp.h4
-rw-r--r--drivers/scsi/libfc/fc_exch.c111
-rw-r--r--drivers/scsi/libfc/fc_fcp.c39
-rw-r--r--drivers/scsi/libfc/fc_libfc.c120
-rw-r--r--drivers/scsi/libfc/fc_libfc.h14
-rw-r--r--drivers/scsi/libfc/fc_lport.c69
-rw-r--r--drivers/scsi/libfc/fc_npiv.c10
-rw-r--r--drivers/scsi/libfc/fc_rport.c191
-rw-r--r--drivers/scsi/libiscsi.c44
-rw-r--r--drivers/scsi/libsas/Kconfig8
-rw-r--r--drivers/scsi/libsas/Makefile4
-rw-r--r--drivers/scsi/libsas/sas_ata.c80
-rw-r--r--drivers/scsi/libsas/sas_dump.c4
-rw-r--r--drivers/scsi/libsas/sas_dump.h12
-rw-r--r--drivers/scsi/libsas/sas_expander.c5
-rw-r--r--drivers/scsi/libsas/sas_internal.h6
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h15
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c123
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h9
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c49
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c962
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h60
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c173
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c18
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h104
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c134
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c113
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c217
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h10
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c147
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c19
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h5
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h6
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_history.txt384
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c128
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h40
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c20
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c37
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c27
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h10
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c21
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c45
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c191
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/scsi_debug.c192
-rw-r--r--drivers/scsi/scsi_devinfo.c85
-rw-r--r--drivers/scsi/scsi_error.c24
-rw-r--r--drivers/scsi/scsi_lib.c36
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c104
-rw-r--r--drivers/scsi/sd.c229
-rw-r--r--drivers/scsi/sd.h25
-rw-r--r--drivers/target/target_core_cdb.c8
-rw-r--r--drivers/tty/hvc/hvc_xen.c2
-rw-r--r--drivers/tty/serial/sh-sci.c431
-rw-r--r--drivers/tty/serial/sh-sci.h39
-rw-r--r--drivers/video/Kconfig12
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/fb-puv3.c846
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c222
-rw-r--r--drivers/video/sh_mobile_lcdcfb.h4
-rw-r--r--drivers/video/via/chip.h9
-rw-r--r--drivers/video/via/dvi.c4
-rw-r--r--drivers/video/via/hw.c772
-rw-r--r--drivers/video/via/hw.h2
-rw-r--r--drivers/video/via/lcd.c83
-rw-r--r--drivers/video/via/share.h141
-rw-r--r--drivers/video/via/tblDPASetting.c23
-rw-r--r--drivers/video/via/tblDPASetting.h2
-rw-r--r--drivers/video/via/via_i2c.c3
-rw-r--r--drivers/video/via/viafbdev.c6
-rw-r--r--drivers/video/via/viamode.c507
-rw-r--r--drivers/video/via/viamode.h9
-rw-r--r--drivers/video/via/vt1636.c43
-rw-r--r--drivers/watchdog/Kconfig42
-rw-r--r--drivers/watchdog/Makefile5
-rw-r--r--drivers/watchdog/alim1535_wdt.c10
-rw-r--r--drivers/watchdog/alim7101_wdt.c2
-rw-r--r--drivers/watchdog/bcm47xx_wdt.c4
-rw-r--r--drivers/watchdog/bfin_wdt.c4
-rw-r--r--drivers/watchdog/booke_wdt.c19
-rw-r--r--drivers/watchdog/cpwd.c36
-rw-r--r--drivers/watchdog/eurotechwdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c2
-rw-r--r--drivers/watchdog/i6300esb.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c4
-rw-r--r--drivers/watchdog/intel_scu_watchdog.c572
-rw-r--r--drivers/watchdog/intel_scu_watchdog.h66
-rw-r--r--drivers/watchdog/it8712f_wdt.c2
-rw-r--r--drivers/watchdog/it87_wdt.c28
-rw-r--r--drivers/watchdog/jz4740_wdt.c322
-rw-r--r--drivers/watchdog/machzwd.c2
-rw-r--r--drivers/watchdog/max63xx_wdt.c2
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c6
-rw-r--r--drivers/watchdog/mpcore_wdt.c2
-rw-r--r--drivers/watchdog/mtx-1_wdt.c14
-rw-r--r--drivers/watchdog/nv_tco.c2
-rw-r--r--drivers/watchdog/omap_wdt.h2
-rw-r--r--drivers/watchdog/pc87413_wdt.c2
-rw-r--r--drivers/watchdog/pcwd_pci.c2
-rw-r--r--drivers/watchdog/pnx4008_wdt.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c2
-rw-r--r--drivers/watchdog/sbc8360.c2
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c2
-rw-r--r--drivers/watchdog/shwdt.c365
-rw-r--r--drivers/watchdog/smsc37b787_wdt.c4
-rw-r--r--drivers/watchdog/softdog.c2
-rw-r--r--drivers/watchdog/sp5100_tco.c2
-rw-r--r--drivers/watchdog/ts72xx_wdt.c2
-rw-r--r--drivers/watchdog/w83697ug_wdt.c4
-rw-r--r--drivers/watchdog/wdt.c2
-rw-r--r--drivers/watchdog/wdt977.c2
-rw-r--r--drivers/watchdog/wdt_pci.c6
-rw-r--r--drivers/watchdog/xen_wdt.c359
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile6
-rw-r--r--drivers/xen/balloon.c359
-rw-r--r--drivers/xen/events.c439
-rw-r--r--drivers/xen/gntalloc.c545
-rw-r--r--drivers/xen/gntdev.c382
-rw-r--r--drivers/xen/grant-table.c10
-rw-r--r--drivers/xen/manage.c16
-rw-r--r--drivers/xen/xen-balloon.c256
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c12
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h3
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c11
-rw-r--r--fs/ext3/balloc.c21
-rw-r--r--fs/ext3/namei.c2
-rw-r--r--fs/ext3/super.c7
-rw-r--r--fs/jbd/journal.c2
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/client.c131
-rw-r--r--fs/nfs/direct.c8
-rw-r--r--fs/nfs/file.c4
-rw-r--r--fs/nfs/idmap.c90
-rw-r--r--fs/nfs/internal.h22
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4_fs.h28
-rw-r--r--fs/nfs/nfs4filelayout.c361
-rw-r--r--fs/nfs/nfs4filelayout.h19
-rw-r--r--fs/nfs/nfs4filelayoutdev.c252
-rw-r--r--fs/nfs/nfs4proc.c123
-rw-r--r--fs/nfs/nfs4renewd.c6
-rw-r--r--fs/nfs/nfs4state.c6
-rw-r--r--fs/nfs/nfs4xdr.c38
-rw-r--r--fs/nfs/pagelist.c22
-rw-r--r--fs/nfs/pnfs.c330
-rw-r--r--fs/nfs/pnfs.h118
-rw-r--r--fs/nfs/proc.c1
-rw-r--r--fs/nfs/read.c127
-rw-r--r--fs/nfs/super.c284
-rw-r--r--fs/nfs/write.c153
-rw-r--r--fs/quota/quota_v2.c2
-rw-r--r--fs/udf/balloc.c4
-rw-r--r--fs/udf/file.c7
-rw-r--r--fs/udf/inode.c239
-rw-r--r--fs/udf/truncate.c146
-rw-r--r--fs/udf/udfdecl.h12
-rw-r--r--include/asm-generic/ftrace.h16
-rw-r--r--include/asm-generic/io.h33
-rw-r--r--include/asm-generic/sizes.h47
-rw-r--r--include/asm-generic/uaccess.h8
-rw-r--r--include/drm/Kbuild1
-rw-r--r--include/drm/drm.h13
-rw-r--r--include/drm/drmP.h122
-rw-r--r--include/drm/drm_crtc.h13
-rw-r--r--include/drm/drm_hashtab.h6
-rw-r--r--include/drm/drm_mm.h49
-rw-r--r--include/drm/drm_mode.h29
-rw-r--r--include/drm/drm_pciids.h14
-rw-r--r--include/drm/drm_usb.h15
-rw-r--r--include/drm/i830_drm.h342
-rw-r--r--include/drm/i915_drm.h1
-rw-r--r--include/drm/nouveau_drm.h1
-rw-r--r--include/drm/radeon_drm.h1
-rw-r--r--include/drm/ttm/ttm_bo_driver.h6
-rw-r--r--include/drm/ttm/ttm_page_alloc.h8
-rw-r--r--include/linux/cpufreq.h11
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/i2c-tegra.h25
-rw-r--r--include/linux/i2c/max6639.h14
-rw-r--r--include/linux/i2c/pmbus.h45
-rw-r--r--include/linux/mm.h11
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_fs_sb.h4
-rw-r--r--include/linux/nfs_idmap.h9
-rw-r--r--include/linux/nfs_iostat.h2
-rw-r--r--include/linux/nfs_page.h6
-rw-r--r--include/linux/nfs_xdr.h16
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/serial_sci.h16
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/scsi/fc/fc_ns.h11
-rw-r--r--include/scsi/fc_encode.h26
-rw-r--r--include/scsi/libfc.h74
-rw-r--r--include/scsi/libfcoe.h105
-rw-r--r--include/scsi/libiscsi.h8
-rw-r--r--include/scsi/scsi.h5
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/scsi/scsi_transport_iscsi.h6
-rw-r--r--include/trace/events/scsi.h28
-rw-r--r--include/video/sh_mobile_lcdc.h10
-rw-r--r--include/xen/balloon.h25
-rw-r--r--include/xen/events.h24
-rw-r--r--include/xen/gntalloc.h82
-rw-r--r--include/xen/gntdev.h31
-rw-r--r--include/xen/interface/sched.h34
-rw-r--r--include/xen/xenbus.h2
-rw-r--r--kernel/gcov/Kconfig2
-rw-r--r--kernel/smp.c71
-rw-r--r--mm/page_alloc.c4
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--net/sunrpc/clnt.c18
-rw-r--r--net/sunrpc/sched.c29
-rw-r--r--net/sunrpc/xprt.c25
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c86
-rw-r--r--net/sunrpc/xprtrdma/verbs.c53
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h1
756 files changed, 60774 insertions(+), 15603 deletions(-)
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 737988fca64d..e74d0a2eb1cf 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -158,6 +158,17 @@ intensive calculation on your laptop that you do not care how long it
takes to complete as you can 'nice' it and prevent it from taking part
in the deciding process of whether to increase your CPU frequency.
+sampling_down_factor: this parameter controls the rate at which the
+kernel makes a decision on when to decrease the frequency while running
+at top speed. When set to 1 (the default) decisions to reevaluate load
+are made at the same interval regardless of current clock speed. But
+when set to greater than 1 (e.g. 100) it acts as a multiplier for the
+scheduling interval for reevaluating load when the CPU is at its top
+speed due to high load. This improves performance by reducing the overhead
+of load evaluation and helping the CPU stay at its top speed when truly
+busy, rather than shifting back and forth in speed. This tunable has no
+effect on behavior at lower speeds/lower CPU loads.
+
2.5 Conservative
----------------
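
A quick usage sketch for the sampling_down_factor tunable documented above,
assuming the ondemand governor and its usual global sysfs directory (the exact
path can vary with kernel version and configuration):

# cat /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor
1
# echo 100 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_down_factor

With the value 100, load is re-evaluated 100 times less often while the CPU
runs at top speed, matching the multiplier behavior described in the text.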
diff --git a/Documentation/filesystems/nfs/pnfs.txt b/Documentation/filesystems/nfs/pnfs.txt
index bc0b9cfe095b..983e14abe7e9 100644
--- a/Documentation/filesystems/nfs/pnfs.txt
+++ b/Documentation/filesystems/nfs/pnfs.txt
@@ -46,3 +46,10 @@ data server cache
file driver devices refer to data servers, which are kept in a module
level cache. Its reference is held over the lifetime of the deviceid
pointing to it.
+
+lseg
+----
+lseg maintains an extra reference corresponding to the NFS_LSEG_VALID
+bit which holds it in the pnfs_layout_hdr's list. When the final lseg
+is removed from the pnfs_layout_hdr's list, the NFS_LAYOUT_DESTROYED
+bit is set, preventing any new lsegs from being added.
diff --git a/Documentation/hwmon/f71882fg b/Documentation/hwmon/f71882fg
index a7952c2bd959..4d0bc70f1852 100644
--- a/Documentation/hwmon/f71882fg
+++ b/Documentation/hwmon/f71882fg
@@ -10,6 +10,10 @@ Supported chips:
Prefix: 'f71862fg'
Addresses scanned: none, address read from Super I/O config space
Datasheet: Available from the Fintek website
+ * Fintek F71869F and F71869E
+ Prefix: 'f71869'
+ Addresses scanned: none, address read from Super I/O config space
+ Datasheet: Available from the Fintek website
* Fintek F71882FG and F71883FG
Prefix: 'f71882fg'
Addresses scanned: none, address read from Super I/O config space
@@ -17,6 +21,10 @@ Supported chips:
* Fintek F71889FG
Prefix: 'f71889fg'
Addresses scanned: none, address read from Super I/O config space
+ Datasheet: Available from the Fintek website
+ * Fintek F71889ED
+ Prefix: 'f71889ed'
+ Addresses scanned: none, address read from Super I/O config space
Datasheet: Should become available on the Fintek website soon
* Fintek F8000
Prefix: 'f8000'
@@ -29,9 +37,9 @@ Author: Hans de Goede <hdegoede@redhat.com>
Description
-----------
-Fintek F718xxFG/F8000 Super I/O chips include complete hardware monitoring
-capabilities. They can monitor up to 9 voltages (3 for the F8000), 4 fans and
-3 temperature sensors.
+Fintek F718xx/F8000 Super I/O chips include complete hardware monitoring
+capabilities. They can monitor up to 9 voltages, 4 fans and 3 temperature
+sensors.
These chips also have fan controlling features, using either DC or PWM, in
three different modes (one manual, two automatic).
@@ -99,5 +107,5 @@ Writing an unsupported mode will result in an invalid parameter error.
The fan speed is regulated to keep the temp the fan is mapped to between
temp#_auto_point2_temp and temp#_auto_point3_temp.
-Both of the automatic modes require that pwm1 corresponds to fan1, pwm2 to
+All of the automatic modes require that pwm1 corresponds to fan1, pwm2 to
fan2 and pwm3 to fan3.
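
As an illustration of the automatic fan control described above, the trip
points are exposed through standard hwmon sysfs attributes; a sketch, where
the hwmon index and the device/ subdirectory are assumptions about the local
setup (temperatures follow the hwmon convention of millidegrees Celsius):

# cd /sys/class/hwmon/hwmon0/device
# cat temp1_auto_point2_temp temp1_auto_point3_temp
# echo 65000 > temp1_auto_point3_temp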
diff --git a/Documentation/hwmon/lineage-pem b/Documentation/hwmon/lineage-pem
new file mode 100644
index 000000000000..2ba5ed126858
--- /dev/null
+++ b/Documentation/hwmon/lineage-pem
@@ -0,0 +1,77 @@
+Kernel driver lineage-pem
+=========================
+
+Supported devices:
+ * Lineage Compact Power Line Power Entry Modules
+ Prefix: 'lineage-pem'
+ Addresses scanned: -
+ Documentation:
+ http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports various Lineage Compact Power Line DC/DC and AC/DC
+converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
+
+Lineage CPL power entry modules are nominally PMBus compliant. However, most
+standard PMBus commands are not supported. Specifically, all hardware monitoring
+and status reporting commands are non-standard. For this reason, a standard
+PMBus driver can not be used.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for Lineage CPL devices, since there is no register
+which can be safely used to identify the chip. You will have to instantiate
+the devices explicitly.
+
+Example: the following will load the driver for a Lineage PEM at address 0x40
+on I2C bus #1:
+$ modprobe lineage-pem
+$ echo lineage-pem 0x40 > /sys/bus/i2c/devices/i2c-1/new_device
+
+All Lineage CPL power entry modules have a built-in I2C bus master selector
+(PCA9541). To ensure device access, this driver should only be used as a
+client driver to the pca9541 I2C master selector driver.
+
+
+Sysfs entries
+-------------
+
+All Lineage CPL devices report output voltage and device temperature as well as
+alarms for output voltage, temperature, input voltage, input current, input power,
+and fan status.
+
+Input voltage, input current, input power, and fan speed measurement is only
+supported on newer devices. The driver detects if those attributes are supported,
+and only creates respective sysfs entries if they are.
+
+in1_input Output voltage (mV)
+in1_min_alarm Output undervoltage alarm
+in1_max_alarm Output overvoltage alarm
+in1_crit Output voltage critical alarm
+
+in2_input Input voltage (mV, optional)
+in2_alarm Input voltage alarm
+
+curr1_input Input current (mA, optional)
+curr1_alarm Input overcurrent alarm
+
+power1_input Input power (uW, optional)
+power1_alarm Input power alarm
+
+fan1_input Fan 1 speed (rpm, optional)
+fan2_input Fan 2 speed (rpm, optional)
+fan3_input Fan 3 speed (rpm, optional)
+
+temp1_input
+temp1_max
+temp1_crit
+temp1_alarm
+temp1_crit_alarm
+temp1_fault
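
Because several of the attributes above only exist on newer devices,
userspace should test for their presence instead of assuming them; a minimal
sketch (the hwmon index is an assumption):

# for f in in2_input curr1_input power1_input fan1_input; do
>   [ -f /sys/class/hwmon/hwmon1/$f ] && echo "$f $(cat /sys/class/hwmon/hwmon1/$f)"
> done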
diff --git a/Documentation/hwmon/lm85 b/Documentation/hwmon/lm85
index 239258a63c81..7c49feaa79d2 100644
--- a/Documentation/hwmon/lm85
+++ b/Documentation/hwmon/lm85
@@ -26,6 +26,14 @@ Supported chips:
Prefix: 'emc6d102'
Addresses scanned: I2C 0x2c, 0x2d, 0x2e
Datasheet: http://www.smsc.com/main/catalog/emc6d102.html
+ * SMSC EMC6D103
+ Prefix: 'emc6d103'
+ Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+ Datasheet: http://www.smsc.com/main/catalog/emc6d103.html
+ * SMSC EMC6D103S
+ Prefix: 'emc6d103s'
+ Addresses scanned: I2C 0x2c, 0x2d, 0x2e
+ Datasheet: http://www.smsc.com/main/catalog/emc6d103s.html
Authors:
Philip Pokorny <ppokorny@penguincomputing.com>,
@@ -122,9 +130,11 @@ to be register compatible. The EMC6D100 offers all the features of the
EMC6D101 plus additional voltage monitoring and system control features.
Unfortunately it is not possible to distinguish between the package
versions on register level so these additional voltage inputs may read
-zero. The EMC6D102 features addtional ADC bits thus extending precision
+zero. EMC6D102 and EMC6D103 feature additional ADC bits thus extending precision
of voltage and temperature channels.
+SMSC EMC6D103S is similar to EMC6D103, but does not support pwm#_auto_pwm_minctl
+and temp#_auto_temp_off.
Hardware Configurations
-----------------------
diff --git a/Documentation/hwmon/ltc4151 b/Documentation/hwmon/ltc4151
new file mode 100644
index 000000000000..43c667e6677a
--- /dev/null
+++ b/Documentation/hwmon/ltc4151
@@ -0,0 +1,47 @@
+Kernel driver ltc4151
+=====================
+
+Supported chips:
+ * Linear Technology LTC4151
+ Prefix: 'ltc4151'
+ Addresses scanned: -
+ Datasheet:
+ http://www.linear.com/docs/Datasheet/4151fc.pdf
+
+Author: Per Dalen <per.dalen@appeartv.com>
+
+
+Description
+-----------
+
+The LTC4151 is a High Voltage I2C Current and Voltage Monitor.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for LTC4151 devices, since there is no register
+which can be safely used to identify the chip. You will have to instantiate
+the devices explicitly.
+
+Example: the following will load the driver for an LTC4151 at address 0x6f
+on I2C bus #0:
+# modprobe ltc4151
+# echo ltc4151 0x6f > /sys/bus/i2c/devices/i2c-0/new_device
+
+
+Sysfs entries
+-------------
+
+Voltage readings provided by this driver are reported as obtained from the ADIN
+and VIN registers.
+
+Current reading provided by this driver is reported as obtained from the Current
+Sense register. The reported value assumes that a 1 mOhm sense resistor is
+installed.
+
+in1_input VDIN voltage (mV)
+
+in2_input ADIN voltage (mV)
+
+curr1_input SENSE current (mA)
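
Since curr1_input assumes a 1 mOhm sense resistor, a board with a different
resistor needs the reading rescaled in userspace; a sketch assuming a 2 mOhm
resistor and a hypothetical hwmon path:

# RAW=$(cat /sys/class/hwmon/hwmon2/device/curr1_input)
# echo $((RAW / 2))

The scaling is inverse: the true current in mA is the reported value
multiplied by 1 mOhm and divided by the actual sense resistance.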
diff --git a/Documentation/hwmon/max6639 b/Documentation/hwmon/max6639
new file mode 100644
index 000000000000..dc49f8be7167
--- /dev/null
+++ b/Documentation/hwmon/max6639
@@ -0,0 +1,49 @@
+Kernel driver max6639
+=====================
+
+Supported chips:
+ * Maxim MAX6639
+ Prefix: 'max6639'
+ Addresses scanned: I2C 0x2c, 0x2e, 0x2f
+ Datasheet: http://pdfserv.maxim-ic.com/en/ds/MAX6639.pdf
+
+Authors:
+ He Changqing <hechangqing@semptian.com>
+ Roland Stigge <stigge@antcom.de>
+
+Description
+-----------
+
+This driver implements support for the Maxim MAX6639. This chip is a 2-channel
+temperature monitor with dual PWM fan speed controller. It can monitor its own
+temperature and one external diode-connected transistor or two external
+diode-connected transistors.
+
+The following device attributes are implemented via sysfs:
+
+Attribute R/W Contents
+----------------------------------------------------------------------------
+temp1_input R Temperature channel 1 input (0..150 C)
+temp2_input R Temperature channel 2 input (0..150 C)
+temp1_fault R Temperature channel 1 diode fault
+temp2_fault R Temperature channel 2 diode fault
+temp1_max RW Set THERM temperature for input 1
+ (in C, see datasheet)
+temp2_max RW Set THERM temperature for input 2
+temp1_crit RW Set ALERT temperature for input 1
+temp2_crit RW Set ALERT temperature for input 2
+temp1_emergency RW Set OT temperature for input 1
+ (in C, see datasheet)
+temp2_emergency RW Set OT temperature for input 2
+pwm1 RW Fan 1 target duty cycle (0..255)
+pwm2 RW Fan 2 target duty cycle (0..255)
+fan1_input R TACH1 fan tachometer input (in RPM)
+fan2_input R TACH2 fan tachometer input (in RPM)
+fan1_fault R Fan 1 fault
+fan2_fault R Fan 2 fault
+temp1_max_alarm R Alarm on THERM temperature on channel 1
+temp2_max_alarm R Alarm on THERM temperature on channel 2
+temp1_crit_alarm R Alarm on ALERT temperature on channel 1
+temp2_crit_alarm R Alarm on ALERT temperature on channel 2
+temp1_emergency_alarm R Alarm on OT temperature on channel 1
+temp2_emergency_alarm R Alarm on OT temperature on channel 2
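
The pwm1 and pwm2 attributes above take the raw hwmon 0..255 range, so a duty
cycle given in percent must be rescaled before writing; a sketch (hwmon path
assumed):

# PCT=75
# echo $((PCT * 255 / 100)) > /sys/class/hwmon/hwmon3/device/pwm1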
diff --git a/Documentation/hwmon/pmbus b/Documentation/hwmon/pmbus
new file mode 100644
index 000000000000..f2d42e8bdf48
--- /dev/null
+++ b/Documentation/hwmon/pmbus
@@ -0,0 +1,215 @@
+Kernel driver pmbus
+====================
+
+Supported chips:
+ * Ericsson BMR45X series
+ DC/DC Converter
+ Prefixes: 'bmr450', 'bmr451', 'bmr453', 'bmr454'
+ Addresses scanned: -
+ Datasheet:
+ http://archive.ericsson.net/service/internet/picov/get?DocNo=28701-EN/LZT146395
+ * Linear Technology LTC2978
+ Octal PMBus Power Supply Monitor and Controller
+ Prefix: 'ltc2978'
+ Addresses scanned: -
+ Datasheet: http://cds.linear.com/docs/Datasheet/2978fa.pdf
+ * Maxim MAX16064
+ Quad Power-Supply Controller
+ Prefix: 'max16064'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX16064.pdf
+ * Maxim MAX34440
+ PMBus 6-Channel Power-Supply Manager
+ Prefixes: 'max34440'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+ * Maxim MAX34441
+ PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
+ Prefixes: 'max34441'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+ * Maxim MAX8688
+ Digital Power-Supply Controller/Monitor
+ Prefix: 'max8688'
+ Addresses scanned: -
+ Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX8688.pdf
+ * Generic PMBus devices
+ Prefix: 'pmbus'
+ Addresses scanned: -
+ Datasheet: n.a.
+
+Author: Guenter Roeck <guenter.roeck@ericsson.com>
+
+
+Description
+-----------
+
+This driver supports hardware monitoring for various PMBus compliant devices.
+It supports voltage, current, power, and temperature sensors as supported
+by the device.
+
+Each monitored channel has its own high and low limits, plus a critical
+limit.
+
+Fan support will be added in a later version of this driver.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices, since there is no register
+which can be safely used to identify the chip (the MFG_ID register is not
+supported by all chips), and since there is no well-defined address range for
+PMBus devices. You will have to instantiate the devices explicitly.
+
+Example: the following will load the driver for an LTC2978 at address 0x60
+on I2C bus #1:
+$ modprobe pmbus
+$ echo ltc2978 0x60 > /sys/bus/i2c/devices/i2c-1/new_device
+
+
+Platform data support
+---------------------
+
+Support for additional PMBus chips can be added by defining chip parameters in
+a new chip specific driver file. For example, (untested) code to add support for
+Emerson DS1200 power modules might look as follows.
+
+static struct pmbus_driver_info ds1200_info = {
+ .pages = 1,
+ /* Note: All other sensors are in linear mode */
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 3,
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+};
+
+static int ds1200_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &ds1200_info);
+}
+
+static int ds1200_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id ds1200_id[] = {
+ {"ds1200", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, ds1200_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ds1200_driver = {
+ .driver = {
+ .name = "ds1200",
+ },
+ .probe = ds1200_probe,
+ .remove = ds1200_remove,
+ .id_table = ds1200_id,
+};
+
+static int __init ds1200_init(void)
+{
+ return i2c_add_driver(&ds1200_driver);
+}
+
+static void __exit ds1200_exit(void)
+{
+ i2c_del_driver(&ds1200_driver);
+}
+
+
+Sysfs entries
+-------------
+
+When probing the chip, the driver identifies which PMBus registers are
+supported, and determines available sensors from this information.
+Attribute files only exist if the respective sensors are supported by the chip.
+Labels are provided to inform the user about the sensor associated with
+a given sysfs entry.
+
+The following attributes are supported. Limits are read-write; all other
+attributes are read-only.
+
+inX_input Measured voltage. From READ_VIN or READ_VOUT register.
+inX_min Minimum voltage.
+ From VIN_UV_WARN_LIMIT or VOUT_UV_WARN_LIMIT register.
+inX_max Maximum voltage.
+ From VIN_OV_WARN_LIMIT or VOUT_OV_WARN_LIMIT register.
+inX_lcrit Critical minimum voltage.
+ From VIN_UV_FAULT_LIMIT or VOUT_UV_FAULT_LIMIT register.
+inX_crit Critical maximum voltage.
+ From VIN_OV_FAULT_LIMIT or VOUT_OV_FAULT_LIMIT register.
+inX_min_alarm Voltage low alarm. From VOLTAGE_UV_WARNING status.
+inX_max_alarm Voltage high alarm. From VOLTAGE_OV_WARNING status.
+inX_lcrit_alarm Voltage critical low alarm.
+ From VOLTAGE_UV_FAULT status.
+inX_crit_alarm Voltage critical high alarm.
+ From VOLTAGE_OV_FAULT status.
+inX_label "vin", "vcap", or "voutY"
+
+currX_input Measured current. From READ_IIN or READ_IOUT register.
+currX_max Maximum current.
+ From IIN_OC_WARN_LIMIT or IOUT_OC_WARN_LIMIT register.
+currX_lcrit Critical minimum output current.
+ From IOUT_UC_FAULT_LIMIT register.
+currX_crit Critical maximum current.
+ From IIN_OC_FAULT_LIMIT or IOUT_OC_FAULT_LIMIT register.
+currX_alarm Current high alarm.
+ From IIN_OC_WARNING or IOUT_OC_WARNING status.
+currX_lcrit_alarm Output current critical low alarm.
+ From IOUT_UC_FAULT status.
+currX_crit_alarm Current critical high alarm.
+ From IIN_OC_FAULT or IOUT_OC_FAULT status.
+currX_label "iin" or "ioutY"
+
+powerX_input Measured power. From READ_PIN or READ_POUT register.
+powerX_cap Output power cap. From POUT_MAX register.
+powerX_max Power limit. From PIN_OP_WARN_LIMIT or
+ POUT_OP_WARN_LIMIT register.
+powerX_crit Critical output power limit.
+ From POUT_OP_FAULT_LIMIT register.
+powerX_alarm Power high alarm.
+ From PIN_OP_WARNING or POUT_OP_WARNING status.
+powerX_crit_alarm Output power critical high alarm.
+ From POUT_OP_FAULT status.
+powerX_label "pin" or "poutY"
+
+tempX_input Measured temperature.
+ From READ_TEMPERATURE_X register.
+tempX_min Minimum temperature. From UT_WARN_LIMIT register.
+tempX_max Maximum temperature. From OT_WARN_LIMIT register.
+tempX_lcrit Critical low temperature.
+ From UT_FAULT_LIMIT register.
+tempX_crit Critical high temperature.
+ From OT_FAULT_LIMIT register.
+tempX_min_alarm Chip temperature low alarm. Set by comparing
+ READ_TEMPERATURE_X with UT_WARN_LIMIT if
+ TEMP_UT_WARNING status is set.
+tempX_max_alarm Chip temperature high alarm. Set by comparing
+ READ_TEMPERATURE_X with OT_WARN_LIMIT if
+ TEMP_OT_WARNING status is set.
+tempX_lcrit_alarm Chip temperature critical low alarm. Set by comparing
+ READ_TEMPERATURE_X with UT_FAULT_LIMIT if
+ TEMP_UT_FAULT status is set.
+tempX_crit_alarm Chip temperature critical high alarm. Set by comparing
+ READ_TEMPERATURE_X with OT_FAULT_LIMIT if
+ TEMP_OT_FAULT status is set.
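
Since the attribute index for a given sensor depends on which registers the
chip implements, the *_label files listed above are the portable way to
discover what each index measures; a sketch with an assumed hwmon path and
illustrative output:

# grep . /sys/class/hwmon/hwmon4/device/in*_label
in1_label:vin
in2_label:vout1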
diff --git a/Documentation/hwmon/sysfs-interface b/Documentation/hwmon/sysfs-interface
index c6559f153589..83a698773ade 100644
--- a/Documentation/hwmon/sysfs-interface
+++ b/Documentation/hwmon/sysfs-interface
@@ -187,6 +187,17 @@ fan[1-*]_div Fan divisor.
Note that this is actually an internal clock divisor, which
affects the measurable speed range, not the read value.
+fan[1-*]_pulses Number of tachometer pulses per fan revolution.
+ Integer value, typically between 1 and 4.
+ RW
+ This value is a characteristic of the fan connected to the
+ device's input, so it has to be set in accordance with the fan
+ model.
+ Should only be created if the chip has a register to configure
+ the number of pulses. In the absence of such a register (and
+ thus attribute) the value assumed by all devices is 2 pulses
+ per fan revolution.
+
fan[1-*]_target
Desired fan speed
Unit: revolution/min (RPM)
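
The pulses setting matters because the chip converts the tachometer pulse
rate into RPM as rpm = 60 * pulse_rate / pulses_per_revolution, so a mismatch
scales every fan reading. A sketch of matching the attribute to a 4-pulse fan
(hwmon path assumed):

# echo 4 > /sys/class/hwmon/hwmon0/fan1_pulses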
diff --git a/Documentation/hwmon/w83627ehf b/Documentation/hwmon/w83627ehf
index 13d556112fc0..76ffef94ed75 100644
--- a/Documentation/hwmon/w83627ehf
+++ b/Documentation/hwmon/w83627ehf
@@ -5,13 +5,11 @@ Supported chips:
* Winbond W83627EHF/EHG (ISA access ONLY)
Prefix: 'w83627ehf'
Addresses scanned: ISA address retrieved from Super I/O registers
- Datasheet:
- http://www.nuvoton.com.tw/NR/rdonlyres/A6A258F0-F0C9-4F97-81C0-C4D29E7E943E/0/W83627EHF.pdf
+ Datasheet: not available
* Winbond W83627DHG
Prefix: 'w83627dhg'
Addresses scanned: ISA address retrieved from Super I/O registers
- Datasheet:
- http://www.nuvoton.com.tw/NR/rdonlyres/7885623D-A487-4CF9-A47F-30C5F73D6FE6/0/W83627DHG.pdf
+ Datasheet: not available
* Winbond W83627DHG-P
Prefix: 'w83627dhg'
Addresses scanned: ISA address retrieved from Super I/O registers
@@ -24,6 +22,14 @@ Supported chips:
Prefix: 'w83667hg'
Addresses scanned: ISA address retrieved from Super I/O registers
Datasheet: Available from Nuvoton upon request
+ * Nuvoton NCT6775F/W83667HG-I
+ Prefix: 'nct6775'
+ Addresses scanned: ISA address retrieved from Super I/O registers
+ Datasheet: Available from Nuvoton upon request
+ * Nuvoton NCT6776F
+ Prefix: 'nct6776'
+ Addresses scanned: ISA address retrieved from Super I/O registers
+ Datasheet: Available from Nuvoton upon request
Authors:
Jean Delvare <khali@linux-fr.org>
@@ -36,19 +42,28 @@ Description
-----------
This driver implements support for the Winbond W83627EHF, W83627EHG,
-W83627DHG, W83627DHG-P, W83667HG and W83667HG-B super I/O chips.
-We will refer to them collectively as Winbond chips.
-
-The chips implement three temperature sensors, five fan rotation
-speed sensors, ten analog voltage sensors (only nine for the 627DHG), one
-VID (6 pins for the 627EHF/EHG, 8 pins for the 627DHG and 667HG), alarms
-with beep warnings (control unimplemented), and some automatic fan
-regulation strategies (plus manual fan control mode).
+W83627DHG, W83627DHG-P, W83667HG, W83667HG-B, W83667HG-I (NCT6775F),
+and NCT6776F super I/O chips. We will refer to them collectively as
+Winbond chips.
+
+The chips implement three temperature sensors (up to four for 667HG-B, and nine
+for NCT6775F and NCT6776F), five fan rotation speed sensors, ten analog voltage
+sensors (only nine for the 627DHG), one VID (6 pins for the 627EHF/EHG, 8 pins
+for the 627DHG and 667HG), alarms with beep warnings (control unimplemented),
+and some automatic fan regulation strategies (plus manual fan control mode).
+
+The temperature sensor sources on W83667HG-B, NCT6775F, and NCT6776F are
+configurable. temp4 and higher attributes are only reported if their temperature
+source differs from the temperature sources of the already reported temperature
+sensors. The configured source for each of the temperature sensors is provided
+in tempX_label.
Temperatures are measured in degrees Celsius and measurement resolution is 1
-degC for temp1 and 0.5 degC for temp2 and temp3. An alarm is triggered when
-the temperature gets higher than high limit; it stays on until the temperature
-falls below the hysteresis value.
+degC for temp1 and 0.5 degC for temp2 and temp3. For temp4 and higher,
+resolution is 1 degC for W83667HG-B and 0.0 degC for NCT6775F and NCT6776F.
+An alarm is triggered when the temperature gets higher than high limit;
+it stays on until the temperature falls below the hysteresis value.
+Alarms are only supported for temp1, temp2, and temp3.
Fan rotation speeds are reported in RPM (rotations per minute). An alarm is
triggered if the rotation speed has dropped below a programmable limit. Fan
@@ -80,7 +95,8 @@ prog -> pwm4 (not on 667HG and 667HG-B; the programmable setting is not
name - this is a standard hwmon device entry. For the W83627EHF and W83627EHG,
it is set to "w83627ehf", for the W83627DHG it is set to "w83627dhg",
- and for the W83667HG it is set to "w83667hg".
+ for the W83667HG and W83667HG-B it is set to "w83667hg", for NCT6775F it
+ is set to "nct6775", and for NCT6776F it is set to "nct6776".
pwm[1-4] - this file stores PWM duty cycle or DC value (fan speed) in range:
0 (stop) to 255 (full)
@@ -90,6 +106,18 @@ pwm[1-4]_enable - this file controls mode of fan/temperature control:
* 2 "Thermal Cruise" mode
* 3 "Fan Speed Cruise" mode
* 4 "Smart Fan III" mode
+ * 5 "Smart Fan IV" mode
+
+ SmartFan III mode is not supported on NCT6776F.
+
+ SmartFan IV mode is configurable only if it was configured at system
+ startup, and is only supported for W83667HG-B, NCT6775F, and NCT6776F.
+ SmartFan IV operational parameters can not be configured at this time,
+ and the various pwm attributes are not used in SmartFan IV mode.
+ The attributes can be written to, which is useful if you plan to
+ configure the system for a different pwm mode. However, the information
+ returned when reading pwm attributes is unrelated to SmartFan IV
+ operation.
pwm[1-4]_mode - controls if output is PWM or DC level
* 0 DC output (0 - 12v)
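
A sketch of putting a fan under manual control with the attributes above,
assuming mode 1 of pwm[1-4]_enable selects manual mode (as in the unchanged
part of that list) and an assumed hwmon path:

# echo 1 > /sys/class/hwmon/hwmon0/device/pwm1_enable
# echo 128 > /sys/class/hwmon/hwmon0/device/pwm1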
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 738c6fda3fb0..534dbaf9d618 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1580,6 +1580,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
of returning the full 64-bit number.
The default is to return 64-bit inode numbers.
+ nfs.nfs4_disable_idmapping=
+ [NFSv4] When set, this option disables the NFSv4
+ idmapper on the client, but only if the mount
+ is using the 'sec=sys' security flavour. This may
+ make migration from legacy NFSv2/v3 systems easier
+ provided that the server has the appropriate support.
+ The default is to always enable NFSv4 idmapping.
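
			For example (illustrative), idmapping can be
			disabled from the boot command line with:
				nfs.nfs4_disable_idmapping=1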
+
nmi_debug= [KNL,AVR32,SH] Specify one or more actions to take
when a NMI is triggered.
Format: [state][,regs][,debounce][,die]
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index b64d10d221ec..4d9ce73ff730 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,26 @@
+Release Date : Thu. Feb 24, 2011 17:00:00 PST 2011 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 00.00.05.34-rc1
+Old Version : 00.00.05.29-rc1
+ 1. Fix some failure gotos from megasas_probe_one(), etc.
+ 2. Add missing check_and_restore_queue_depth() call in
+ complete_cmd_fusion().
+ 3. Enable MSI-X before calling megasas_init_fw().
+ 4. Call tasklet_schedule() even if outbound_intr_status == 0 for MFI based
+ boards in MSI-X mode.
+ 5. Fix megasas_probe_one() to clear PCI_MSIX_FLAGS_ENABLE in msi control
+ register in kdump kernel.
+ 6. Fix megasas_get_cmd() to only print "Command pool empty" if
+ megasas_dbg_lvl is set.
+ 7. Fix megasas_build_dcdb_fusion() to not filter by TYPE_DISK.
+ 8. Fix megasas_build_dcdb_fusion() to use io_request->LUN[1] field.
+ 9. Add MR_EVT_CFG_CLEARED to megasas_aen_polling().
+ 10. Fix tasklet_init() in megasas_init_fw() to use instancet->tasklet.
+ 11. Fix fault state handling in megasas_transition_to_ready().
+ 12. Fix max_sectors setting for IEEE SGL's.
+ 13. Fix iMR OCR support to work correctly.
+-------------------------------------------------------------------------------
Release Date : Tues. Dec 14, 2010 17:00:00 PST 2010 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/Documentation/scsi/hpsa.txt b/Documentation/scsi/hpsa.txt
index dca658362cbf..891435a72fce 100644
--- a/Documentation/scsi/hpsa.txt
+++ b/Documentation/scsi/hpsa.txt
@@ -28,6 +28,12 @@ boot parameter "hpsa_allow_any=1" is specified, however these are not tested
nor supported by HP with this driver. For older Smart Arrays, the cciss
driver should still be used.
+The "hpsa_simple_mode=1" boot parameter may be used to prevent the driver from
+putting the controller into "performant" mode. The difference is that with simple
+mode, each command completion requires an interrupt, while with "performant mode"
+(the default, and ordinarily better performing) it is possible to have multiple
+command completions indicated by a single interrupt.
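
For example (illustrative): pass "hpsa.hpsa_simple_mode=1" on the kernel
command line if the driver is built in, or "modprobe hpsa hpsa_simple_mode=1"
if it is modular.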
+
HPSA specific entries in /sys
-----------------------------
@@ -39,6 +45,8 @@ HPSA specific entries in /sys
/sys/class/scsi_host/host*/rescan
/sys/class/scsi_host/host*/firmware_revision
+ /sys/class/scsi_host/host*/resettable
+ /sys/class/scsi_host/host*/transport_mode
the host "rescan" attribute is a write only attribute. Writing to this
attribute will cause the driver to scan for new, changed, or removed devices
@@ -55,6 +63,21 @@ HPSA specific entries in /sys
root@host:/sys/class/scsi_host/host4# cat firmware_revision
7.14
+ The transport_mode indicates whether the controller is in "performant"
+ or "simple" mode. This is controlled by the "hpsa_simple_mode" module
+ parameter.
+
+	The "resettable" read-only attribute indicates whether a particular
+	controller is able to honor the "reset_devices" kernel parameter. If the
+	device is resettable, this file will contain a "1", otherwise a "0". This
+	parameter is used by kdump, for example, to reset the controller at
+	driver load time, clearing any outstanding commands and bringing the
+	controller into a known state, so that kdump-initiated i/o is not
+	disrupted by stale commands or other stale state left over from the
+	previous kernel. This attribute enables kexec tools to warn the user if
+	they attempt to designate a device which cannot honor the reset_devices
+	kernel parameter as a dump device.
+
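
For example (values illustrative):

	root@host:/sys/class/scsi_host/host4# cat transport_mode
	performant
	root@host:/sys/class/scsi_host/host4# cat resettable
	1
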
HPSA specific disk attributes:
------------------------------
diff --git a/Documentation/scsi/scsi_mid_low_api.txt b/Documentation/scsi/scsi_mid_low_api.txt
index df322c103466..5f17d29c59b5 100644
--- a/Documentation/scsi/scsi_mid_low_api.txt
+++ b/Documentation/scsi/scsi_mid_low_api.txt
@@ -1343,7 +1343,7 @@ Members of interest:
underruns (overruns should be rare). If possible an LLD
should set 'resid' prior to invoking 'done'. The most
interesting case is data transfers from a SCSI target
- device device (i.e. READs) that underrun.
+ device (e.g. READs) that underrun.
underflow - LLD should place (DID_ERROR << 16) in 'result' if
actual number of bytes transferred is less than this
figure. Not many LLDs implement this check and some that
@@ -1351,6 +1351,18 @@ Members of interest:
report a DID_ERROR. Better for an LLD to implement
'resid'.
+It is recommended that an LLD set 'resid' on data transfers from a SCSI
+target device (e.g. READs). It is especially important that 'resid' is set
+when such data transfers have sense keys of MEDIUM ERROR and HARDWARE ERROR
+(and possibly RECOVERED ERROR). In these cases, if an LLD is in doubt about
+how much data has been received, the safest approach is to indicate that no
+bytes have been received. For example, to indicate that no valid data has
+been received an LLD might use this helper:
+    scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
+where 'SCpnt' is a pointer to a scsi_cmnd object. To indicate that only three
+512-byte blocks have been received, 'resid' could be set like this:
+    scsi_set_resid(SCpnt, scsi_bufflen(SCpnt) - (3 * 512));
+
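Putting the recommendation together, here is a minimal sketch of an LLD
completion path that reports a short transfer. Everything except the scsi_*
helpers (e.g. 'my_lld_complete' and the byte count argument) is an
illustrative assumption, not an in-tree interface:

	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>

	/* Illustrative sketch only. 'nbytes' stands in for however many
	 * bytes the hardware actually transferred.
	 */
	static void my_lld_complete(struct scsi_cmnd *SCpnt,
				    unsigned int nbytes)
	{
		unsigned int requested = scsi_bufflen(SCpnt);

		if (nbytes < requested)		/* underrun */
			scsi_set_resid(SCpnt, requested - nbytes);

		SCpnt->result = DID_OK << 16;
		SCpnt->scsi_done(SCpnt);	/* hand back to the mid layer */
	}
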
The scsi_cmnd structure is defined in include/scsi/scsi_cmnd.h
diff --git a/MAINTAINERS b/MAINTAINERS
index caec208de1ae..ab61fb44b4a3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4907,6 +4907,15 @@ S: Maintained
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
+PKUNITY SOC DRIVERS
+M: Guan Xuetao <gxt@mprc.pku.edu.cn>
+W: http://mprc.pku.edu.cn/~guanxuetao/linux
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+F: drivers/input/serio/i8042-unicore32io.h
+F: drivers/i2c/busses/i2c-puv3.c
+F: drivers/video/fb-puv3.c
+
PMC SIERRA MaxRAID DRIVER
M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
L: linux-scsi@vger.kernel.org
@@ -5350,8 +5359,7 @@ S: Supported
F: drivers/s390/crypto/
S390 ZFCP DRIVER
-M: Christof Schmitt <christof.schmitt@de.ibm.com>
-M: Swen Schillig <swen@vnet.ibm.com>
+M: Steffen Maier <maier@linux.vnet.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -6270,6 +6278,13 @@ F: drivers/uwb/
F: include/linux/uwb.h
F: include/linux/uwb/
+UNICORE32 ARCHITECTURE
+M: Guan Xuetao <gxt@mprc.pku.edu.cn>
+W: http://mprc.pku.edu.cn/~guanxuetao/linux
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+F: arch/unicore32/
+
UNIFDEF
M: Tony Finch <dot@dotat.at>
W: http://dotat.at/prog/unifdef
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
index a6f22920a2c2..22046e2f53c2 100644
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -390,8 +390,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy,
- pm_message_t pmsg)
+static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
diff --git a/arch/arm/mach-s5pv310/cpufreq.c b/arch/arm/mach-s5pv310/cpufreq.c
index b04cbc731128..7c08ad7d8887 100644
--- a/arch/arm/mach-s5pv310/cpufreq.c
+++ b/arch/arm/mach-s5pv310/cpufreq.c
@@ -458,8 +458,7 @@ static int s5pv310_target(struct cpufreq_policy *policy,
}
#ifdef CONFIG_PM
-static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy,
- pm_message_t pmsg)
+static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
diff --git a/arch/arm/plat-s3c24xx/cpu-freq.c b/arch/arm/plat-s3c24xx/cpu-freq.c
index 25a8fc7f512e..eea75ff81d15 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq.c
@@ -433,7 +433,7 @@ static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
static struct cpufreq_frequency_table suspend_pll;
static unsigned int suspend_freq;
-static int s3c_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
{
suspend_pll.frequency = clk_get_rate(_clk_mpll);
suspend_pll.index = __raw_readl(S3C2410_MPLLCON);
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 8987a76e9676..564ab81d6cdc 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -30,6 +30,7 @@ extern struct platform_device jz4740_i2s_device;
extern struct platform_device jz4740_pcm_device;
extern struct platform_device jz4740_codec_device;
extern struct platform_device jz4740_adc_device;
+extern struct platform_device jz4740_wdt_device;
void jz4740_serial_device_register(void);
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index 1cc9e544d16b..10929e2bc6d8 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -289,3 +289,19 @@ void jz4740_serial_device_register(void)
platform_device_register(&jz4740_uart_device);
}
+
+/* Watchdog */
+static struct resource jz4740_wdt_resources[] = {
+ {
+ .start = JZ4740_WDT_BASE_ADDR,
+ .end = JZ4740_WDT_BASE_ADDR + 0x10 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device jz4740_wdt_device = {
+ .name = "jz4740-wdt",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(jz4740_wdt_resources),
+ .resource = jz4740_wdt_resources,
+};
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 415ca6d6b273..04af5f48b4eb 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -429,7 +429,7 @@ static u32 read_gpio(struct device_node *np)
return offset;
}
-static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
{
/* Ok, this could be made a bit smarter, but let's be robust for now. We
* always force a speed change to high speed before sleep, to make sure
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 636bcb81d068..2508a6f31588 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -85,6 +85,7 @@ config S390
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_XZ
select HAVE_GET_USER_PAGES_FAST
select HAVE_ARCH_MUTEX_CPU_RELAX
select ARCH_INLINE_SPIN_TRYLOCK
@@ -341,26 +342,16 @@ config STACK_GUARD
The minimum size for the stack guard should be 256 for 31 bit and
512 for 64 bit.
-config WARN_STACK
+config WARN_DYNAMIC_STACK
def_bool n
- prompt "Emit compiler warnings for function with broken stack usage"
+	prompt "Emit compiler warnings for functions with dynamic stack usage"
help
- This option enables the compiler options -mwarn-framesize and
- -mwarn-dynamicstack. If the compiler supports these options it
- will generate warnings for function which either use alloca or
- create a stack frame bigger than CONFIG_WARN_STACK_SIZE.
+	  This option enables the compiler option -mwarn-dynamicstack. If the
+	  compiler supports this option, it will generate warnings for
+	  functions that dynamically allocate stack space using alloca.
Say N if you are unsure.
-config WARN_STACK_SIZE
- int "Maximum frame size considered safe (128-2048)"
- range 128 2048
- depends on WARN_STACK
- default "2048"
- help
- This allows you to specify the maximum frame size a function may
- have without the compiler complaining about it.
-
config ARCH_POPULATES_NODE_MAP
def_bool y
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2b380df95606..d76cef3fef37 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -31,4 +31,7 @@ config DEBUG_STRICT_USER_COPY_CHECKS
If unsure, or if you run an older (pre 4.4) gcc, say N.
+config DEBUG_SET_MODULE_RONX
+ def_bool y
+ depends on MODULES
endmenu
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index d5b8a6ade525..27a0b5df5ead 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -80,8 +80,7 @@ endif
endif
ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
-cflags-$(CONFIG_WARN_STACK) += -mwarn-dynamicstack
-cflags-$(CONFIG_WARN_STACK) += -mwarn-framesize=$(CONFIG_WARN_STACK_SIZE)
+cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
KBUILD_CFLAGS += -mbackchain -msoft-float $(cflags-y)
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 1c999f726a58..10e22c4ec4a7 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -7,7 +7,8 @@
BITS := $(if $(CONFIG_64BIT),64,31)
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
- vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o sizes.h head$(BITS).o
+ vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \
+ sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += $(cflags-y)
@@ -48,6 +49,7 @@ suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_LZO) := lzo
+suffix-$(CONFIG_KERNEL_XZ) := xz
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
@@ -57,6 +59,8 @@ $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
$(call if_changed,lzma)
$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
$(call if_changed,lzo)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+ $(call if_changed,xzkern)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 2751b3a8a66f..028f23ea81d1 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -19,6 +19,7 @@
#undef memset
#undef memcpy
#undef memmove
+#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))
/* Symbols defined by linker scripts */
@@ -54,6 +55,10 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_unlzo.c"
#endif
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
extern _sclp_print_early(const char *);
int puts(const char *s)
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 7e1f77620624..43a5c78046db 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -8,4 +8,8 @@
void kernel_map_pages(struct page *page, int numpages, int enable);
#endif
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
#endif /* _S390_CACHEFLUSH_H */
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index a922d51df6bf..b09b9c62573e 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -12,6 +12,7 @@
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
+#include <linux/ftrace.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
@@ -71,6 +72,7 @@ static void __machine_kexec(void *data)
void machine_kexec(struct kimage *image)
{
+ tracer_disable();
smp_send_stop();
smp_switch_to_ipl_cpu(__machine_kexec, image);
}
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 6fbc6f3fbdf2..d98fe9004a52 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -6,3 +6,4 @@ obj-y := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o \
page-states.o gup.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_DEBUG_SET_MODULE_RONX) += pageattr.o
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
new file mode 100644
index 000000000000..122ffbd08ce0
--- /dev/null
+++ b/arch/s390/mm/pageattr.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright IBM Corp. 2011
+ * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <asm/pgtable.h>
+
+static void change_page_attr(unsigned long addr, int numpages,
+ pte_t (*set) (pte_t))
+{
+ pte_t *ptep, pte;
+ pmd_t *pmdp;
+ pud_t *pudp;
+ pgd_t *pgdp;
+ int i;
+
+ for (i = 0; i < numpages; i++) {
+ pgdp = pgd_offset(&init_mm, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
+ if (pmd_huge(*pmdp)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+ ptep = pte_offset_kernel(pmdp, addr + i * PAGE_SIZE);
+
+ pte = *ptep;
+ pte = set(pte);
+ ptep_invalidate(&init_mm, addr + i * PAGE_SIZE, ptep);
+ *ptep = pte;
+ }
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ change_page_attr(addr, numpages, pte_wrprotect);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ change_page_attr(addr, numpages, pte_mkwrite);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+/* not possible */
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
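
As a usage sketch, a caller could write-protect a kernel region and later
restore it. The helper below is illustrative only; the in-tree user is the
CONFIG_DEBUG_SET_MODULE_RONX machinery in the module loader:

	#include <linux/pfn.h>
	#include <asm/cacheflush.h>

	/* Illustrative usage; 'addr' must be page aligned and mapped. */
	static void protect_region(unsigned long addr, unsigned long size)
	{
		int numpages = PFN_UP(size);

		set_memory_ro(addr, numpages);	/* write-protect the PTEs */
		/* ... run with the region read-only ... */
		set_memory_rw(addr, numpages);	/* restore write access */
	}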
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 8a9011dced14..2d264fa84959 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -25,6 +25,7 @@ config SUPERH
select GENERIC_ATOMIC64
# Support the deprecated APIs until MFD and GPIOLIB catch up.
select GENERIC_HARDIRQS_NO_DEPRECATED if !MFD_SUPPORT && !GPIOLIB
+ select GENERIC_IRQ_SHOW
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
@@ -434,6 +435,8 @@ config CPU_SUBTYPE_SH7757
select CPU_SH4A
select CPU_SHX2
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select USB_ARCH_HAS_OHCI
+ select USB_ARCH_HAS_EHCI
help
Select SH7757 if you have a SH4A SH7757 CPU.
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index d5ce5e18eb37..9da92ac36533 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -66,6 +66,11 @@ static struct resource sh_eth_resources[] = {
.end = 0xFEE00F7C - 1,
.flags = IORESOURCE_MEM,
}, {
+ .start = 0xFEE01800, /* TSU */
+ .end = 0xFEE01FFF,
+ .flags = IORESOURCE_MEM,
+ }, {
.start = 57, /* irq number */
.flags = IORESOURCE_IRQ,
},
@@ -74,6 +79,8 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 0,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .register_type = SH_ETH_REG_GIGABIT,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device espt_eth_device = {
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index c475f1056ab4..a9e33569ad38 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -15,6 +15,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sh_mmcif.h>
+#include <linux/mfd/sh_mobile_sdhi.h>
#include <cpu/sh7757.h>
#include <asm/sh_eth.h>
#include <asm/heartbeat.h>
@@ -44,6 +47,17 @@ static struct platform_device heartbeat_device = {
};
/* Fast Ethernet */
+#define GBECONT 0xffc10100
+#define GBECONT_RMII1 BIT(17)
+#define GBECONT_RMII0 BIT(16)
+static void sh7757_eth_set_mdio_gate(unsigned long addr)
+{
+ if ((addr & 0x00000fff) < 0x0800)
+ writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
+ else
+ writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
+}
+
static struct resource sh_eth0_resources[] = {
{
.start = 0xfef00000,
@@ -59,6 +73,8 @@ static struct resource sh_eth0_resources[] = {
static struct sh_eth_plat_data sh7757_eth0_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .register_type = SH_ETH_REG_FAST_SH4,
+ .set_mdio_gate = sh7757_eth_set_mdio_gate,
};
static struct platform_device sh7757_eth0_device = {
@@ -86,6 +102,8 @@ static struct resource sh_eth1_resources[] = {
static struct sh_eth_plat_data sh7757_eth1_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .register_type = SH_ETH_REG_FAST_SH4,
+ .set_mdio_gate = sh7757_eth_set_mdio_gate,
};
static struct platform_device sh7757_eth1_device = {
@@ -98,10 +116,173 @@ static struct platform_device sh7757_eth1_device = {
},
};
+static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+{
+ if ((addr & 0x00000fff) < 0x0800) {
+ gpio_set_value(GPIO_PTT4, 1);
+ writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
+ } else {
+ gpio_set_value(GPIO_PTT4, 0);
+ writel(readl(GBECONT) & ~GBECONT_RMII1, GBECONT);
+ }
+}
+
+static struct resource sh_eth_giga0_resources[] = {
+ {
+ .start = 0xfee00000,
+ .end = 0xfee007ff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ /* TSU */
+ .start = 0xfee01800,
+ .end = 0xfee01fff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = 315,
+ .end = 315,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
+ .phy = 18,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .register_type = SH_ETH_REG_GIGABIT,
+ .set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
+ .phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
+};
+
+static struct platform_device sh7757_eth_giga0_device = {
+ .name = "sh-eth",
+ .resource = sh_eth_giga0_resources,
+ .id = 2,
+ .num_resources = ARRAY_SIZE(sh_eth_giga0_resources),
+ .dev = {
+ .platform_data = &sh7757_eth_giga0_pdata,
+ },
+};
+
+static struct resource sh_eth_giga1_resources[] = {
+ {
+ .start = 0xfee00800,
+ .end = 0xfee00fff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = 316,
+ .end = 316,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
+ .phy = 19,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .register_type = SH_ETH_REG_GIGABIT,
+ .set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
+ .phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
+};
+
+static struct platform_device sh7757_eth_giga1_device = {
+ .name = "sh-eth",
+ .resource = sh_eth_giga1_resources,
+ .id = 3,
+ .num_resources = ARRAY_SIZE(sh_eth_giga1_resources),
+ .dev = {
+ .platform_data = &sh7757_eth_giga1_pdata,
+ },
+};
+
+/* SH_MMCIF */
+static struct resource sh_mmcif_resources[] = {
+ [0] = {
+ .start = 0xffcb0000,
+ .end = 0xffcb00ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 211,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = 212,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
+ .chan_priv_tx = SHDMA_SLAVE_MMCIF_TX,
+ .chan_priv_rx = SHDMA_SLAVE_MMCIF_RX,
+};
+
+static struct sh_mmcif_plat_data sh_mmcif_plat = {
+ .dma = &sh7757lcr_mmcif_dma,
+ .sup_pclk = 0x0f,
+ .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
+ .ocr = MMC_VDD_32_33 | MMC_VDD_33_34,
+};
+
+static struct platform_device sh_mmcif_device = {
+ .name = "sh_mmcif",
+ .id = 0,
+ .dev = {
+ .platform_data = &sh_mmcif_plat,
+ },
+ .num_resources = ARRAY_SIZE(sh_mmcif_resources),
+ .resource = sh_mmcif_resources,
+};
+
+/* SDHI0 */
+static struct sh_mobile_sdhi_info sdhi_info = {
+ .dma_slave_tx = SHDMA_SLAVE_SDHI_TX,
+ .dma_slave_rx = SHDMA_SLAVE_SDHI_RX,
+ .tmio_caps = MMC_CAP_SD_HIGHSPEED,
+};
+
+static struct resource sdhi_resources[] = {
+ [0] = {
+ .start = 0xffe50000,
+ .end = 0xffe501ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 20,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device sdhi_device = {
+ .name = "sh_mobile_sdhi",
+ .num_resources = ARRAY_SIZE(sdhi_resources),
+ .resource = sdhi_resources,
+ .id = 0,
+ .dev = {
+ .platform_data = &sdhi_info,
+ },
+};
+
static struct platform_device *sh7757lcr_devices[] __initdata = {
&heartbeat_device,
&sh7757_eth0_device,
&sh7757_eth1_device,
+ &sh7757_eth_giga0_device,
+ &sh7757_eth_giga1_device,
+ &sh_mmcif_device,
+ &sdhi_device,
+};
+
+static struct flash_platform_data spi_flash_data = {
+ .name = "m25p80",
+ .type = "m25px64",
+};
+
+static struct spi_board_info spi_board_info[] = {
+ {
+ .modalias = "m25p80",
+ .max_speed_hz = 25000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .platform_data = &spi_flash_data,
+ },
};
static int __init sh7757lcr_devices_setup(void)
@@ -332,6 +513,10 @@ static int __init sh7757lcr_devices_setup(void)
gpio_request(GPIO_PTT5, NULL); /* eMMC_PRST# */
gpio_direction_output(GPIO_PTT5, 1);
+ /* register SPI device information */
+ spi_register_board_info(spi_board_info,
+ ARRAY_SIZE(spi_board_info));
+
/* General platform */
return platform_add_devices(sh7757lcr_devices,
ARRAY_SIZE(sh7757lcr_devices));
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 701667acfd89..3b71d2190de1 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -142,6 +142,8 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8700 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .register_type = SH_ETH_REG_FAST_SH4,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
.ether_link_active_low = 1
};
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index f64a6918224c..f3d828f133e5 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -75,6 +75,10 @@ static struct resource sh_eth_resources[] = {
.end = 0xFEE00F7C - 1,
.flags = IORESOURCE_MEM,
}, {
+ .start = 0xFEE01800, /* TSU */
+ .end = 0xFEE01FFF,
+ .flags = IORESOURCE_MEM,
+ }, {
.start = 57, /* irq number */
.flags = IORESOURCE_IRQ,
},
@@ -83,6 +87,8 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .register_type = SH_ETH_REG_GIGABIT,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device sh7763rdp_eth_device = {
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index e0b0293bae63..780e083e4d17 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -11,6 +11,8 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz \
OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
+GCOV_PROFILE := n
+
#
# IMAGE_OFFSET is the load offset of the compression loader
#
diff --git a/arch/sh/configs/sh7757lcr_defconfig b/arch/sh/configs/sh7757lcr_defconfig
index 5f7f667b9f3b..fa0ecf87034c 100644
--- a/arch/sh/configs/sh7757lcr_defconfig
+++ b/arch/sh/configs/sh7757lcr_defconfig
@@ -38,7 +38,15 @@ CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_VITESSE_PHY=y
CONFIG_NET_ETHERNET=y
@@ -53,8 +61,17 @@ CONFIG_SERIAL_SH_SCI_NR_UARTS=3
CONFIG_SERIAL_SH_SCI_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_SH=y
# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_MFD_SH_MOBILE_SDHI=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_TMIO=y
+CONFIG_MMC_SH_MMCIF=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_ISO9660_FS=y
diff --git a/arch/sh/drivers/pci/pcie-sh7786.c b/arch/sh/drivers/pci/pcie-sh7786.c
index 96e9b058aa1d..4418f9070ed1 100644
--- a/arch/sh/drivers/pci/pcie-sh7786.c
+++ b/arch/sh/drivers/pci/pcie-sh7786.c
@@ -1,16 +1,19 @@
/*
* Low-Level PCI Express Support for the SH7786
*
- * Copyright (C) 2009 - 2010 Paul Mundt
+ * Copyright (C) 2009 - 2011 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define pr_fmt(fmt) "PCI: " fmt
+
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/async.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -31,7 +34,7 @@ static unsigned int nr_ports;
static struct sh7786_pcie_hwops {
int (*core_init)(void);
- int (*port_init_hw)(struct sh7786_pcie_port *port);
+ async_func_ptr *port_init_hw;
} *sh7786_pcie_hwops;
static struct resource sh7786_pci0_resources[] = {
@@ -474,8 +477,9 @@ static int __init sh7786_pcie_core_init(void)
return test_mode_pin(MODE_PIN12) ? 3 : 2;
}
-static int __init sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
+static void __init sh7786_pcie_init_hw(void *data, async_cookie_t cookie)
{
+ struct sh7786_pcie_port *port = data;
int ret;
/*
@@ -488,18 +492,30 @@ static int __init sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
* Setup clocks, needed both for PHY and PCIe registers.
*/
ret = pcie_clk_init(port);
- if (unlikely(ret < 0))
- return ret;
+ if (unlikely(ret < 0)) {
+ pr_err("clock initialization failed for port#%d\n",
+ port->index);
+ return;
+ }
ret = phy_init(port);
- if (unlikely(ret < 0))
- return ret;
+ if (unlikely(ret < 0)) {
+ pr_err("phy initialization failed for port#%d\n",
+ port->index);
+ return;
+ }
ret = pcie_init(port);
- if (unlikely(ret < 0))
- return ret;
+ if (unlikely(ret < 0)) {
+ pr_err("core initialization failed for port#%d\n",
+ port->index);
+ return;
+ }
- return register_pci_controller(port->hose);
+ /* In the interest of preserving device ordering, synchronize */
+ async_synchronize_cookie(cookie);
+
+ register_pci_controller(port->hose);
}
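
The hunk above follows the kernel's async pattern: schedule each port's
initialization in parallel, then use the cookie to serialize only the
ordering-sensitive registration step. A distilled, illustrative sketch of
that pattern (none of these names are the driver's own):

	#include <linux/async.h>
	#include <linux/init.h>
	#include <linux/kernel.h>

	/* Illustrative sketch of the async init pattern. */
	static struct my_port {
		int index;
	} ports[2];

	static void __init my_port_init(void *data, async_cookie_t cookie)
	{
		struct my_port *port = data;

		/* ... order-insensitive bring-up runs in parallel ... */

		/* Wait for all earlier async work so the ordering-sensitive
		 * registration happens in port order.
		 */
		async_synchronize_cookie(cookie);

		pr_info("port %d registered\n", port->index);
	}

	static int __init my_init(void)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(ports); i++)
			async_schedule(my_port_init, &ports[i]);

		async_synchronize_full();	/* all ports done past here */
		return 0;
	}
	device_initcall(my_init);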
static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
@@ -510,7 +526,7 @@ static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
static int __init sh7786_pcie_init(void)
{
struct clk *platclk;
- int ret = 0, i;
+ int i;
printk(KERN_NOTICE "PCI: Starting initialization.\n");
@@ -552,14 +568,10 @@ static int __init sh7786_pcie_init(void)
port->hose = sh7786_pci_channels + i;
port->hose->io_map_base = port->hose->resources[0].start;
- ret |= sh7786_pcie_hwops->port_init_hw(port);
+ async_schedule(sh7786_pcie_hwops->port_init_hw, port);
}
- if (unlikely(ret)) {
- clk_disable(platclk);
- clk_put(platclk);
- return ret;
- }
+ async_synchronize_full();
return 0;
}
diff --git a/arch/sh/include/asm/unistd_32.h b/arch/sh/include/asm/unistd_32.h
index d6741fca89a4..b5a74e88028d 100644
--- a/arch/sh/include/asm/unistd_32.h
+++ b/arch/sh/include/asm/unistd_32.h
@@ -369,8 +369,11 @@
#define __NR_recvmsg 356
#define __NR_recvmmsg 357
#define __NR_accept4 358
+#define __NR_name_to_handle_at 359
+#define __NR_open_by_handle_at 360
+#define __NR_clock_adjtime 361
-#define NR_syscalls 359
+#define NR_syscalls 362
#ifdef __KERNEL__
diff --git a/arch/sh/include/asm/unistd_64.h b/arch/sh/include/asm/unistd_64.h
index 09aa93f9eb70..953da4a52199 100644
--- a/arch/sh/include/asm/unistd_64.h
+++ b/arch/sh/include/asm/unistd_64.h
@@ -390,10 +390,13 @@
#define __NR_fanotify_init 367
#define __NR_fanotify_mark 368
#define __NR_prlimit64 369
+#define __NR_name_to_handle_at 370
+#define __NR_open_by_handle_at 371
+#define __NR_clock_adjtime 372
#ifdef __KERNEL__
-#define NR_syscalls 370
+#define NR_syscalls 373
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h
index 9a6125eb0079..18fa80aba15e 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma-register.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h
@@ -40,6 +40,11 @@
#define CHCR_TS_LOW_SHIFT 3
#define CHCR_TS_HIGH_MASK 0
#define CHCR_TS_HIGH_SHIFT 0
+#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
+#define CHCR_TS_LOW_MASK 0x00000018
+#define CHCR_TS_LOW_SHIFT 3
+#define CHCR_TS_HIGH_MASK 0x00100000
+#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
#define CHCR_TS_LOW_MASK 0x00000018
#define CHCR_TS_LOW_SHIFT 3
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h
index 15f3de11c55a..05b8196c7753 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7757.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h
@@ -251,4 +251,36 @@ enum {
GPIO_FN_ON_DQ3, GPIO_FN_ON_DQ2, GPIO_FN_ON_DQ1, GPIO_FN_ON_DQ0,
};
+enum {
+ SHDMA_SLAVE_SDHI_TX,
+ SHDMA_SLAVE_SDHI_RX,
+ SHDMA_SLAVE_MMCIF_TX,
+ SHDMA_SLAVE_MMCIF_RX,
+ SHDMA_SLAVE_SCIF2_TX,
+ SHDMA_SLAVE_SCIF2_RX,
+ SHDMA_SLAVE_SCIF3_TX,
+ SHDMA_SLAVE_SCIF3_RX,
+ SHDMA_SLAVE_SCIF4_TX,
+ SHDMA_SLAVE_SCIF4_RX,
+ SHDMA_SLAVE_RIIC0_TX,
+ SHDMA_SLAVE_RIIC0_RX,
+ SHDMA_SLAVE_RIIC1_TX,
+ SHDMA_SLAVE_RIIC1_RX,
+ SHDMA_SLAVE_RIIC2_TX,
+ SHDMA_SLAVE_RIIC2_RX,
+ SHDMA_SLAVE_RIIC3_TX,
+ SHDMA_SLAVE_RIIC3_RX,
+ SHDMA_SLAVE_RIIC4_TX,
+ SHDMA_SLAVE_RIIC4_RX,
+ SHDMA_SLAVE_RIIC5_TX,
+ SHDMA_SLAVE_RIIC5_RX,
+ SHDMA_SLAVE_RIIC6_TX,
+ SHDMA_SLAVE_RIIC6_RX,
+ SHDMA_SLAVE_RIIC7_TX,
+ SHDMA_SLAVE_RIIC7_RX,
+ SHDMA_SLAVE_RIIC8_TX,
+ SHDMA_SLAVE_RIIC8_RX,
+ SHDMA_SLAVE_RIIC9_TX,
+ SHDMA_SLAVE_RIIC9_RX,
+};
#endif /* __ASM_SH7757_H__ */
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
index e073e3eb4c3d..eedddad13835 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c
@@ -77,9 +77,10 @@ struct clk div4_clks[DIV4_NR] = {
#define MSTPCR0 0xffc80030
#define MSTPCR1 0xffc80034
+#define MSTPCR2 0xffc10028
enum { MSTP004, MSTP000, MSTP114, MSTP113, MSTP112,
- MSTP111, MSTP110, MSTP103, MSTP102,
+ MSTP111, MSTP110, MSTP103, MSTP102, MSTP220,
MSTP_NR };
static struct clk mstp_clks[MSTP_NR] = {
@@ -95,6 +96,9 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0),
[MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0),
[MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0),
+
+ /* MSTPCR2 */
+ [MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0),
};
#define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
@@ -140,6 +144,7 @@ static struct clk_lookup lookups[] = {
.clk = &mstp_clks[MSTP110],
},
CLKDEV_CON_ID("usb0", &mstp_clks[MSTP102]),
+ CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]),
};
int __init arch_clk_init(void)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
index 9c1de2633ac3..423dabf542d3 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c
@@ -1,7 +1,7 @@
/*
* SH7757 Setup
*
- * Copyright (C) 2009 Renesas Solutions Corp.
+ * Copyright (C) 2009, 2011 Renesas Solutions Corp.
*
* based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt
*
@@ -16,6 +16,10 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sh_timer.h>
+#include <linux/sh_dma.h>
+
+#include <cpu/dma-register.h>
+#include <cpu/sh7757.h>
static struct plat_sci_port scif2_platform_data = {
.mapbase = 0xfe4b0000, /* SCIF2 */
@@ -124,12 +128,548 @@ static struct platform_device tmu1_device = {
.num_resources = ARRAY_SIZE(tmu1_resources),
};
+static struct resource spi0_resources[] = {
+ [0] = {
+ .start = 0xfe002000,
+ .end = 0xfe0020ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = 86,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* DMA */
+static const struct sh_dmae_slave_config sh7757_dmae0_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SDHI_TX,
+ .addr = 0x1fe50030,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc5,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SDHI_RX,
+ .addr = 0x1fe50030,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_16BIT),
+ .mid_rid = 0xc6,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_MMCIF_TX,
+ .addr = 0x1fcb0034,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd3,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_MMCIF_RX,
+ .addr = 0x1fcb0034,
+ .chcr = DM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_32BIT),
+ .mid_rid = 0xd7,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_SCIF2_TX,
+ .addr = 0x1f4b000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF2_RX,
+ .addr = 0x1f4b0014,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF3_TX,
+ .addr = 0x1f4c000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF3_RX,
+ .addr = 0x1f4c0014,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF4_TX,
+ .addr = 0x1f4d000c,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x41,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_SCIF4_RX,
+ .addr = 0x1f4d0014,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x42,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_RIIC0_TX,
+ .addr = 0x1e500012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC0_RX,
+ .addr = 0x1e500013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC1_TX,
+ .addr = 0x1e510012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC1_RX,
+ .addr = 0x1e510013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC2_TX,
+ .addr = 0x1e520012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa1,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC2_RX,
+ .addr = 0x1e520013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xa2,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC3_TX,
+ .addr = 0x1e530012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xab,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC3_RX,
+ .addr = 0x1e530013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xaf,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC4_TX,
+ .addr = 0x1e540012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xc1,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC4_RX,
+ .addr = 0x1e540013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0xc2,
+ },
+};
+
+static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = {
+ {
+ .slave_id = SHDMA_SLAVE_RIIC5_TX,
+ .addr = 0x1e550012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x21,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC5_RX,
+ .addr = 0x1e550013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x22,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC6_TX,
+ .addr = 0x1e560012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x29,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC6_RX,
+ .addr = 0x1e560013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x2a,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC7_TX,
+ .addr = 0x1e570012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x41,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC7_RX,
+ .addr = 0x1e570013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x42,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC8_TX,
+ .addr = 0x1e580012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x45,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC8_RX,
+ .addr = 0x1e580013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x46,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC9_TX,
+ .addr = 0x1e590012,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x51,
+ },
+ {
+ .slave_id = SHDMA_SLAVE_RIIC9_RX,
+ .addr = 0x1e590013,
+ .chcr = SM_INC | 0x800 | 0x40000000 |
+ TS_INDEX2VAL(XMIT_SZ_8BIT),
+ .mid_rid = 0x52,
+ },
+};
+
+static const struct sh_dmae_channel sh7757_dmae_channels[] = {
+ {
+ .offset = 0,
+ .dmars = 0,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x10,
+ .dmars = 0,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x20,
+ .dmars = 4,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x30,
+ .dmars = 4,
+ .dmars_bit = 8,
+ }, {
+ .offset = 0x50,
+ .dmars = 8,
+ .dmars_bit = 0,
+ }, {
+ .offset = 0x60,
+ .dmars = 8,
+ .dmars_bit = 8,
+ }
+};
+
+static const unsigned int ts_shift[] = TS_SHIFT;
+
+static struct sh_dmae_pdata dma0_platform_data = {
+ .slave = sh7757_dmae0_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae0_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma1_platform_data = {
+ .slave = sh7757_dmae1_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae1_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma2_platform_data = {
+ .slave = sh7757_dmae2_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae2_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+static struct sh_dmae_pdata dma3_platform_data = {
+ .slave = sh7757_dmae3_slaves,
+ .slave_num = ARRAY_SIZE(sh7757_dmae3_slaves),
+ .channel = sh7757_dmae_channels,
+ .channel_num = ARRAY_SIZE(sh7757_dmae_channels),
+ .ts_low_shift = CHCR_TS_LOW_SHIFT,
+ .ts_low_mask = CHCR_TS_LOW_MASK,
+ .ts_high_shift = CHCR_TS_HIGH_SHIFT,
+ .ts_high_mask = CHCR_TS_HIGH_MASK,
+ .ts_shift = ts_shift,
+ .ts_shift_num = ARRAY_SIZE(ts_shift),
+ .dmaor_init = DMAOR_INIT,
+};
+
+/* channel 0 to 5 */
+static struct resource sh7757_dmae0_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff608020,
+ .end = 0xff60808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff609000,
+ .end = 0xff60900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 34,
+ .end = 34,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+/* channel 6 to 11 */
+static struct resource sh7757_dmae1_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff618020,
+ .end = 0xff61808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff619000,
+ .end = 0xff61900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMA error */
+ .start = 34,
+ .end = 34,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 4 */
+ .start = 46,
+ .end = 46,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 5 */
+ .start = 46,
+ .end = 46,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 6 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 7 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 8 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 9 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 10 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+ {
+		/* IRQ for channel 11 */
+ .start = 88,
+ .end = 88,
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
+ },
+};
+
+/* channel 12 to 17 */
+static struct resource sh7757_dmae2_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff708020,
+ .end = 0xff70808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff709000,
+ .end = 0xff70900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMA error */
+ .start = 323,
+ .end = 323,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 12 to 16 */
+ .start = 272,
+ .end = 276,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channel 17 */
+ .start = 279,
+ .end = 279,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* channel 18 to 23 */
+static struct resource sh7757_dmae3_resources[] = {
+ [0] = {
+ /* Channel registers and DMAOR */
+ .start = 0xff718020,
+ .end = 0xff71808f,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMARSx */
+ .start = 0xff719000,
+ .end = 0xff71900b,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ /* DMA error */
+ .start = 324,
+ .end = 324,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channels 18 to 22 */
+ .start = 280,
+ .end = 284,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ /* IRQ for channel 23 */
+ .start = 288,
+ .end = 288,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device dma0_device = {
+ .name = "sh-dma-engine",
+ .id = 0,
+ .resource = sh7757_dmae0_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae0_resources),
+ .dev = {
+ .platform_data = &dma0_platform_data,
+ },
+};
+
+static struct platform_device dma1_device = {
+ .name = "sh-dma-engine",
+ .id = 1,
+ .resource = sh7757_dmae1_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae1_resources),
+ .dev = {
+ .platform_data = &dma1_platform_data,
+ },
+};
+
+static struct platform_device dma2_device = {
+ .name = "sh-dma-engine",
+ .id = 2,
+ .resource = sh7757_dmae2_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae2_resources),
+ .dev = {
+ .platform_data = &dma2_platform_data,
+ },
+};
+
+static struct platform_device dma3_device = {
+ .name = "sh-dma-engine",
+ .id = 3,
+ .resource = sh7757_dmae3_resources,
+ .num_resources = ARRAY_SIZE(sh7757_dmae3_resources),
+ .dev = {
+ .platform_data = &dma3_platform_data,
+ },
+};
+
+static struct platform_device spi0_device = {
+ .name = "sh_spi",
+ .id = 0,
+ .dev = {
+ .dma_mask = NULL,
+ .coherent_dma_mask = 0xffffffff,
+ },
+ .num_resources = ARRAY_SIZE(spi0_resources),
+ .resource = spi0_resources,
+};
+
static struct platform_device *sh7757_devices[] __initdata = {
&scif2_device,
&scif3_device,
&scif4_device,
&tmu0_device,
&tmu1_device,
+ &dma0_device,
+ &dma1_device,
+ &dma2_device,
+ &dma3_device,
+ &spi0_device,
};
static int __init sh7757_devices_setup(void)
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index c19e2a940e3f..e4469e7233cb 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -75,7 +75,7 @@ void sh_mobile_setup_cpuidle(void)
i = CPUIDLE_DRIVER_STATE_START;
state = &dev->states[i++];
- snprintf(state->name, CPUIDLE_NAME_LEN, "C0");
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
state->exit_latency = 1;
state->target_residency = 1 * 2;
@@ -88,7 +88,7 @@ void sh_mobile_setup_cpuidle(void)
if (sh_mobile_sleep_supported & SUSP_SH_SF) {
state = &dev->states[i++];
- snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
strncpy(state->desc, "SuperH Sleep Mode [SF]",
CPUIDLE_DESC_LEN);
state->exit_latency = 100;
@@ -101,7 +101,7 @@ void sh_mobile_setup_cpuidle(void)
if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
state = &dev->states[i++];
- snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
+ snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
CPUIDLE_DESC_LEN);
state->exit_latency = 2300;
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 68ecbe6c881a..64ea0b165399 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -34,9 +34,9 @@ void ack_bad_irq(unsigned int irq)
#if defined(CONFIG_PROC_FS)
/*
- * /proc/interrupts printing:
+ * /proc/interrupts printing for arch-specific interrupts
*/
-static int show_other_interrupts(struct seq_file *p, int prec)
+int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
@@ -49,63 +49,6 @@ static int show_other_interrupts(struct seq_file *p, int prec)
return 0;
}
-
-int show_interrupts(struct seq_file *p, void *v)
-{
- unsigned long flags, any_count = 0;
- int i = *(loff_t *)v, j, prec;
- struct irqaction *action;
- struct irq_desc *desc;
- struct irq_data *data;
- struct irq_chip *chip;
-
- if (i > nr_irqs)
- return 0;
-
- for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
- j *= 10;
-
- if (i == nr_irqs)
- return show_other_interrupts(p, prec);
-
- if (i == 0) {
- seq_printf(p, "%*s", prec + 8, "");
- for_each_online_cpu(j)
- seq_printf(p, "CPU%-8d", j);
- seq_putc(p, '\n');
- }
-
- desc = irq_to_desc(i);
- if (!desc)
- return 0;
-
- data = irq_get_irq_data(i);
- chip = irq_data_get_irq_chip(data);
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- for_each_online_cpu(j)
- any_count |= kstat_irqs_cpu(i, j);
- action = desc->action;
- if (!action && !any_count)
- goto out;
-
- seq_printf(p, "%*d: ", prec, i);
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
- seq_printf(p, " %14s", chip->name);
- seq_printf(p, "-%-8s", desc->name);
-
- if (action) {
- seq_printf(p, " %s", action->name);
- while ((action = action->next) != NULL)
- seq_printf(p, ", %s", action->name);
- }
-
- seq_putc(p, '\n');
-out:
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- return 0;
-}
#endif
#ifdef CONFIG_IRQSTACKS
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 6fc347ebe59d..768fb33fdd35 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -376,3 +376,6 @@ ENTRY(sys_call_table)
.long sys_recvmsg
.long sys_recvmmsg
.long sys_accept4
+ .long sys_name_to_handle_at
+ .long sys_open_by_handle_at /* 360 */
+ .long sys_clock_adjtime
diff --git a/arch/sh/kernel/syscalls_64.S b/arch/sh/kernel/syscalls_64.S
index 66585708ce90..44e7b00c8067 100644
--- a/arch/sh/kernel/syscalls_64.S
+++ b/arch/sh/kernel/syscalls_64.S
@@ -396,3 +396,6 @@ sys_call_table:
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
+ .long sys_name_to_handle_at /* 370 */
+ .long sys_open_by_handle_at
+ .long sys_clock_adjtime
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 150aa326afff..2228c8cee4d6 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -42,6 +42,8 @@ obj-$(CONFIG_IOREMAP_FIXED) += ioremap_fixed.o
obj-$(CONFIG_UNCACHED_MAPPING) += uncached.o
obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
+GCOV_PROFILE_pmb.o := n
+
# Special flags for fault_64.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.
# This is required because the code is called from a context in entry.S where
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 95695e97703e..e48f471be547 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -51,6 +51,7 @@ config SPARC64
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_GENERIC_HARDIRQS
+ select GENERIC_HARDIRQS_NO_DEPRECATED
config ARCH_DEFCONFIG
string
@@ -460,6 +461,39 @@ config SPARC_LEON
from www.gaisler.com. You can download a sparc-linux cross-compilation
toolchain at www.gaisler.com.
+if SPARC_LEON
+menu "U-Boot options"
+
+config UBOOT_LOAD_ADDR
+ hex "uImage Load Address"
+ default 0x40004000
+ ---help---
+ U-Boot kernel load address, the address in physical address space
+ where u-boot will place the Linux kernel before booting it.
+ This address is normally the base address of main memory + 0x4000.
+
+config UBOOT_FLASH_ADDR
+ hex "uImage.o Load Address"
+ default 0x00080000
+ ---help---
+	  An optional setting that only affects the uImage.o ELF image, which
+	  is used to download the uImage file to the target with an ELF loader
+	  other than U-Boot. It may for example be used to download a uImage
+	  to FLASH with the GRMON utility before U-Boot has even been started.
+
+config UBOOT_ENTRY_ADDR
+ hex "uImage Entry Address"
+ default 0xf0004000
+ ---help---
+ Do not change this unless you know what you're doing. This is
+ hardcoded by the SPARC32 and LEON port.
+
+ This is the virtual address u-boot jumps to when booting the Linux
+ Kernel.
+
+endmenu
+endif
+
endmenu
menu "Bus options (PCI etc.)"
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 113225b241e0..ad1fb5d969f3 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -88,7 +88,7 @@ boot := arch/sparc/boot
# Default target
all: zImage
-image zImage tftpboot.img vmlinux.aout: vmlinux
+image zImage uImage tftpboot.img vmlinux.aout: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
archclean:
@@ -102,6 +102,7 @@ ifeq ($(ARCH),sparc)
define archhelp
echo '* image - kernel image ($(boot)/image)'
echo '* zImage - stripped kernel image ($(boot)/zImage)'
+ echo ' uImage - U-Boot SPARC32 Image (only for LEON)'
echo ' tftpboot.img - image prepared for tftp'
endef
else
diff --git a/arch/sparc/boot/Makefile b/arch/sparc/boot/Makefile
index a2c5898c1ab1..9205416b1e67 100644
--- a/arch/sparc/boot/Makefile
+++ b/arch/sparc/boot/Makefile
@@ -5,6 +5,7 @@
ROOT_IMG := /usr/src/root.img
ELFTOAOUT := elftoaout
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
hostprogs-y := piggyback btfixupprep
targets := tftpboot.img btfix.o btfix.S image zImage vmlinux.aout
@@ -77,6 +78,36 @@ $(obj)/zImage: $(obj)/image
$(obj)/vmlinux.aout: vmlinux FORCE
$(call if_changed,elftoaout)
@echo ' kernel: $@ is ready'
+else
+
+# The following lines build an image readable by U-Boot.
+# uImage - binary image read by U-Boot
+# uImage.o - ELF object wrapping uImage, for loading with a
+# flash programmer that understands ELF.
+
+OBJCOPYFLAGS_image.bin := -S -O binary -R .note -R .comment
+$(obj)/image.bin: $(obj)/image FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/image.gz: $(obj)/image.bin
+ $(call if_changed,gzip)
+
+quiet_cmd_uimage = UIMAGE $@
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A sparc -O linux -T kernel \
+ -C gzip -a $(CONFIG_UBOOT_LOAD_ADDR) \
+ -e $(CONFIG_UBOOT_ENTRY_ADDR) -n 'Linux-$(KERNELRELEASE)' \
+ -d $< $@
+
+quiet_cmd_uimage.o = UIMAGE.O $@
+ cmd_uimage.o = $(LD) -Tdata $(CONFIG_UBOOT_FLASH_ADDR) \
+ -r -b binary $@ -o $@.o
+
+targets += uImage
+$(obj)/uImage: $(obj)/image.gz
+ $(call if_changed,uimage)
+ $(call if_changed,uimage.o)
+ @echo ' Image $@ is ready'
+
endif
$(obj)/tftpboot.img: $(obj)/image $(obj)/piggyback System.map $(ROOT_IMG) FORCE
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index a0b443cb3c1f..4f09666f0798 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -33,34 +33,34 @@
/* The largest number of unique interrupt sources we support.
* If this needs to ever be larger than 255, you need to change
- * the type of ino_bucket->virt_irq as appropriate.
+ * the type of ino_bucket->irq as appropriate.
*
- * ino_bucket->virt_irq allocation is made during {sun4v_,}build_irq().
+ * ino_bucket->irq allocation is made during {sun4v_,}build_irq().
*/
#define NR_IRQS 255
-extern void irq_install_pre_handler(int virt_irq,
+extern void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2);
#define irq_canonicalize(irq) (irq)
extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
+extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end);
-extern void sun4v_destroy_msi(unsigned int virt_irq);
-extern unsigned int sun4u_build_msi(u32 portid, unsigned int *virt_irq_p,
+extern void sun4v_destroy_msi(unsigned int irq);
+extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
unsigned int msi_devino_start,
unsigned int msi_devino_end,
unsigned long imap_base,
unsigned long iclr_base);
-extern void sun4u_destroy_msi(unsigned int virt_irq);
+extern void sun4u_destroy_msi(unsigned int irq);
-extern unsigned char virt_irq_alloc(unsigned int dev_handle,
+extern unsigned char irq_alloc(unsigned int dev_handle,
unsigned int dev_ino);
#ifdef CONFIG_PCI_MSI
-extern void virt_irq_free(unsigned int virt_irq);
+extern void irq_free(unsigned int irq);
#endif
extern void __init init_IRQ(void);
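
The renamed irq_alloc() keeps its unsigned char return type, which is what ties NR_IRQS to 255 in the comment above: zero doubles as the failure value, and every valid number must fit the byte-sized return. A sketch of the lookup-or-allocate pattern build_irq() uses; bucket_get_irq()/bucket_set_irq() are internal to irq_64.c and appear here only for illustration:

/* Sketch: the lookup-or-allocate contract behind build_irq().
 * A zero return from irq_alloc() means the 255-entry table is full.
 */
static unsigned int example_map_ino(struct ino_bucket *bucket, int ino)
{
        unsigned int irq = bucket_get_irq(__pa(bucket));

        if (!irq) {
                irq = irq_alloc(0, ino);        /* 0 on exhaustion */
                if (irq)
                        bucket_set_irq(__pa(bucket), irq);
        }
        return irq;
}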
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 8580d1764f90..c04f96fb753c 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -375,9 +375,6 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
extern unsigned int real_irq_entry[], smpleon_ticker[];
extern unsigned int patchme_maybe_smp_msg[];
-extern unsigned long trapbase_cpu1[];
-extern unsigned long trapbase_cpu2[];
-extern unsigned long trapbase_cpu3[];
extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
extern unsigned int linux_trap_ipi15_sun4m[];
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index 263c719e96f5..e50f326e71bd 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -180,6 +180,7 @@ struct amba_ahb_device {
struct device_node;
void _amba_init(struct device_node *dp, struct device_node ***nextp);
+extern unsigned long amba_system_id;
extern struct leon3_irqctrl_regs_map *leon3_irqctrl_regs;
extern struct leon3_gptimer_regs_map *leon3_gptimer_regs;
extern struct amba_apb_device leon_percpu_timer_dev[16];
@@ -254,6 +255,11 @@ extern unsigned int sparc_leon_eirq;
#define GAISLER_L2C 0xffe /* internal device: leon2compat */
#define GAISLER_PLUGPLAY 0xfff /* internal device: plug & play configarea */
+/* Chip IDs */
+#define AEROFLEX_UT699 0x0699
+#define LEON4_NEXTREME1 0x0102
+#define GAISLER_GR712RC 0x0712
+
#define amba_vendor(x) (((x) >> 24) & 0xff)
#define amba_device(x) (((x) >> 12) & 0xfff)
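
These chip IDs are matched against the GRLIB "systemid" word that leon_kernel.c captures into amba_system_id later in this series; the device ID sits in the upper 16 bits and the build ID in the lower 16. A small sketch of the decode, assuming that layout:

/* Sketch: splitting the GRLIB systemid word the way leon_pmc.c
 * consumes it (amba_system_id >> 16).
 */
static inline unsigned int leon_chip_id(unsigned long systemid)
{
        return (systemid >> 16) & 0xffff;       /* e.g. GAISLER_GR712RC */
}

static inline unsigned int leon_build_id(unsigned long systemid)
{
        return systemid & 0xffff;               /* GRLIB build revision */
}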
diff --git a/arch/sparc/include/asm/mmu_32.h b/arch/sparc/include/asm/mmu_32.h
index ccd36d26615a..6f056e535cf8 100644
--- a/arch/sparc/include/asm/mmu_32.h
+++ b/arch/sparc/include/asm/mmu_32.h
@@ -4,4 +4,7 @@
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
+/* mm/srmmu.c */
+extern ctxd_t *srmmu_ctx_table_phys;
+
#endif
diff --git a/arch/sparc/include/asm/smp_32.h b/arch/sparc/include/asm/smp_32.h
index 841905c10215..d82d7f4c0a79 100644
--- a/arch/sparc/include/asm/smp_32.h
+++ b/arch/sparc/include/asm/smp_32.h
@@ -29,10 +29,16 @@
*/
extern unsigned char boot_cpu_id;
+extern volatile unsigned long cpu_callin_map[NR_CPUS];
+extern cpumask_t smp_commenced_mask;
+extern struct linux_prom_registers smp_penguin_ctable;
typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
+void cpu_panic(void);
+extern void smp4m_irq_rotate(int cpu);
+
/*
* General functions that each host system must provide.
*/
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 599398fbbc7c..99aa4db6e9c2 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -42,7 +42,6 @@ obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
obj-$(CONFIG_SPARC32) += devices.o
obj-$(CONFIG_SPARC32) += tadpole.o
-obj-$(CONFIG_SPARC32) += tick14.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
obj-y += una_asm_$(BITS).o
@@ -54,6 +53,7 @@ obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o
+obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 0dc714fa23d8..7925c54f4133 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -324,7 +324,7 @@ void __cpuinit cpu_probe(void)
psr = get_psr();
put_psr(psr | PSR_EF);
#ifdef CONFIG_SPARC_LEON
- fpu_vers = 7;
+ fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
#else
fpu_vers = ((get_fsr() >> 17) & 0x7);
#endif
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index c011b932bb17..d1f1361c4167 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -213,8 +213,8 @@ extern struct cheetah_err_info *cheetah_error_log;
struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;
- /* Virtual interrupt number assigned to this INO. */
-/*0x08*/unsigned int __virt_irq;
+ /* Interrupt number assigned to this INO. */
+/*0x08*/unsigned int __irq;
/*0x0c*/unsigned int __pad;
};
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 72509d0e34be..6f01e8c83197 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -333,13 +333,10 @@ static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma)
{
struct iommu *iommu;
- iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
- iopte = iommu->page_table +
- ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 41f7e4e0f72a..c6ce9a6a4790 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -50,10 +50,14 @@
#include <asm/io-unit.h>
#include <asm/leon.h>
-#ifdef CONFIG_SPARC_LEON
-#define mmu_inval_dma_area(p, l) leon_flush_dcache_all()
-#else
+#ifndef CONFIG_SPARC_LEON
#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */
+#else
+static inline void mmu_inval_dma_area(void *va, unsigned long len)
+{
+ if (!sparc_leon3_snooping_enabled())
+ leon_flush_dcache_all();
+}
#endif
static struct resource *_sparc_find_resource(struct resource *r,
@@ -254,7 +258,7 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *dma_addrp, gfp_t gfp)
{
struct platform_device *op = to_platform_device(dev);
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
+ unsigned long len_total = PAGE_ALIGN(len);
unsigned long va;
struct resource *res;
int order;
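
The PAGE_ALIGN() conversions in this file are purely cosmetic: the macro expands to the same (x + PAGE_SIZE - 1) & PAGE_MASK round-up that was open-coded before. A standalone illustration of the arithmetic:

#include <assert.h>

#define MY_PAGE_SIZE  4096UL
#define MY_PAGE_MASK  (~(MY_PAGE_SIZE - 1))
#define MY_PAGE_ALIGN(x) (((x) + MY_PAGE_SIZE - 1) & MY_PAGE_MASK)

int main(void)
{
        /* round-up behaviour, identical to the open-coded form */
        assert(MY_PAGE_ALIGN(1)    == 4096);
        assert(MY_PAGE_ALIGN(4096) == 4096);
        assert(MY_PAGE_ALIGN(4097) == 8192);
        return 0;
}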
@@ -280,7 +284,8 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total);
goto err_nova;
}
- mmu_inval_dma_area(va, len_total);
+ mmu_inval_dma_area((void *)va, len_total);
+
// XXX The mmu_map_dma_area does this for us below, see comments.
// sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
/*
@@ -297,9 +302,9 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len,
err_noiommu:
release_resource(res);
err_nova:
- free_pages(va, order);
-err_nomem:
kfree(res);
+err_nomem:
+ free_pages(va, order);
err_nopages:
return NULL;
}
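
The relabelled error path above fixes a real unwind bug: control falls through from err_nova to err_nomem to err_nopages, so each label must release exactly what was acquired before the corresponding jump, in reverse order. The general shape, as a standalone sketch with stub allocators:

#include <stdlib.h>

static void *alloc_pages_stub(void)      { return malloc(4096); }
static void *alloc_resource_stub(void)   { return malloc(64); }
static int map_stub(void *p, void *r)    { (void)p; (void)r; return 0; }
static void free_resource_stub(void *r)  { free(r); }
static void free_pages_stub(void *p)     { free(p); }

/* Each label undoes exactly the allocations that succeeded before
 * the corresponding goto; control falls through downwards.
 */
static void *example_alloc(void)
{
        void *pages, *res;

        pages = alloc_pages_stub();
        if (!pages)
                goto err_nopages;
        res = alloc_resource_stub();
        if (!res)
                goto err_nomem;
        if (map_stub(pages, res) != 0)
                goto err_nova;
        return res;             /* success: caller owns both */

err_nova:
        free_resource_stub(res);
err_nomem:
        free_pages_stub(pages);
err_nopages:
        return NULL;
}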
@@ -321,7 +326,7 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
+ n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n",
(long)((res->end-res->start)+1), n);
@@ -408,9 +413,6 @@ struct dma_map_ops sbus_dma_ops = {
.sync_sg_for_device = sbus_sync_sg_for_device,
};
-struct dma_map_ops *dma_ops = &sbus_dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
static int __init sparc_register_ioport(void)
{
register_proc_sparc_ioport();
@@ -422,7 +424,9 @@ arch_initcall(sparc_register_ioport);
#endif /* CONFIG_SBUS */
-#ifdef CONFIG_PCI
+
+/* LEON reuses PCI DMA ops */
+#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
@@ -430,8 +434,8 @@ arch_initcall(sparc_register_ioport);
static void *pci32_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *pba, gfp_t gfp)
{
- unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK;
- unsigned long va;
+ unsigned long len_total = PAGE_ALIGN(len);
+ void *va;
struct resource *res;
int order;
@@ -443,34 +447,34 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len,
}
order = get_order(len_total);
- va = __get_free_pages(GFP_KERNEL, order);
- if (va == 0) {
+ va = (void *) __get_free_pages(GFP_KERNEL, order);
+ if (va == NULL) {
printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
- return NULL;
+ goto err_nopages;
}
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- free_pages(va, order);
printk("pci_alloc_consistent: no core\n");
- return NULL;
+ goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
- free_pages(va, order);
- kfree(res);
- return NULL;
+ goto err_nova;
}
mmu_inval_dma_area(va, len_total);
-#if 0
-/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n",
- (long)va, (long)res->start, (long)virt_to_phys(va), len_total);
-#endif
sparc_mapiorange(0, virt_to_phys(va), res->start, len_total);
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
+
+err_nova:
+ kfree(res);
+err_nomem:
+ free_pages((unsigned long)va, order);
+err_nopages:
+ return NULL;
}
/* Free and unmap a consistent DMA buffer.
@@ -485,7 +489,7 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
dma_addr_t ba)
{
struct resource *res;
- unsigned long pgp;
+ void *pgp;
if ((res = _sparc_find_resource(&_sparc_dvma,
(unsigned long)p)) == NULL) {
@@ -498,21 +502,21 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p,
return;
}
- n = (n + PAGE_SIZE-1) & PAGE_MASK;
+ n = PAGE_ALIGN(n);
if ((res->end-res->start)+1 != n) {
printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
(long)((res->end-res->start)+1), (long)n);
return;
}
- pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */
+ pgp = phys_to_virt(ba); /* bus_to_virt actually */
mmu_inval_dma_area(pgp, n);
sparc_unmapiorange((unsigned long)p, n);
release_resource(res);
kfree(res);
- free_pages(pgp, get_order(n));
+ free_pages((unsigned long)pgp, get_order(n));
}
/*
@@ -527,6 +531,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
return page_to_phys(page) + offset;
}
+static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ if (dir != PCI_DMA_TODEVICE)
+ mmu_inval_dma_area(phys_to_virt(ba), PAGE_ALIGN(size));
+}
+
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
@@ -572,9 +583,8 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(page_address(sg_page(sg)),
+ PAGE_ALIGN(sg->length));
}
}
}
@@ -593,8 +603,8 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(phys_to_virt(ba),
+ PAGE_ALIGN(size));
}
}
@@ -602,8 +612,8 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
size_t size, enum dma_data_direction dir)
{
if (dir != PCI_DMA_TODEVICE) {
- mmu_inval_dma_area((unsigned long)phys_to_virt(ba),
- (size + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(phys_to_virt(ba),
+ PAGE_ALIGN(size));
}
}
@@ -622,9 +632,8 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(page_address(sg_page(sg)),
+ PAGE_ALIGN(sg->length));
}
}
}
@@ -638,9 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
if (dir != PCI_DMA_TODEVICE) {
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
- mmu_inval_dma_area(
- (unsigned long) page_address(sg_page(sg)),
- (sg->length + PAGE_SIZE-1) & PAGE_MASK);
+ mmu_inval_dma_area(page_address(sg_page(sg)),
+ PAGE_ALIGN(sg->length));
}
}
}
@@ -649,6 +657,7 @@ struct dma_map_ops pci32_dma_ops = {
.alloc_coherent = pci32_alloc_coherent,
.free_coherent = pci32_free_coherent,
.map_page = pci32_map_page,
+ .unmap_page = pci32_unmap_page,
.map_sg = pci32_map_sg,
.unmap_sg = pci32_unmap_sg,
.sync_single_for_cpu = pci32_sync_single_for_cpu,
@@ -658,7 +667,16 @@ struct dma_map_ops pci32_dma_ops = {
};
EXPORT_SYMBOL(pci32_dma_ops);
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */
+
+#ifdef CONFIG_SPARC_LEON
+struct dma_map_ops *dma_ops = &pci32_dma_ops;
+#elif defined(CONFIG_SBUS)
+struct dma_map_ops *dma_ops = &sbus_dma_ops;
+#endif
+
+EXPORT_SYMBOL(dma_ops);
+
/*
* Return whether the given PCI device DMA address mask can be
@@ -717,7 +735,7 @@ static const struct file_operations sparc_io_proc_fops = {
static struct resource *_sparc_find_resource(struct resource *root,
unsigned long hit)
{
- struct resource *tmp;
+ struct resource *tmp;
for (tmp = root->child; tmp != 0; tmp = tmp->sibling) {
if (tmp->start <= hit && tmp->end >= hit)
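
With the sbus_dma_ops export removed above, the global dma_ops pointer is now chosen once at build time, and LEON (which reuses pci32_dma_ops) takes precedence over SBUS when both are configured. Every generic DMA call then dispatches through that table; a sketch of the indirection, using the alloc_coherent member as declared in this era's struct dma_map_ops:

/* Sketch: generic code reaching pci32_alloc_coherent() once
 * dma_ops points at pci32_dma_ops.
 */
static void *example_alloc_coherent(struct device *dev, size_t len,
                                    dma_addr_t *handle, gfp_t gfp)
{
        return dma_ops->alloc_coherent(dev, len, handle, gfp);
}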
diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h
index db7513881530..008453b798ec 100644
--- a/arch/sparc/kernel/irq.h
+++ b/arch/sparc/kernel/irq.h
@@ -1,5 +1,41 @@
+#include <linux/platform_device.h>
+
#include <asm/btfixup.h>
+/* sun4m specific type definitions */
+
+/* This maps direct to CPU specific interrupt registers */
+struct sun4m_irq_percpu {
+ u32 pending;
+ u32 clear;
+ u32 set;
+};
+
+/* This maps direct to global interrupt registers */
+struct sun4m_irq_global {
+ u32 pending;
+ u32 mask;
+ u32 mask_clear;
+ u32 mask_set;
+ u32 interrupt_target;
+};
+
+extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
+extern struct sun4m_irq_global __iomem *sun4m_irq_global;
+
+/*
+ * Platform-specific irq configuration.
+ * Each platform assigns its specific callbacks
+ * from its init function.
+ */
+struct sparc_irq_config {
+ void (*init_timers)(irq_handler_t);
+ unsigned int (*build_device_irq)(struct platform_device *op,
+ unsigned int real_irq);
+};
+extern struct sparc_irq_config sparc_irq_config;
+
+
/* Dave Redman (djhr@tadpole.co.uk)
* changed these to function pointers.. it saves cycles and will allow
* the irq dependencies to be split into different files at a later date
@@ -45,12 +81,6 @@ static inline void load_profile_irq(int cpu, int limit)
BTFIXUP_CALL(load_profile_irq)(cpu, limit);
}
-extern void (*sparc_init_timers)(irq_handler_t lvl10_irq);
-
-extern void claim_ticker14(irq_handler_t irq_handler,
- int irq,
- unsigned int timeout);
-
#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, set_cpu_int, int, int)
BTFIXUPDEF_CALL(void, clear_cpu_int, int, int)
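
sparc_irq_config replaces the old sparc_init_timers function pointer with a small per-platform ops table that each init_IRQ hook fills in (leon_init_IRQ below does the init_timers half). A hedged sketch of a platform wiring it up; the example_* names are illustrative, not part of this patch:

/* Sketch: a platform filling in sparc_irq_config from its init hook. */
static void example_init_timers(irq_handler_t counter_fn)
{
        /* program the level-10 system timer and hook counter_fn */
}

static unsigned int example_build_device_irq(struct platform_device *op,
                                             unsigned int real_irq)
{
        return real_irq;        /* identity map, the irq_32.c default */
}

void __init example_platform_init_IRQ(void)
{
        sparc_irq_config.init_timers      = example_init_timers;
        sparc_irq_config.build_device_irq = example_build_device_irq;
}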
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 5ad6e5c5dbb3..7c93df4099cb 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -1,8 +1,8 @@
/*
- * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the
- * Sparc the IRQs are basically 'cast in stone'
- * and you are supposed to probe the prom's device
- * node trees to find out who's got which IRQ.
+ * Interrupt request handling routines. On the
+ * Sparc the IRQs are basically 'cast in stone'
+ * and you are supposed to probe the prom's device
+ * node trees to find out who's got which IRQ.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
@@ -11,40 +11,11 @@
* Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/delay.h>
-#include <linux/threads.h>
-#include <linux/spinlock.h>
#include <linux/seq_file.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/smp.h>
-#include <asm/vaddrs.h>
-#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/pcic.h>
#include <asm/cacheflush.h>
-#include <asm/irq_regs.h>
+#include <asm/pcic.h>
#include <asm/leon.h>
#include "kernel.h"
@@ -57,6 +28,10 @@
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */
+
+/* platform specific irq setup */
+struct sparc_irq_config sparc_irq_config;
+
unsigned long arch_local_irq_save(void)
{
unsigned long retval;
@@ -128,15 +103,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
*
*/
-static void irq_panic(void)
-{
- extern char *cputypval;
- prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
- prom_halt();
-}
-void (*sparc_init_timers)(irq_handler_t ) =
- (void (*)(irq_handler_t )) irq_panic;
/*
* Dave Redman (djhr@tadpole.co.uk)
@@ -145,7 +112,7 @@ void (*sparc_init_timers)(irq_handler_t ) =
* instead, because some of the devices attach very early, I do something
* equally sucky but at least we'll never try to free statically allocated
* space or call kmalloc before kmalloc_init :(.
- *
+ *
* In fact it's the timer10 that attaches first.. then timer14
* then kmalloc_init is called.. then the tty interrupts attach.
* hmmm....
@@ -166,22 +133,20 @@ DEFINE_SPINLOCK(irq_action_lock);
int show_interrupts(struct seq_file *p, void *v)
{
- int i = *(loff_t *) v;
- struct irqaction * action;
+ int i = *(loff_t *)v;
+ struct irqaction *action;
unsigned long flags;
#ifdef CONFIG_SMP
int j;
#endif
- if (sparc_cpu_model == sun4d) {
- extern int show_sun4d_interrupts(struct seq_file *, void *);
-
+ if (sparc_cpu_model == sun4d)
return show_sun4d_interrupts(p, v);
- }
+
spin_lock_irqsave(&irq_action_lock, flags);
if (i < NR_IRQS) {
action = sparc_irq[i].action;
- if (!action)
+ if (!action)
goto out_unlock;
seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
@@ -195,7 +160,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, " %c %s",
(action->flags & IRQF_DISABLED) ? '+' : ' ',
action->name);
- for (action=action->next; action; action = action->next) {
+ for (action = action->next; action; action = action->next) {
seq_printf(p, ",%s %s",
(action->flags & IRQF_DISABLED) ? " +" : "",
action->name);
@@ -209,22 +174,20 @@ out_unlock:
void free_irq(unsigned int irq, void *dev_id)
{
- struct irqaction * action;
+ struct irqaction *action;
struct irqaction **actionp;
- unsigned long flags;
+ unsigned long flags;
unsigned int cpu_irq;
-
+
if (sparc_cpu_model == sun4d) {
- extern void sun4d_free_irq(unsigned int, void *);
-
sun4d_free_irq(irq, dev_id);
return;
}
cpu_irq = irq & (NR_IRQS - 1);
- if (cpu_irq > 14) { /* 14 irq levels on the sparc */
- printk("Trying to free bogus IRQ %d\n", irq);
- return;
- }
+ if (cpu_irq > 14) { /* 14 irq levels on the sparc */
+ printk(KERN_ERR "Trying to free bogus IRQ %d\n", irq);
+ return;
+ }
spin_lock_irqsave(&irq_action_lock, flags);
@@ -232,7 +195,7 @@ void free_irq(unsigned int irq, void *dev_id)
action = *actionp;
if (!action->handler) {
- printk("Trying to free free IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
goto out_unlock;
}
if (dev_id) {
@@ -242,19 +205,21 @@ void free_irq(unsigned int irq, void *dev_id)
actionp = &action->next;
}
if (!action) {
- printk("Trying to free free shared IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free shared IRQ%d\n",
+ irq);
goto out_unlock;
}
} else if (action->flags & IRQF_SHARED) {
- printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
+ printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
+ irq);
goto out_unlock;
}
- if (action->flags & SA_STATIC_ALLOC)
- {
- /* This interrupt is marked as specially allocated
+ if (action->flags & SA_STATIC_ALLOC) {
+ /*
+ * This interrupt is marked as specially allocated
* so it is a bad idea to free it.
*/
- printk("Attempt to free statically allocated IRQ%d (%s)\n",
+ printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
goto out_unlock;
}
@@ -275,7 +240,6 @@ void free_irq(unsigned int irq, void *dev_id)
out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
-
EXPORT_SYMBOL(free_irq);
/*
@@ -297,64 +261,62 @@ void synchronize_irq(unsigned int irq)
EXPORT_SYMBOL(synchronize_irq);
#endif /* SMP */
-void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
+void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
{
- int i;
- struct irqaction * action;
+ int i;
+ struct irqaction *action;
unsigned int cpu_irq;
-
+
cpu_irq = irq & (NR_IRQS - 1);
action = sparc_irq[cpu_irq].action;
- printk("IO device interrupt, irq = %d\n", irq);
- printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
+ printk(KERN_ERR "IO device interrupt, irq = %d\n", irq);
+ printk(KERN_ERR "PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
regs->npc, regs->u_regs[14]);
if (action) {
- printk("Expecting: ");
- for (i = 0; i < 16; i++)
- if (action->handler)
- printk("[%s:%d:0x%x] ", action->name,
- (int) i, (unsigned int) action->handler);
+ printk(KERN_ERR "Expecting: ");
+ for (i = 0; i < 16; i++)
+ if (action->handler)
+ printk(KERN_CONT "[%s:%d:0x%x] ", action->name,
+ i, (unsigned int)action->handler);
}
- printk("AIEEE\n");
+ printk(KERN_ERR "AIEEE\n");
panic("bogus interrupt received");
}
-void handler_irq(int irq, struct pt_regs * regs)
+void handler_irq(int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
- struct irqaction * action;
+ struct irqaction *action;
int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- extern void smp4m_irq_rotate(int cpu);
-#endif
old_regs = set_irq_regs(regs);
irq_enter();
- disable_pil_irq(irq);
+ disable_pil_irq(pil);
#ifdef CONFIG_SMP
/* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
- if((sparc_cpu_model==sun4m) && (irq < 10))
+	if ((sparc_cpu_model == sun4m) && (pil < 10))
smp4m_irq_rotate(cpu);
#endif
- action = sparc_irq[irq].action;
- sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
- kstat_cpu(cpu).irqs[irq]++;
+ action = sparc_irq[pil].action;
+ sparc_irq[pil].flags |= SPARC_IRQ_INPROGRESS;
+ kstat_cpu(cpu).irqs[pil]++;
do {
if (!action || !action->handler)
- unexpected_irq(irq, NULL, regs);
- action->handler(irq, action->dev_id);
+ unexpected_irq(pil, NULL, regs);
+ action->handler(pil, action->dev_id);
action = action->next;
} while (action);
- sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
- enable_pil_irq(irq);
+ sparc_irq[pil].flags &= ~SPARC_IRQ_INPROGRESS;
+ enable_pil_irq(pil);
irq_exit();
set_irq_regs(old_regs);
}
#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
-/* Fast IRQs on the Sparc can only have one routine attached to them,
+/*
+ * Fast IRQs on the Sparc can only have one routine attached to them,
* thus no sharing possible.
*/
static int request_fast_irq(unsigned int irq,
@@ -367,15 +329,13 @@ static int request_fast_irq(unsigned int irq,
int ret;
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
struct tt_entry *trap_table;
- extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif
-
cpu_irq = irq & (NR_IRQS - 1);
- if(cpu_irq > 14) {
+ if (cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
- if(!handler) {
+ if (!handler) {
ret = -EINVAL;
goto out;
}
@@ -383,34 +343,33 @@ static int request_fast_irq(unsigned int irq,
spin_lock_irqsave(&irq_action_lock, flags);
action = sparc_irq[cpu_irq].action;
- if(action) {
- if(action->flags & IRQF_SHARED)
+ if (action) {
+ if (action->flags & IRQF_SHARED)
panic("Trying to register fast irq when already shared.\n");
- if(irqflags & IRQF_SHARED)
+ if (irqflags & IRQF_SHARED)
panic("Trying to register fast irq as shared.\n");
/* Anyway, someone already owns it so cannot be made fast. */
- printk("request_fast_irq: Trying to register yet already owned.\n");
+ printk(KERN_ERR "request_fast_irq: Trying to register yet already owned.\n");
ret = -EBUSY;
goto out_unlock;
}
- /* If this is flagged as statically allocated then we use our
+ /*
+ * If this is flagged as statically allocated then we use our
* private struct which is never freed.
*/
if (irqflags & SA_STATIC_ALLOC) {
- if (static_irq_count < MAX_STATIC_ALLOC)
- action = &static_irqaction[static_irq_count++];
- else
- printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
- irq, devname);
+ if (static_irq_count < MAX_STATIC_ALLOC)
+ action = &static_irqaction[static_irq_count++];
+ else
+ printk(KERN_ERR "Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
}
-
+
if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
+ action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ if (!action) {
ret = -ENOMEM;
goto out_unlock;
}
@@ -426,9 +385,12 @@ static int request_fast_irq(unsigned int irq,
INSTANTIATE(sparc_ttable)
#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
- trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
- trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu1;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu2;
+ INSTANTIATE(trap_table)
+ trap_table = &trapbase_cpu3;
+ INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
/*
@@ -454,7 +416,8 @@ out:
return ret;
}
-/* These variables are used to access state from the assembler
+/*
+ * These variables are used to access state from the assembler
* interrupt handler, floppy_hardint, so we cannot put these in
* the floppy driver image because that would not work in the
* modular case.
@@ -477,8 +440,6 @@ EXPORT_SYMBOL(pdma_base);
unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);
-extern void floppy_hardint(void);
-
static irq_handler_t floppy_irq_handler;
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
@@ -494,9 +455,11 @@ void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
irq_exit();
enable_pil_irq(irq);
set_irq_regs(old_regs);
- // XXX Eek, it's totally changed with preempt_count() and such
- // if (softirq_pending(cpu))
- // do_softirq();
+ /*
+ * XXX Eek, it's totally changed with preempt_count() and such
+ * if (softirq_pending(cpu))
+ * do_softirq();
+ */
}
int sparc_floppy_request_irq(int irq, unsigned long flags,
@@ -511,21 +474,18 @@ EXPORT_SYMBOL(sparc_floppy_request_irq);
int request_irq(unsigned int irq,
irq_handler_t handler,
- unsigned long irqflags, const char * devname, void *dev_id)
+ unsigned long irqflags, const char *devname, void *dev_id)
{
- struct irqaction * action, **actionp;
+ struct irqaction *action, **actionp;
unsigned long flags;
unsigned int cpu_irq;
int ret;
-
- if (sparc_cpu_model == sun4d) {
- extern int sun4d_request_irq(unsigned int,
- irq_handler_t ,
- unsigned long, const char *, void *);
+
+ if (sparc_cpu_model == sun4d)
return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
- }
+
cpu_irq = irq & (NR_IRQS - 1);
- if(cpu_irq > 14) {
+ if (cpu_irq > 14) {
ret = -EINVAL;
goto out;
}
@@ -533,7 +493,7 @@ int request_irq(unsigned int irq,
ret = -EINVAL;
goto out;
}
-
+
spin_lock_irqsave(&irq_action_lock, flags);
actionp = &sparc_irq[cpu_irq].action;
@@ -544,7 +504,8 @@ int request_irq(unsigned int irq,
goto out_unlock;
}
if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
- printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
+ printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
+ irq);
ret = -EBUSY;
goto out_unlock;
}
@@ -559,14 +520,12 @@ int request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
- printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
+ printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
}
-
if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
+ action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+ if (!action) {
ret = -ENOMEM;
goto out_unlock;
}
@@ -587,7 +546,6 @@ out_unlock:
out:
return ret;
}
-
EXPORT_SYMBOL(request_irq);
void disable_irq_nosync(unsigned int irq)
@@ -606,26 +564,30 @@ void enable_irq(unsigned int irq)
{
__enable_irq(irq);
}
-
EXPORT_SYMBOL(enable_irq);
-/* We really don't need these at all on the Sparc. We only have
+/*
+ * We really don't need these at all on the Sparc. We only have
* stubs here because they are exported to modules.
*/
unsigned long probe_irq_on(void)
{
return 0;
}
-
EXPORT_SYMBOL(probe_irq_on);
int probe_irq_off(unsigned long mask)
{
return 0;
}
-
EXPORT_SYMBOL(probe_irq_off);
+static unsigned int build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
+{
+ return real_irq;
+}
+
/* djhr
* This could probably be made indirect too and assigned in the CPU
* bits of the code. That would be much nicer I think and would also
@@ -636,11 +598,9 @@ EXPORT_SYMBOL(probe_irq_off);
void __init init_IRQ(void)
{
- extern void sun4c_init_IRQ( void );
- extern void sun4m_init_IRQ( void );
- extern void sun4d_init_IRQ( void );
+ sparc_irq_config.build_device_irq = build_device_irq;
- switch(sparc_cpu_model) {
+ switch (sparc_cpu_model) {
case sun4c:
case sun4:
sun4c_init_IRQ();
@@ -656,7 +616,7 @@ void __init init_IRQ(void)
#endif
sun4m_init_IRQ();
break;
-
+
case sun4d:
sun4d_init_IRQ();
break;
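
After the cleanup, request_irq() still routes sun4d to its own implementation and otherwise folds the number onto one of the 14 CPU PILs. From a driver's point of view the calling convention is unchanged; a minimal usage sketch with illustrative names:

/* Sketch: driver-side registration is unchanged by this cleanup. */
static irqreturn_t example_handler(int irq, void *dev_id)
{
        /* acknowledge the device here */
        return IRQ_HANDLED;
}

static int example_attach(unsigned int irq, void *dev)
{
        return request_irq(irq, example_handler, IRQF_SHARED,
                           "example", dev);
}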
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 830d70a3e20b..eb16e3b8a2dd 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -82,7 +82,7 @@ static void bucket_clear_chain_pa(unsigned long bucket_pa)
"i" (ASI_PHYS_USE_EC));
}
-static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
+static unsigned int bucket_get_irq(unsigned long bucket_pa)
{
unsigned int ret;
@@ -90,21 +90,20 @@ static unsigned int bucket_get_virt_irq(unsigned long bucket_pa)
: "=&r" (ret)
: "r" (bucket_pa +
offsetof(struct ino_bucket,
- __virt_irq)),
+ __irq)),
"i" (ASI_PHYS_USE_EC));
return ret;
}
-static void bucket_set_virt_irq(unsigned long bucket_pa,
- unsigned int virt_irq)
+static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq)
{
__asm__ __volatile__("stwa %0, [%1] %2"
: /* no outputs */
- : "r" (virt_irq),
+ : "r" (irq),
"r" (bucket_pa +
offsetof(struct ino_bucket,
- __virt_irq)),
+ __irq)),
"i" (ASI_PHYS_USE_EC));
}
@@ -114,50 +113,49 @@ static struct {
unsigned int dev_handle;
unsigned int dev_ino;
unsigned int in_use;
-} virt_irq_table[NR_IRQS];
-static DEFINE_SPINLOCK(virt_irq_alloc_lock);
+} irq_table[NR_IRQS];
+static DEFINE_SPINLOCK(irq_alloc_lock);
-unsigned char virt_irq_alloc(unsigned int dev_handle,
- unsigned int dev_ino)
+unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino)
{
unsigned long flags;
unsigned char ent;
BUILD_BUG_ON(NR_IRQS >= 256);
- spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+ spin_lock_irqsave(&irq_alloc_lock, flags);
for (ent = 1; ent < NR_IRQS; ent++) {
- if (!virt_irq_table[ent].in_use)
+ if (!irq_table[ent].in_use)
break;
}
if (ent >= NR_IRQS) {
printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
ent = 0;
} else {
- virt_irq_table[ent].dev_handle = dev_handle;
- virt_irq_table[ent].dev_ino = dev_ino;
- virt_irq_table[ent].in_use = 1;
+ irq_table[ent].dev_handle = dev_handle;
+ irq_table[ent].dev_ino = dev_ino;
+ irq_table[ent].in_use = 1;
}
- spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+ spin_unlock_irqrestore(&irq_alloc_lock, flags);
return ent;
}
#ifdef CONFIG_PCI_MSI
-void virt_irq_free(unsigned int virt_irq)
+void irq_free(unsigned int irq)
{
unsigned long flags;
- if (virt_irq >= NR_IRQS)
+ if (irq >= NR_IRQS)
return;
- spin_lock_irqsave(&virt_irq_alloc_lock, flags);
+ spin_lock_irqsave(&irq_alloc_lock, flags);
- virt_irq_table[virt_irq].in_use = 0;
+ irq_table[irq].in_use = 0;
- spin_unlock_irqrestore(&virt_irq_alloc_lock, flags);
+ spin_unlock_irqrestore(&irq_alloc_lock, flags);
}
#endif
@@ -190,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
- seq_printf(p, " %9s", irq_desc[i].chip->name);
+ seq_printf(p, " %9s", irq_desc[i].irq_data.chip->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -253,39 +251,38 @@ struct irq_handler_data {
};
#ifdef CONFIG_SMP
-static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity)
+static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity)
{
cpumask_t mask;
int cpuid;
cpumask_copy(&mask, affinity);
if (cpus_equal(mask, cpu_online_map)) {
- cpuid = map_to_cpu(virt_irq);
+ cpuid = map_to_cpu(irq);
} else {
cpumask_t tmp;
cpus_and(tmp, cpu_online_map, mask);
- cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
+ cpuid = cpus_empty(tmp) ? map_to_cpu(irq) : first_cpu(tmp);
}
return cpuid;
}
#else
-#define irq_choose_cpu(virt_irq, affinity) \
+#define irq_choose_cpu(irq, affinity) \
real_hard_smp_processor_id()
#endif
-static void sun4u_irq_enable(unsigned int virt_irq)
+static void sun4u_irq_enable(struct irq_data *data)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ struct irq_handler_data *handler_data = data->handler_data;
- if (likely(data)) {
+ if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(virt_irq,
- irq_desc[virt_irq].affinity);
- imap = data->imap;
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
+ imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
@@ -294,21 +291,21 @@ static void sun4u_irq_enable(unsigned int virt_irq)
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
- upa_writeq(ICLR_IDLE, data->iclr);
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
}
-static int sun4u_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4u_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
+ struct irq_handler_data *handler_data = data->handler_data;
- if (likely(data)) {
+ if (likely(handler_data)) {
unsigned long cpuid, imap, val;
unsigned int tid;
- cpuid = irq_choose_cpu(virt_irq, mask);
- imap = data->imap;
+ cpuid = irq_choose_cpu(data->irq, mask);
+ imap = handler_data->imap;
tid = sun4u_compute_tid(imap, cpuid);
@@ -317,7 +314,7 @@ static int sun4u_set_affinity(unsigned int virt_irq,
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
- upa_writeq(ICLR_IDLE, data->iclr);
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
return 0;
@@ -340,27 +337,26 @@ static int sun4u_set_affinity(unsigned int virt_irq,
* sees that, it also hooks up a default ->shutdown method which
* invokes ->mask() which we do not want. See irq_chip_set_defaults().
*/
-static void sun4u_irq_disable(unsigned int virt_irq)
+static void sun4u_irq_disable(struct irq_data *data)
{
}
-static void sun4u_irq_eoi(unsigned int virt_irq)
+static void sun4u_irq_eoi(struct irq_data *data)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_handler_data *handler_data = data->handler_data;
+ struct irq_desc *desc = irq_desc + data->irq;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
- if (likely(data))
- upa_writeq(ICLR_IDLE, data->iclr);
+ if (likely(handler_data))
+ upa_writeq(ICLR_IDLE, handler_data->iclr);
}
-static void sun4v_irq_enable(unsigned int virt_irq)
+static void sun4v_irq_enable(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- unsigned long cpuid = irq_choose_cpu(virt_irq,
- irq_desc[virt_irq].affinity);
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -377,11 +373,11 @@ static void sun4v_irq_enable(unsigned int virt_irq)
ino, err);
}
-static int sun4v_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4v_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- unsigned long cpuid = irq_choose_cpu(virt_irq, mask);
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ unsigned long cpuid = irq_choose_cpu(data->irq, mask);
int err;
err = sun4v_intr_settarget(ino, cpuid);
@@ -392,9 +388,9 @@ static int sun4v_set_affinity(unsigned int virt_irq,
return 0;
}
-static void sun4v_irq_disable(unsigned int virt_irq)
+static void sun4v_irq_disable(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned int ino = irq_table[data->irq].dev_ino;
int err;
err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
@@ -403,10 +399,10 @@ static void sun4v_irq_disable(unsigned int virt_irq)
"err(%d)\n", ino, err);
}
-static void sun4v_irq_eoi(unsigned int virt_irq)
+static void sun4v_irq_eoi(struct irq_data *data)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
- struct irq_desc *desc = irq_desc + virt_irq;
+ unsigned int ino = irq_table[data->irq].dev_ino;
+ struct irq_desc *desc = irq_desc + data->irq;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@@ -418,15 +414,15 @@ static void sun4v_irq_eoi(unsigned int virt_irq)
"err(%d)\n", ino, err);
}
-static void sun4v_virq_enable(unsigned int virt_irq)
+static void sun4v_virq_enable(struct irq_data *data)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
- cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity);
+ cpuid = irq_choose_cpu(data->irq, data->affinity);
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
@@ -447,16 +443,16 @@ static void sun4v_virq_enable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
-static int sun4v_virt_set_affinity(unsigned int virt_irq,
- const struct cpumask *mask)
+static int sun4v_virt_set_affinity(struct irq_data *data,
+ const struct cpumask *mask, bool force)
{
unsigned long cpuid, dev_handle, dev_ino;
int err;
- cpuid = irq_choose_cpu(virt_irq, mask);
+ cpuid = irq_choose_cpu(data->irq, mask);
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
if (err != HV_EOK)
@@ -467,13 +463,13 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq,
return 0;
}
-static void sun4v_virq_disable(unsigned int virt_irq)
+static void sun4v_virq_disable(struct irq_data *data)
{
unsigned long dev_handle, dev_ino;
int err;
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_valid(dev_handle, dev_ino,
HV_INTR_DISABLED);
@@ -483,17 +479,17 @@ static void sun4v_virq_disable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}
-static void sun4v_virq_eoi(unsigned int virt_irq)
+static void sun4v_virq_eoi(struct irq_data *data)
{
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_desc *desc = irq_desc + data->irq;
unsigned long dev_handle, dev_ino;
int err;
if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
return;
- dev_handle = virt_irq_table[virt_irq].dev_handle;
- dev_ino = virt_irq_table[virt_irq].dev_ino;
+ dev_handle = irq_table[data->irq].dev_handle;
+ dev_ino = irq_table[data->irq].dev_ino;
err = sun4v_vintr_set_state(dev_handle, dev_ino,
HV_INTR_STATE_IDLE);
@@ -504,50 +500,49 @@ static void sun4v_virq_eoi(unsigned int virt_irq)
}
static struct irq_chip sun4u_irq = {
- .name = "sun4u",
- .enable = sun4u_irq_enable,
- .disable = sun4u_irq_disable,
- .eoi = sun4u_irq_eoi,
- .set_affinity = sun4u_set_affinity,
+ .name = "sun4u",
+ .irq_enable = sun4u_irq_enable,
+ .irq_disable = sun4u_irq_disable,
+ .irq_eoi = sun4u_irq_eoi,
+ .irq_set_affinity = sun4u_set_affinity,
};
static struct irq_chip sun4v_irq = {
- .name = "sun4v",
- .enable = sun4v_irq_enable,
- .disable = sun4v_irq_disable,
- .eoi = sun4v_irq_eoi,
- .set_affinity = sun4v_set_affinity,
+ .name = "sun4v",
+ .irq_enable = sun4v_irq_enable,
+ .irq_disable = sun4v_irq_disable,
+ .irq_eoi = sun4v_irq_eoi,
+ .irq_set_affinity = sun4v_set_affinity,
};
static struct irq_chip sun4v_virq = {
- .name = "vsun4v",
- .enable = sun4v_virq_enable,
- .disable = sun4v_virq_disable,
- .eoi = sun4v_virq_eoi,
- .set_affinity = sun4v_virt_set_affinity,
+ .name = "vsun4v",
+ .irq_enable = sun4v_virq_enable,
+ .irq_disable = sun4v_virq_disable,
+ .irq_eoi = sun4v_virq_eoi,
+ .irq_set_affinity = sun4v_virt_set_affinity,
};
-static void pre_flow_handler(unsigned int virt_irq,
- struct irq_desc *desc)
+static void pre_flow_handler(unsigned int irq, struct irq_desc *desc)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ struct irq_handler_data *handler_data = get_irq_data(irq);
+ unsigned int ino = irq_table[irq].dev_ino;
- data->pre_handler(ino, data->arg1, data->arg2);
+ handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2);
- handle_fasteoi_irq(virt_irq, desc);
+ handle_fasteoi_irq(irq, desc);
}
-void irq_install_pre_handler(int virt_irq,
+void irq_install_pre_handler(int irq,
void (*func)(unsigned int, void *, void *),
void *arg1, void *arg2)
{
- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
- struct irq_desc *desc = irq_desc + virt_irq;
+ struct irq_handler_data *handler_data = get_irq_data(irq);
+ struct irq_desc *desc = irq_desc + irq;
- data->pre_handler = func;
- data->arg1 = arg1;
- data->arg2 = arg2;
+ handler_data->pre_handler = func;
+ handler_data->arg1 = arg1;
+ handler_data->arg2 = arg2;
desc->handle_irq = pre_flow_handler;
}
@@ -555,81 +550,81 @@ void irq_install_pre_handler(int virt_irq,
unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
struct ino_bucket *bucket;
- struct irq_handler_data *data;
- unsigned int virt_irq;
+ struct irq_handler_data *handler_data;
+ unsigned int irq;
int ino;
BUG_ON(tlb_type == hypervisor);
ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
bucket = &ivector_table[ino];
- virt_irq = bucket_get_virt_irq(__pa(bucket));
- if (!virt_irq) {
- virt_irq = virt_irq_alloc(0, ino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
- set_irq_chip_and_handler_name(virt_irq,
+ irq = bucket_get_irq(__pa(bucket));
+ if (!irq) {
+ irq = irq_alloc(0, ino);
+ bucket_set_irq(__pa(bucket), irq);
+ set_irq_chip_and_handler_name(irq,
&sun4u_irq,
handle_fasteoi_irq,
"IVEC");
}
- data = get_irq_chip_data(virt_irq);
- if (unlikely(data))
+ handler_data = get_irq_data(irq);
+ if (unlikely(handler_data))
goto out;
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data)) {
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(virt_irq, data);
+ set_irq_data(irq, handler_data);
- data->imap = imap;
- data->iclr = iclr;
+ handler_data->imap = imap;
+ handler_data->iclr = iclr;
out:
- return virt_irq;
+ return irq;
}
static unsigned int sun4v_build_common(unsigned long sysino,
struct irq_chip *chip)
{
struct ino_bucket *bucket;
- struct irq_handler_data *data;
- unsigned int virt_irq;
+ struct irq_handler_data *handler_data;
+ unsigned int irq;
BUG_ON(tlb_type != hypervisor);
bucket = &ivector_table[sysino];
- virt_irq = bucket_get_virt_irq(__pa(bucket));
- if (!virt_irq) {
- virt_irq = virt_irq_alloc(0, sysino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
- set_irq_chip_and_handler_name(virt_irq, chip,
+ irq = bucket_get_irq(__pa(bucket));
+ if (!irq) {
+ irq = irq_alloc(0, sysino);
+ bucket_set_irq(__pa(bucket), irq);
+ set_irq_chip_and_handler_name(irq, chip,
handle_fasteoi_irq,
"IVEC");
}
- data = get_irq_chip_data(virt_irq);
- if (unlikely(data))
+ handler_data = get_irq_data(irq);
+ if (unlikely(handler_data))
goto out;
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data)) {
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data)) {
prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
prom_halt();
}
- set_irq_chip_data(virt_irq, data);
+ set_irq_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
- data->imap = ~0UL;
- data->iclr = ~0UL;
+ handler_data->imap = ~0UL;
+ handler_data->iclr = ~0UL;
out:
- return virt_irq;
+ return irq;
}
unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
@@ -641,11 +636,11 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
- struct irq_handler_data *data;
+ struct irq_handler_data *handler_data;
unsigned long hv_err, cookie;
struct ino_bucket *bucket;
struct irq_desc *desc;
- unsigned int virt_irq;
+ unsigned int irq;
bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC);
if (unlikely(!bucket))
@@ -662,32 +657,32 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
((unsigned long) bucket +
sizeof(struct ino_bucket)));
- virt_irq = virt_irq_alloc(devhandle, devino);
- bucket_set_virt_irq(__pa(bucket), virt_irq);
+ irq = irq_alloc(devhandle, devino);
+ bucket_set_irq(__pa(bucket), irq);
- set_irq_chip_and_handler_name(virt_irq, &sun4v_virq,
+ set_irq_chip_and_handler_name(irq, &sun4v_virq,
handle_fasteoi_irq,
"IVEC");
- data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
- if (unlikely(!data))
+ handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+ if (unlikely(!handler_data))
return 0;
/* In order to make the LDC channel startup sequence easier,
* especially wrt. locking, we do not let request_irq() enable
* the interrupt.
*/
- desc = irq_desc + virt_irq;
+ desc = irq_desc + irq;
desc->status |= IRQ_NOAUTOEN;
- set_irq_chip_data(virt_irq, data);
+ set_irq_data(irq, handler_data);
/* Catch accidental accesses to these things. IMAP/ICLR handling
* is done by hypervisor calls on sun4v platforms, not by direct
* register accesses.
*/
- data->imap = ~0UL;
- data->iclr = ~0UL;
+ handler_data->imap = ~0UL;
+ handler_data->iclr = ~0UL;
cookie = ~__pa(bucket);
hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie);
@@ -697,30 +692,30 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
prom_halt();
}
- return virt_irq;
+ return irq;
}
-void ack_bad_irq(unsigned int virt_irq)
+void ack_bad_irq(unsigned int irq)
{
- unsigned int ino = virt_irq_table[virt_irq].dev_ino;
+ unsigned int ino = irq_table[irq].dev_ino;
if (!ino)
ino = 0xdeadbeef;
- printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
- ino, virt_irq);
+ printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n",
+ ino, irq);
}
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
-void __irq_entry handler_irq(int irq, struct pt_regs *regs)
+void __irq_entry handler_irq(int pil, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
struct pt_regs *old_regs;
void *orig_sp;
- clear_softint(1 << irq);
+ clear_softint(1 << pil);
old_regs = set_irq_regs(regs);
irq_enter();
@@ -741,16 +736,16 @@ void __irq_entry handler_irq(int irq, struct pt_regs *regs)
while (bucket_pa) {
struct irq_desc *desc;
unsigned long next_pa;
- unsigned int virt_irq;
+ unsigned int irq;
next_pa = bucket_get_chain_pa(bucket_pa);
- virt_irq = bucket_get_virt_irq(bucket_pa);
+ irq = bucket_get_irq(bucket_pa);
bucket_clear_chain_pa(bucket_pa);
- desc = irq_desc + virt_irq;
+ desc = irq_desc + irq;
if (!(desc->status & IRQ_DISABLED))
- desc->handle_irq(virt_irq, desc);
+ desc->handle_irq(irq, desc);
bucket_pa = next_pa;
}
@@ -798,9 +793,12 @@ void fixup_irqs(void)
raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
if (irq_desc[irq].action &&
!(irq_desc[irq].status & IRQ_PER_CPU)) {
- if (irq_desc[irq].chip->set_affinity)
- irq_desc[irq].chip->set_affinity(irq,
- irq_desc[irq].affinity);
+ struct irq_data *data = irq_get_irq_data(irq);
+
+ if (data->chip->irq_set_affinity)
+ data->chip->irq_set_affinity(data,
+ data->affinity,
+ false);
}
raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
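
The irq_64.c conversion is mechanical: every irq_chip callback now takes a struct irq_data instead of a bare interrupt number, the per-IRQ cookie moves from get_irq_chip_data() to ->handler_data, and the methods are registered under their irq_-prefixed names. A minimal chip in the new style, as a sketch with illustrative names:

/* Sketch: the post-conversion irq_chip shape used in this file. */
static void example_irq_enable(struct irq_data *data)
{
        struct irq_handler_data *hd = data->handler_data;

        /* data->irq replaces the old virt_irq argument and
         * data->affinity replaces irq_desc[virt_irq].affinity */
        (void)hd;
}

static struct irq_chip example_chip = {
        .name           = "example",
        .irq_enable     = example_irq_enable,
};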
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index 15d8a3f645c9..24ad449886be 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -3,6 +3,8 @@
#include <linux/interrupt.h>
+#include <asm/traps.h>
+
/* cpu.c */
extern const char *sparc_cpu_type;
extern const char *sparc_pmu_type;
@@ -26,6 +28,53 @@ extern int static_irq_count;
extern spinlock_t irq_action_lock;
extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
+extern void init_IRQ(void);
+
+/* sun4c_irq.c */
+extern void sun4c_init_IRQ(void);
+
+/* sun4m_irq.c */
+extern unsigned int lvl14_resolution;
+
+extern void sun4m_init_IRQ(void);
+extern void sun4m_clear_profile_irq(int cpu);
+
+/* sun4d_irq.c */
+extern spinlock_t sun4d_imsk_lock;
+
+extern void sun4d_init_IRQ(void);
+extern int sun4d_request_irq(unsigned int irq,
+ irq_handler_t handler,
+ unsigned long irqflags,
+ const char *devname, void *dev_id);
+extern int show_sun4d_interrupts(struct seq_file *, void *);
+extern void sun4d_distribute_irqs(void);
+extern void sun4d_free_irq(unsigned int irq, void *dev_id);
+
+/* head_32.S */
+extern unsigned int t_nmi[];
+extern unsigned int linux_trap_ipi15_sun4d[];
+extern unsigned int linux_trap_ipi15_sun4m[];
+
+extern struct tt_entry trapbase_cpu1;
+extern struct tt_entry trapbase_cpu2;
+extern struct tt_entry trapbase_cpu3;
+
+extern char cputypval[];
+
+/* entry.S */
+extern unsigned long lvl14_save[4];
+extern unsigned int real_irq_entry[];
+extern unsigned int smp4d_ticker[];
+extern unsigned int patchme_maybe_smp_msg[];
+
+extern void floppy_hardint(void);
+
+/* trampoline_32.S */
+extern int __smp4m_processor_id(void);
+extern int __smp4d_processor_id(void);
+extern unsigned long sun4m_cpu_startup;
+extern unsigned long sun4d_cpu_startup;
#else /* CONFIG_SPARC32 */
#endif /* CONFIG_SPARC32 */
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index df39a0f0d27a..732b0bce6001 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -790,16 +790,20 @@ static void send_events(struct ldc_channel *lp, unsigned int event_mask)
static irqreturn_t ldc_rx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long orig_state, hv_err, flags;
+ unsigned long orig_state, flags;
unsigned int event_mask;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_rx_get_state(lp->id,
- &lp->rx_head,
- &lp->rx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_rx_get_state(lp->id,
+ &lp->rx_head,
+ &lp->rx_tail,
+ &lp->chan_state);
ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->rx_head, lp->rx_tail);
@@ -904,16 +908,20 @@ out:
static irqreturn_t ldc_tx(int irq, void *dev_id)
{
struct ldc_channel *lp = dev_id;
- unsigned long flags, hv_err, orig_state;
+ unsigned long flags, orig_state;
unsigned int event_mask = 0;
spin_lock_irqsave(&lp->lock, flags);
orig_state = lp->chan_state;
- hv_err = sun4v_ldc_tx_get_state(lp->id,
- &lp->tx_head,
- &lp->tx_tail,
- &lp->chan_state);
+
+ /* We should probably check for hypervisor errors here and
+ * reset the LDC channel if we get one.
+ */
+ sun4v_ldc_tx_get_state(lp->id,
+ &lp->tx_head,
+ &lp->tx_tail,
+ &lp->chan_state);
ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n",
orig_state, lp->chan_state, lp->tx_head, lp->tx_tail);
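
Both interrupt paths now discard the hypervisor status on purpose, and the added comments record the debt. If the check were implemented later it would look roughly like this; example_ldc_reset() is hypothetical, not an existing helper:

/* Sketch only: the error handling the new comments defer. */
static void example_rx_get_state_checked(struct ldc_channel *lp)
{
        unsigned long hv_err;

        hv_err = sun4v_ldc_rx_get_state(lp->id, &lp->rx_head,
                                        &lp->rx_tail, &lp->chan_state);
        if (hv_err != HV_EOK)
                example_ldc_reset(lp);  /* tear down and renegotiate */
}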
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index fdab7f854f80..2969f777fa11 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -30,6 +30,7 @@ struct amba_apb_device leon_percpu_timer_dev[16];
int leondebug_irq_disable;
int leon_debug_irqout;
static int dummy_master_l10_counter;
+unsigned long amba_system_id;
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
@@ -117,10 +118,16 @@ void __init leon_init_timers(irq_handler_t counter_fn)
master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
dummy_master_l10_counter = 0;
- /*Find IRQMP IRQ Controller Registers base address otherwise bail out.*/
rootnp = of_find_node_by_path("/ambapp0");
if (!rootnp)
goto bad;
+
+ /* Find System ID: GRLIB build ID and optional CHIP ID */
+ pp = of_find_property(rootnp, "systemid", &len);
+ if (pp)
+ amba_system_id = *(unsigned long *)pp->value;
+
+	/* Find IRQMP IRQ Controller Registers base address, otherwise bail out */
np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
if (!np) {
np = of_find_node_by_name(rootnp, "01_00d");
@@ -340,7 +347,7 @@ void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
void __init leon_init_IRQ(void)
{
- sparc_init_timers = leon_init_timers;
+ sparc_irq_config.init_timers = leon_init_timers;
BTFIXUPSET_CALL(enable_irq, leon_enable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM);
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
new file mode 100644
index 000000000000..519ca923f59f
--- /dev/null
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -0,0 +1,82 @@
+/* leon_pmc.c: LEON Power-down cpu_idle() handler
+ *
+ * Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
+ */
+
+#include <linux/init.h>
+#include <linux/pm.h>
+
+#include <asm/leon_amba.h>
+#include <asm/leon.h>
+
+/* List of systems that need fixup instructions around the power-down instruction */
+unsigned int pmc_leon_fixup_ids[] = {
+ AEROFLEX_UT699,
+ GAISLER_GR712RC,
+ LEON4_NEXTREME1,
+ 0
+};
+
+int pmc_leon_need_fixup(void)
+{
+ unsigned int systemid = amba_system_id >> 16;
+ unsigned int *id;
+
+ id = &pmc_leon_fixup_ids[0];
+ while (*id != 0) {
+ if (*id == systemid)
+ return 1;
+ id++;
+ }
+
+ return 0;
+}
+
+/*
+ * CPU idle callback function for systems that need some extra handling
+ * See .../arch/sparc/kernel/process.c
+ */
+void pmc_leon_idle_fixup(void)
+{
+	/* Prepare an address in a non-cacheable region. APB is always
+	 * non-cacheable. One instruction is executed after the sleep
+	 * instruction, so we make sure to read the bus and throw away the
+	 * value by accessing a non-cacheable area. We also make sure the
+	 * MMU does not take a TLB miss here by using the MMU BYPASS ASI.
+ */
+ register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+ __asm__ __volatile__ (
+ "mov %%g0, %%asr19\n"
+ "lda [%0] %1, %%g0\n"
+ :
+ : "r"(address), "i"(ASI_LEON_BYPASS));
+}
+
+/*
+ * CPU idle callback function
+ * See .../arch/sparc/kernel/process.c
+ */
+void pmc_leon_idle(void)
+{
+	/* For systems without power-down, this will be a no-op */
+ __asm__ __volatile__ ("mov %g0, %asr19\n\t");
+}
+
+/* Install LEON Power Down function */
+static int __init leon_pmc_install(void)
+{
+ /* Assign power management IDLE handler */
+ if (pmc_leon_need_fixup())
+ pm_idle = pmc_leon_idle_fixup;
+ else
+ pm_idle = pmc_leon_idle;
+
+ printk(KERN_INFO "leon: power management initialized\n");
+
+ return 0;
+}
+
+/* This driver is not critical to the boot process; it does not
+ * matter if it is initialized late.
+ */
+late_initcall(leon_pmc_install);
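
pm_idle is the hook the sparc32 idle loop polls, so once leon_pmc_install() has run, every idle entry executes one of the two handlers above. A rough sketch of the consuming side, assuming the 2.6-era cpu_idle() convention in arch/sparc/kernel/process_32.c:

/* Sketch: the consuming side of pm_idle, roughly as in the
 * 2.6-era sparc32 idle loop.
 */
static void example_cpu_idle(void)
{
        while (1) {
                if (pm_idle)
                        pm_idle();      /* pmc_leon_idle{,_fixup} on LEON */
                else
                        cpu_relax();
                schedule();
        }
}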
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 16582d85368a..8f5de4aa3c0a 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -41,6 +41,8 @@
#include <asm/leon.h>
#include <asm/leon_amba.h>
+#include "kernel.h"
+
#ifdef CONFIG_SPARC_LEON
#include "irq.h"
@@ -261,23 +263,23 @@ void __init leon_smp_done(void)
/* Free unneeded trap tables */
if (!cpu_isset(1, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu1));
- init_page_count(virt_to_page(trapbase_cpu1));
- free_page((unsigned long)trapbase_cpu1);
+ ClearPageReserved(virt_to_page(&trapbase_cpu1));
+ init_page_count(virt_to_page(&trapbase_cpu1));
+ free_page((unsigned long)&trapbase_cpu1);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(2, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu2));
- init_page_count(virt_to_page(trapbase_cpu2));
- free_page((unsigned long)trapbase_cpu2);
+ ClearPageReserved(virt_to_page(&trapbase_cpu2));
+ init_page_count(virt_to_page(&trapbase_cpu2));
+ free_page((unsigned long)&trapbase_cpu2);
totalram_pages++;
num_physpages++;
}
if (!cpu_isset(3, cpu_present_map)) {
- ClearPageReserved(virt_to_page(trapbase_cpu3));
- init_page_count(virt_to_page(trapbase_cpu3));
- free_page((unsigned long)trapbase_cpu3);
+ ClearPageReserved(virt_to_page(&trapbase_cpu3));
+ init_page_count(virt_to_page(&trapbase_cpu3));
+ free_page((unsigned long)&trapbase_cpu3);
totalram_pages++;
num_physpages++;
}
@@ -437,15 +439,6 @@ void __init leon_blackbox_current(unsigned *addr)
}
-/*
- * CPU idle callback function
- * See .../arch/sparc/kernel/process.c
- */
-void pmc_leon_idle(void)
-{
- __asm__ volatile ("mov %g0, %asr19");
-}
-
void __init leon_init_smp(void)
{
/* Patch ipi15 trap table */
@@ -456,13 +449,6 @@ void __init leon_init_smp(void)
BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id,
BTFIXUPCALL_NORM);
-
-#ifndef PMC_NO_IDLE
- /* Assign power management IDLE handler */
- pm_idle = pmc_leon_idle;
- printk(KERN_INFO "leon: power management initialized\n");
-#endif
-
}
#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index 2d055a1e9cc2..a312af40ea84 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -13,6 +13,7 @@
#include <asm/leon_amba.h>
#include "of_device_common.h"
+#include "irq.h"
/*
* PCI bus specific translator
@@ -355,7 +356,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (intr) {
op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs);
for (i = 0; i < op->archdata.num_irqs; i++)
- op->archdata.irqs[i] = intr[i].pri;
+ op->archdata.irqs[i] =
+ sparc_irq_config.build_device_irq(op, intr[i].pri);
} else {
const unsigned int *irq =
of_get_property(dp, "interrupts", &len);
@@ -363,64 +365,13 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
if (irq) {
op->archdata.num_irqs = len / sizeof(unsigned int);
for (i = 0; i < op->archdata.num_irqs; i++)
- op->archdata.irqs[i] = irq[i];
+ op->archdata.irqs[i] =
+ sparc_irq_config.build_device_irq(op, irq[i]);
} else {
op->archdata.num_irqs = 0;
}
}
- if (sparc_cpu_model == sun4d) {
- static int pil_to_sbus[] = {
- 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
- };
- struct device_node *io_unit, *sbi = dp->parent;
- const struct linux_prom_registers *regs;
- int board, slot;
-
- while (sbi) {
- if (!strcmp(sbi->name, "sbi"))
- break;
-
- sbi = sbi->parent;
- }
- if (!sbi)
- goto build_resources;
-
- regs = of_get_property(dp, "reg", NULL);
- if (!regs)
- goto build_resources;
-
- slot = regs->which_io;
-
- /* If SBI's parent is not io-unit or the io-unit lacks
- * a "board#" property, something is very wrong.
- */
- if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
- printk("%s: Error, parent is not io-unit.\n",
- sbi->full_name);
- goto build_resources;
- }
- io_unit = sbi->parent;
- board = of_getintprop_default(io_unit, "board#", -1);
- if (board == -1) {
- printk("%s: Error, lacks board# property.\n",
- io_unit->full_name);
- goto build_resources;
- }
-
- for (i = 0; i < op->archdata.num_irqs; i++) {
- int this_irq = op->archdata.irqs[i];
- int sbusl = pil_to_sbus[this_irq];
-
- if (sbusl)
- this_irq = (((board + 1) << 5) +
- (sbusl << 2) +
- slot);
-
- op->archdata.irqs[i] = this_irq;
- }
- }
-build_resources:
build_device_resources(op, parent);
op->dev.parent = parent;
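The two hunks above stop storing raw PROM priorities and instead funnel every IRQ through sparc_irq_config.build_device_irq, moving the sun4d-specific translation out of the generic OF scan. A hedged sketch of that ops-struct indirection follows, with simplified names; the real structure lives in the kernel's sparc irq headers.

#include <stdio.h>

struct irq_config {
        /* Translate a PROM-reported IRQ into a usable IRQ number;
         * platforms needing no translation keep the default.
         */
        unsigned int (*build_device_irq)(unsigned int real_irq);
};

static unsigned int default_build_device_irq(unsigned int real_irq)
{
        return real_irq;        /* identity mapping */
}

static struct irq_config irq_config = {
        .build_device_irq = default_build_device_irq,
};

int main(void)
{
        /* A platform such as sun4d would override the hook at
         * init time, as sun4d_init_IRQ() does later in this diff.
         */
        printf("irq 7 -> %u\n", irq_config.build_device_irq(7));
        return 0;
}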
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 4137579d9adc..44f41e312f73 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -675,6 +675,7 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
* humanoid.
*/
err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
+ (void) err;
}
list_for_each_entry(child_bus, &bus->children, node)
pci_bus_register_of_sysfs(child_bus);
@@ -1001,22 +1002,22 @@ EXPORT_SYMBOL(pci_domain_nr);
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
- unsigned int virt_irq;
+ unsigned int irq;
if (!pbm->setup_msi_irq)
return -EINVAL;
- return pbm->setup_msi_irq(&virt_irq, pdev, desc);
+ return pbm->setup_msi_irq(&irq, pdev, desc);
}
-void arch_teardown_msi_irq(unsigned int virt_irq)
+void arch_teardown_msi_irq(unsigned int irq)
{
- struct msi_desc *entry = get_irq_msi(virt_irq);
+ struct msi_desc *entry = get_irq_msi(irq);
struct pci_dev *pdev = entry->dev;
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
- pbm->teardown_msi_irq(virt_irq, pdev);
+ pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 6c7a33af3ba6..6e3874b64488 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -295,14 +295,17 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
unsigned int bus = bus_dev->number;
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);
- unsigned long ret;
if (config_out_of_range(pbm, bus, devfn, where)) {
/* Do nothing. */
} else {
- ret = pci_sun4v_config_put(devhandle,
- HV_PCI_DEVICE_BUILD(bus, device, func),
- where, size, value);
+ /* We don't check for hypervisor errors here, but perhaps
+ * we should, and adjust our return value depending upon
+ * what kind of error comes back.
+ */
+ pci_sun4v_config_put(devhandle,
+ HV_PCI_DEVICE_BUILD(bus, device, func),
+ where, size, value);
}
return PCIBIOS_SUCCESSFUL;
}
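If the hypervisor status were checked, one plausible shape is sketched below. The kernel code above deliberately does not do this; the error-to-PCIBIOS mapping is an assumption for illustration (the PCIBIOS_* values mirror <linux/pci.h>).

#include <stdio.h>

#define HV_EOK                  0UL     /* hypervisor "no error" status */
#define PCIBIOS_SUCCESSFUL      0x00
#define PCIBIOS_SET_FAILED      0x88

/* Map a hypervisor status to a PCI BIOS return value. */
static int hv_status_to_pcibios(unsigned long ret)
{
        return (ret == HV_EOK) ? PCIBIOS_SUCCESSFUL : PCIBIOS_SET_FAILED;
}

int main(void)
{
        printf("ok    -> 0x%02x\n", hv_status_to_pcibios(0UL));
        printf("error -> 0x%02x\n", hv_status_to_pcibios(5UL));
        return 0;
}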
diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c
index be5e2441c6d7..3d70f8326efd 100644
--- a/arch/sparc/kernel/pci_fire.c
+++ b/arch/sparc/kernel/pci_fire.c
@@ -214,11 +214,9 @@ static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
- unsigned long msiqid;
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
- msiqid = (val & MSI_MAP_EQNUM);
val &= ~MSI_MAP_VALID;
@@ -277,7 +275,7 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
- unsigned int virt_irq;
+ unsigned int irq;
int fixup;
u64 val;
@@ -293,14 +291,14 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
- virt_irq = build_irq(fixup, iclr_reg, imap_reg);
- if (!virt_irq)
+ irq = build_irq(fixup, iclr_reg, imap_reg);
+ if (!irq)
return -ENOMEM;
upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
- return virt_irq;
+ return irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index e20ed5f06e9c..6beb60df31d0 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -131,9 +131,9 @@ struct pci_pbm_info {
void *msi_queues;
unsigned long *msi_bitmap;
unsigned int *msi_irq_table;
- int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
+ int (*setup_msi_irq)(unsigned int *irq_p, struct pci_dev *pdev,
struct msi_desc *entry);
- void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
+ void (*teardown_msi_irq)(unsigned int irq, struct pci_dev *pdev);
const struct sparc64_msiq_ops *msi_ops;
#endif /* !(CONFIG_PCI_MSI) */
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index b210416ace7b..550e937720e7 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -31,12 +31,12 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0)) {
struct irq_desc *desc;
- unsigned int virt_irq;
+ unsigned int irq;
- virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
- desc = irq_desc + virt_irq;
+ irq = pbm->msi_irq_table[msi - pbm->msi_first];
+ desc = irq_desc + irq;
- desc->handle_irq(virt_irq, desc);
+ desc->handle_irq(irq, desc);
}
if (unlikely(err < 0))
@@ -121,7 +121,7 @@ static struct irq_chip msi_irq = {
/* XXX affinity XXX */
};
-static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
+static int sparc64_setup_msi_irq(unsigned int *irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
@@ -131,17 +131,17 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
int msi, err;
u32 msiqid;
- *virt_irq_p = virt_irq_alloc(0, 0);
+ *irq_p = irq_alloc(0, 0);
err = -ENOMEM;
- if (!*virt_irq_p)
+ if (!*irq_p)
goto out_err;
- set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
+ set_irq_chip_and_handler_name(*irq_p, &msi_irq,
handle_simple_irq, "MSI");
err = alloc_msi(pbm);
if (unlikely(err < 0))
- goto out_virt_irq_free;
+ goto out_irq_free;
msi = err;
@@ -152,7 +152,7 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
if (err)
goto out_msi_free;
- pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
+ pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
@@ -163,24 +163,24 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
}
msg.data = msi;
- set_irq_msi(*virt_irq_p, entry);
- write_msi_msg(*virt_irq_p, &msg);
+ set_irq_msi(*irq_p, entry);
+ write_msi_msg(*irq_p, &msg);
return 0;
out_msi_free:
free_msi(pbm, msi);
-out_virt_irq_free:
- set_irq_chip(*virt_irq_p, NULL);
- virt_irq_free(*virt_irq_p);
- *virt_irq_p = 0;
+out_irq_free:
+ set_irq_chip(*irq_p, NULL);
+ irq_free(*irq_p);
+ *irq_p = 0;
out_err:
return err;
}
-static void sparc64_teardown_msi_irq(unsigned int virt_irq,
+static void sparc64_teardown_msi_irq(unsigned int irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
@@ -189,12 +189,12 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
int i, err;
for (i = 0; i < pbm->msi_num; i++) {
- if (pbm->msi_irq_table[i] == virt_irq)
+ if (pbm->msi_irq_table[i] == irq)
break;
}
if (i >= pbm->msi_num) {
printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
- pbm->name, virt_irq);
+ pbm->name, irq);
return;
}
@@ -205,14 +205,14 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq,
if (err) {
printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
"irq %u, gives error %d\n",
- pbm->name, msi_num, virt_irq, err);
+ pbm->name, msi_num, irq, err);
return;
}
free_msi(pbm, msi_num);
- set_irq_chip(virt_irq, NULL);
- virt_irq_free(virt_irq);
+ set_irq_chip(irq, NULL);
+ irq_free(irq);
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
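sparc64_teardown_msi_irq() above recovers the MSI number from the Linux irq by a linear scan of msi_irq_table. A stand-alone sketch of that reverse lookup, with made-up table contents:

#include <stdio.h>

#define MSI_NUM 8

/* msi_irq_table[i] holds the Linux irq assigned to MSI (first + i);
 * 0 means unassigned. The contents are made up for illustration.
 */
static unsigned int msi_irq_table[MSI_NUM] = { 0, 41, 0, 37, 0, 0, 52, 0 };

/* Return the MSI index owning @irq, or -1 if none does. */
static int msi_index_for_irq(unsigned int irq)
{
        int i;

        for (i = 0; i < MSI_NUM; i++)
                if (msi_irq_table[i] == irq)
                        return i;
        return -1;
}

int main(void)
{
        printf("irq 37 -> msi index %d\n", msi_index_for_irq(37));
        printf("irq 99 -> msi index %d\n", msi_index_for_irq(99));
        return 0;
}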
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 6783410ceb02..1d41af73a92f 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -1313,7 +1313,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
const struct linux_prom64_registers *regs;
struct device_node *dp = op->dev.of_node;
const char *chipset_name;
- int is_pbm_a, err;
+ int err;
switch (chip_type) {
case PBM_CHIP_TYPE_TOMATILLO:
@@ -1343,8 +1343,6 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
*/
regs = of_get_property(dp, "reg", NULL);
- is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000);
-
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 158cd739b263..6cf534681788 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -580,7 +580,7 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
struct iommu *iommu = pbm->iommu;
- unsigned long num_tsb_entries, sz, tsbsize;
+ unsigned long num_tsb_entries, sz;
u32 dma_mask, dma_offset;
const u32 *vdma;
@@ -596,7 +596,6 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
- tsbsize = num_tsb_entries * sizeof(iopte_t);
dma_offset = vdma[0];
@@ -844,9 +843,9 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
- unsigned int virt_irq = sun4v_build_irq(pbm->devhandle, devino);
+ unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);
- if (!virt_irq)
+ if (!irq)
return -ENOMEM;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
@@ -854,7 +853,7 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
- return virt_irq;
+ return irq;
}
static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index 7c2ced612b8f..8ac23e660080 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -81,7 +81,7 @@ static void n2_pcr_write(u64 val)
unsigned long ret;
ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
- if (val != HV_EOK)
+ if (ret != HV_EOK)
write_pcr(val);
}
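The pcr.c hunk is a genuine bug fix rather than a rename: the fallback was keyed off the value being written (val) instead of the hypervisor's return status (ret). A reduced illustration with the sun4v call stubbed out:

#include <stdio.h>

#define HV_EOK 0UL

/* Stub standing in for sun4v_niagara2_setperf(); pretend it fails. */
static unsigned long setperf_stub(unsigned long val)
{
        (void)val;
        return 2UL;     /* any status other than HV_EOK */
}

static void n2_pcr_write_sketch(unsigned long val)
{
        unsigned long ret = setperf_stub(val);

        /* The buggy form tested "val != HV_EOK", keying the fallback
         * off the written value; the fix keys it off the status.
         */
        if (ret != HV_EOK)
                printf("fallback: write_pcr(0x%lx)\n", val);
}

int main(void)
{
        n2_pcr_write_sketch(0x10);
        return 0;
}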
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index ce651147fabc..570b98f6e897 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -227,7 +227,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
- int virt_irq;
+ int irq;
ino &= 0x3f;
if (ino < SABRE_ONBOARD_IRQ_BASE) {
@@ -247,7 +247,7 @@ static unsigned int sabre_irq_build(struct device_node *dp,
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
- virt_irq = build_irq(inofixup, iclr, imap);
+ irq = build_irq(inofixup, iclr, imap);
/* If the parent device is a PCI<->PCI bridge other than
* APB, we have to install a pre-handler to ensure that
@@ -256,13 +256,13 @@ static unsigned int sabre_irq_build(struct device_node *dp,
*/
regs = of_get_property(dp, "reg", NULL);
if (regs && sabre_device_needs_wsync(dp)) {
- irq_install_pre_handler(virt_irq,
+ irq_install_pre_handler(irq,
sabre_wsync_handler,
(void *) (long) regs->phys_hi,
(void *) irq_data);
}
- return virt_irq;
+ return irq;
}
static void __init sabre_irq_trans_init(struct device_node *dp)
@@ -382,7 +382,7 @@ static unsigned int schizo_irq_build(struct device_node *dp,
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
int ign_fixup;
- int virt_irq;
+ int irq;
int is_tomatillo;
ino &= 0x3f;
@@ -409,17 +409,17 @@ static unsigned int schizo_irq_build(struct device_node *dp,
ign_fixup = (1 << 6);
}
- virt_irq = build_irq(ign_fixup, iclr, imap);
+ irq = build_irq(ign_fixup, iclr, imap);
if (is_tomatillo) {
- irq_install_pre_handler(virt_irq,
+ irq_install_pre_handler(irq,
tomatillo_wsync_handler,
((irq_data->chip_version <= 4) ?
(void *) 1 : (void *) 0),
(void *) irq_data->sync_reg);
}
- return virt_irq;
+ return irq;
}
static void __init __schizo_irq_trans_init(struct device_node *dp,
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ccc812bc09e..96ee50a80661 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -1086,6 +1086,7 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
+#ifdef CONFIG_AUDITSYSCALL
if (unlikely(current->audit_context)) {
unsigned long tstate = regs->tstate;
int result = AUDITSC_SUCCESS;
@@ -1095,7 +1096,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(result, regs->u_regs[UREG_I0]);
}
-
+#endif
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->u_regs[UREG_G1]);
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 648f2161b851..7b8b76c9557f 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -184,7 +184,6 @@ static void __init boot_flags_init(char *commands)
*/
extern void sun4c_probe_vac(void);
-extern char cputypval;
extern unsigned short root_flags;
extern unsigned short root_dev;
@@ -218,21 +217,21 @@ void __init setup_arch(char **cmdline_p)
/* Set sparc_cpu_model */
sparc_cpu_model = sun_unknown;
- if (!strcmp(&cputypval,"sun4 "))
+ if (!strcmp(&cputypval[0], "sun4 "))
sparc_cpu_model = sun4;
- if (!strcmp(&cputypval,"sun4c"))
+ if (!strcmp(&cputypval[0], "sun4c"))
sparc_cpu_model = sun4c;
- if (!strcmp(&cputypval,"sun4m"))
+ if (!strcmp(&cputypval[0], "sun4m"))
sparc_cpu_model = sun4m;
- if (!strcmp(&cputypval,"sun4s"))
+ if (!strcmp(&cputypval[0], "sun4s"))
sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
- if (!strcmp(&cputypval,"sun4d"))
+ if (!strcmp(&cputypval[0], "sun4d"))
sparc_cpu_model = sun4d;
- if (!strcmp(&cputypval,"sun4e"))
+ if (!strcmp(&cputypval[0], "sun4e"))
sparc_cpu_model = sun4e;
- if (!strcmp(&cputypval,"sun4u"))
+ if (!strcmp(&cputypval[0], "sun4u"))
sparc_cpu_model = sun4u;
- if (!strncmp(&cputypval, "leon" , 4))
+ if (!strncmp(&cputypval[0], "leon" , 4))
sparc_cpu_model = sparc_leon;
printk("ARCH: ");
@@ -335,7 +334,7 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
prom_rev,
romvec->pv_printrev >> 16,
romvec->pv_printrev & 0xffff,
- &cputypval,
+ &cputypval[0],
ncpus_probed,
num_online_cpus()
#ifndef CONFIG_SMP
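The &cputypval to &cputypval[0] conversions pair with redeclaring the symbol as an array elsewhere in this series. The old "extern char cputypval;" declaration made &cputypval type-check while misdescribing the object; a small sketch follows (the storage is an assembly label in the real kernel, faked here as a C array):

#include <stdio.h>
#include <string.h>

/* In the kernel the storage behind cputypval is a label defined in
 * assembly; here it is faked as an ordinary C array.
 */
char cputypval[16] = "sun4m";

int main(void)
{
        /* With "extern char cputypval;" the old strcmp(&cputypval, ...)
         * type-checked, but described a single char, not a string.
         * Declaring the array lets the code index it honestly.
         */
        if (!strcmp(&cputypval[0], "sun4m"))
                printf("ARCH: SUN4M\n");
        return 0;
}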
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 555a76d1f4a1..3e94a8c23238 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -189,7 +189,7 @@ static inline long get_delta (long *rt, long *master)
void smp_synchronize_tick_client(void)
{
long i, delta, adj, adjust_latency = 0, done = 0;
- unsigned long flags, rt, master_time_stamp, bound;
+ unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
struct {
long rt; /* roundtrip time */
@@ -208,10 +208,8 @@ void smp_synchronize_tick_client(void)
{
for (i = 0; i < NUM_ROUNDS; i++) {
delta = get_delta(&rt, &master_time_stamp);
- if (delta == 0) {
+ if (delta == 0)
done = 1; /* let's lock on to this... */
- bound = rt;
- }
if (!done) {
if (i > 0) {
@@ -933,13 +931,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
void *pg_addr;
- int this_cpu;
u64 data0;
if (tlb_type == hypervisor)
return;
- this_cpu = get_cpu();
+ preempt_disable();
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
@@ -964,7 +961,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
}
__local_flush_dcache_page(page);
- put_cpu();
+ preempt_enable();
}
void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
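Dropping this_cpu here is safe because the cpu number from get_cpu() was never used; get_cpu() is essentially preempt_disable() plus smp_processor_id(), so plain preempt_disable()/preempt_enable() preserves behavior. A user-space sketch of the relationship, with stubbed kernel primitives:

#include <stdio.h>

static int preempt_count;

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }
static int smp_processor_id(void) { return 0; }         /* stub */

/* get_cpu() is preempt_disable() plus reading the cpu id; when the
 * id goes unused, plain preempt_disable() says what is meant.
 */
static int get_cpu(void)
{
        preempt_disable();
        return smp_processor_id();
}

int main(void)
{
        int cpu = get_cpu();

        printf("pinned on cpu %d, preempt_count %d\n", cpu, preempt_count);
        preempt_enable();       /* the put_cpu() half */
        return 0;
}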
diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c
index 892fb884910a..90eea38ad66f 100644
--- a/arch/sparc/kernel/sun4c_irq.c
+++ b/arch/sparc/kernel/sun4c_irq.c
@@ -1,5 +1,5 @@
-/* sun4c_irq.c
- * arch/sparc/kernel/sun4c_irq.c:
+/*
+ * sun4c irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
@@ -9,31 +9,41 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/interrupt.h>
#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include "irq.h"
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/vaddrs.h>
-#include <asm/timer.h>
-#include <asm/openprom.h>
#include <asm/oplib.h>
-#include <asm/traps.h>
+#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/idprom.h>
-#include <asm/machines.h>
+
+#include "irq.h"
+
+/* Sun4c interrupts are typically laid out as follows:
+ *
+ * 1 - Software interrupt, SBUS level 1
+ * 2 - SBUS level 2
+ * 3 - ESP SCSI, SBUS level 3
+ * 4 - Software interrupt
+ * 5 - Lance ethernet, SBUS level 4
+ * 6 - Software interrupt
+ * 7 - Graphics card, SBUS level 5
+ * 8 - SBUS level 6
+ * 9 - SBUS level 7
+ * 10 - Counter timer
+ * 11 - Floppy
+ * 12 - Zilog uart
+ * 13 - CS4231 audio
+ * 14 - Profiling timer
+ * 15 - NMI
+ *
+ * The interrupt enable bits in the interrupt mask register are
+ * really only used to enable/disable the timer interrupts, and
+ * for signalling software interrupts. There is also a master
+ * interrupt enable bit in this register.
+ *
+ * Interrupts are enabled by setting the SUN4C_INT_* bits and
+ * disabled by clearing those bits.
+ */
/*
* Bit field defines for the interrupt registers on various
@@ -49,26 +59,21 @@
#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */
#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */
-/* Pointer to the interrupt enable byte
- *
- * Dave Redman (djhr@tadpole.co.uk)
- * What you may not be aware of is that entry.S requires this variable.
- *
- * --- linux_trap_nmi_sun4c --
- *
- * so don't go making it static, like I tried. sigh.
+/*
+ * Pointer to the interrupt enable byte
+ * Used by entry.S
*/
-unsigned char __iomem *interrupt_enable = NULL;
+unsigned char __iomem *interrupt_enable;
static void sun4c_disable_irq(unsigned int irq_nr)
{
unsigned long flags;
unsigned char current_mask, new_mask;
-
+
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = sbus_readb(interrupt_enable);
- switch(irq_nr) {
+ switch (irq_nr) {
case 1:
new_mask = ((current_mask) & (~(SUN4C_INT_E1)));
break;
@@ -93,11 +98,11 @@ static void sun4c_enable_irq(unsigned int irq_nr)
{
unsigned long flags;
unsigned char current_mask, new_mask;
-
+
local_irq_save(flags);
irq_nr &= (NR_IRQS - 1);
current_mask = sbus_readb(interrupt_enable);
- switch(irq_nr) {
+ switch (irq_nr) {
case 1:
new_mask = ((current_mask) | SUN4C_INT_E1);
break;
@@ -180,12 +185,14 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err);
prom_halt();
}
-
+
sun4c_disable_irq(irq[1].pri);
}
#ifdef CONFIG_SMP
-static void sun4c_nop(void) {}
+static void sun4c_nop(void)
+{
+}
#endif
void __init sun4c_init_IRQ(void)
@@ -214,7 +221,9 @@ void __init sun4c_init_IRQ(void)
BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP);
- sparc_init_timers = sun4c_init_timers;
+
+ sparc_irq_config.init_timers = sun4c_init_timers;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP);
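sun4c_enable_irq() and sun4c_disable_irq() are the same read-modify-write on the single interrupt-enable byte, differing only in whether a level's bit is set or cleared. A compressed user-space sketch of that pattern, modeling the register as a plain byte and reusing the SUN4C_INT_E* values shown above:

#include <stdio.h>

#define SUN4C_INT_E4 0x04       /* Enable level 4 IRQ. */
#define SUN4C_INT_E1 0x02       /* Enable level 1 IRQ. */

static unsigned char int_enable_reg;    /* stands in for the mmio byte */

static void set_irq_bit(unsigned char bit, int enable)
{
        unsigned char mask = int_enable_reg;    /* read   */

        if (enable)                             /* modify */
                mask |= bit;
        else
                mask &= ~bit;
        int_enable_reg = mask;                  /* write  */
}

int main(void)
{
        set_irq_bit(SUN4C_INT_E4, 1);
        set_irq_bit(SUN4C_INT_E1, 1);
        set_irq_bit(SUN4C_INT_E4, 0);
        printf("enable register: 0x%02x\n", int_enable_reg);    /* 0x02 */
        return 0;
}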
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index e11b4612dabb..77b4a8992710 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -1,50 +1,42 @@
/*
- * arch/sparc/kernel/sun4d_irq.c:
- * SS1000/SC2000 interrupt handling.
+ * SS1000/SC2000 interrupt handling.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Heavily based on arch/sparc/kernel/irq.c.
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
#include <linux/seq_file.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/smp.h>
-#include <asm/vaddrs.h>
+
#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sbi.h>
#include <asm/cacheflush.h>
-#include <asm/irq_regs.h>
#include "kernel.h"
#include "irq.h"
-/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
-/* #define DISTRIBUTE_IRQS */
+/* Sun4d interrupts fall roughly into two categories: SBUS and
+ * CPU local. CPU local interrupts cover the timer interrupts
+ * and the like, and we encode those as normal PILs between
+ * 0 and 15.
+ *
+ * SBUS interrupts are encoded as integers combining the board number
+ * (plus one), the SBUS level, and the SBUS slot number. Sun4d
+ * IRQ dispatch is done by:
+ *
+ * 1) Reading the BW local interrupt table in order to get the bus
+ * interrupt mask.
+ *
+ * This table is indexed by SBUS interrupt level which can be
+ * derived from the PIL we got interrupted on.
+ *
+ * 2) For each bus showing interrupt pending from #1, read the
+ * SBI interrupt state register. This will indicate which slots
+ * have interrupts pending for that SBUS interrupt level.
+ */
struct sun4d_timer_regs {
u32 l10_timer_limit;
@@ -59,11 +51,9 @@ static struct sun4d_timer_regs __iomem *sun4d_timers;
#define TIMER_IRQ 10
#define MAX_STATIC_ALLOC 4
-extern int static_irq_count;
static unsigned char sbus_tid[32];
static struct irqaction *irq_action[NR_IRQS];
-extern spinlock_t irq_action_lock;
static struct sbus_action {
struct irqaction *action;
@@ -71,11 +61,33 @@ static struct sbus_action {
} *sbus_actions;
static int pil_to_sbus[] = {
- 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
+ 0,
+ 0,
+ 1,
+ 2,
+ 0,
+ 3,
+ 0,
+ 4,
+ 0,
+ 5,
+ 0,
+ 6,
+ 0,
+ 7,
+ 0,
+ 0,
};
static int sbus_to_pil[] = {
- 0, 2, 3, 5, 7, 9, 11, 13,
+ 0,
+ 2,
+ 3,
+ 5,
+ 7,
+ 9,
+ 11,
+ 13,
};
static int nsbi;
@@ -86,7 +98,7 @@ DEFINE_SPINLOCK(sun4d_imsk_lock);
int show_sun4d_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j = 0, k = 0, sbusl;
- struct irqaction * action;
+ struct irqaction *action;
unsigned long flags;
#ifdef CONFIG_SMP
int x;
@@ -96,13 +108,14 @@ int show_sun4d_interrupts(struct seq_file *p, void *v)
if (i < NR_IRQS) {
sbusl = pil_to_sbus[i];
if (!sbusl) {
- action = *(i + irq_action);
- if (!action)
- goto out_unlock;
+ action = *(i + irq_action);
+ if (!action)
+ goto out_unlock;
} else {
for (j = 0; j < nsbi; j++) {
for (k = 0; k < 4; k++)
- if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action))
+ action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
+ if (action)
goto found_it;
}
goto out_unlock;
@@ -125,15 +138,17 @@ found_it: seq_printf(p, "%3d: ", i);
(action->flags & IRQF_DISABLED) ? " +" : "",
action->name);
}
- if (!sbusl) break;
+ if (!sbusl)
+ break;
k++;
- if (k < 4)
- action = sbus_actions [(j << 5) + (sbusl << 2) + k].action;
- else {
+ if (k < 4) {
+ action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
+ } else {
j++;
- if (j == nsbi) break;
+ if (j == nsbi)
+ break;
k = 0;
- action = sbus_actions [(j << 5) + (sbusl << 2)].action;
+ action = sbus_actions[(j << 5) + (sbusl << 2)].action;
}
}
seq_putc(p, '\n');
@@ -147,7 +162,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
{
struct irqaction *action, **actionp;
struct irqaction *tmp = NULL;
- unsigned long flags;
+ unsigned long flags;
spin_lock_irqsave(&irq_action_lock, flags);
if (irq < 15)
@@ -156,7 +171,7 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
actionp = &(sbus_actions[irq - (1 << 5)].action);
action = *actionp;
if (!action) {
- printk("Trying to free free IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
goto out_unlock;
}
if (dev_id) {
@@ -166,23 +181,25 @@ void sun4d_free_irq(unsigned int irq, void *dev_id)
tmp = action;
}
if (!action) {
- printk("Trying to free free shared IRQ%d\n",irq);
+ printk(KERN_ERR "Trying to free free shared IRQ%d\n",
+ irq);
goto out_unlock;
}
} else if (action->flags & IRQF_SHARED) {
- printk("Trying to free shared IRQ%d with NULL device ID\n", irq);
+ printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
+ irq);
goto out_unlock;
}
- if (action->flags & SA_STATIC_ALLOC)
- {
- /* This interrupt is marked as specially allocated
+ if (action->flags & SA_STATIC_ALLOC) {
+ /*
+ * This interrupt is marked as specially allocated
* so it is a bad idea to free it.
*/
- printk("Attempt to free statically allocated IRQ%d (%s)\n",
+ printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
irq, action->name);
goto out_unlock;
}
-
+
if (tmp)
tmp->next = action->next;
else
@@ -203,30 +220,28 @@ out_unlock:
spin_unlock_irqrestore(&irq_action_lock, flags);
}
-extern void unexpected_irq(int, void *, struct pt_regs *);
-
-void sun4d_handler_irq(int irq, struct pt_regs * regs)
+void sun4d_handler_irq(int pil, struct pt_regs *regs)
{
struct pt_regs *old_regs;
- struct irqaction * action;
+ struct irqaction *action;
int cpu = smp_processor_id();
/* SBUS IRQ level (1 - 7) */
- int sbusl = pil_to_sbus[irq];
-
+ int sbusl = pil_to_sbus[pil];
+
/* FIXME: Is this necessary?? */
cc_get_ipen();
-
- cc_set_iclr(1 << irq);
-
+
+ cc_set_iclr(1 << pil);
+
old_regs = set_irq_regs(regs);
irq_enter();
- kstat_cpu(cpu).irqs[irq]++;
+ kstat_cpu(cpu).irqs[pil]++;
if (!sbusl) {
- action = *(irq + irq_action);
+ action = *(pil + irq_action);
if (!action)
- unexpected_irq(irq, NULL, regs);
+ unexpected_irq(pil, NULL, regs);
do {
- action->handler(irq, action->dev_id);
+ action->handler(pil, action->dev_id);
action = action->next;
} while (action);
} else {
@@ -235,9 +250,9 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
struct sbus_action *actionp;
unsigned mask, slot;
int sbil = (sbusl << 2);
-
+
bw_clear_intr_mask(sbusl, bus_mask);
-
+
/* Loop for each pending SBI */
for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
if (bus_mask & 1) {
@@ -249,11 +264,11 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
if (mask & slot) {
mask &= ~slot;
action = actionp->action;
-
+
if (!action)
- unexpected_irq(irq, NULL, regs);
+ unexpected_irq(pil, NULL, regs);
do {
- action->handler(irq, action->dev_id);
+ action->handler(pil, action->dev_id);
action = action->next;
} while (action);
release_sbi(SBI2DEVID(sbino), slot);
@@ -266,13 +281,13 @@ void sun4d_handler_irq(int irq, struct pt_regs * regs)
int sun4d_request_irq(unsigned int irq,
irq_handler_t handler,
- unsigned long irqflags, const char * devname, void *dev_id)
+ unsigned long irqflags, const char *devname, void *dev_id)
{
struct irqaction *action, *tmp = NULL, **actionp;
unsigned long flags;
int ret;
-
- if(irq > 14 && irq < (1 << 5)) {
+
+ if (irq > 14 && irq < (1 << 5)) {
ret = -EINVAL;
goto out;
}
@@ -289,16 +304,18 @@ int sun4d_request_irq(unsigned int irq,
else
actionp = irq + irq_action;
action = *actionp;
-
+
if (action) {
if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
- for (tmp = action; tmp->next; tmp = tmp->next);
+ for (tmp = action; tmp->next; tmp = tmp->next)
+ /* find last entry - tmp used below */;
} else {
ret = -EBUSY;
goto out_unlock;
}
if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
- printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
+ printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
+ irq);
ret = -EBUSY;
goto out_unlock;
}
@@ -312,14 +329,14 @@ int sun4d_request_irq(unsigned int irq,
if (static_irq_count < MAX_STATIC_ALLOC)
action = &static_irqaction[static_irq_count++];
else
- printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
+ printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
+ irq, devname);
}
-
+
if (action == NULL)
- action = kmalloc(sizeof(struct irqaction),
- GFP_ATOMIC);
-
- if (!action) {
+ action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+
+ if (!action) {
ret = -ENOMEM;
goto out_unlock;
}
@@ -334,7 +351,7 @@ int sun4d_request_irq(unsigned int irq,
tmp->next = action;
else
*actionp = action;
-
+
__enable_irq(irq);
ret = 0;
@@ -348,7 +365,7 @@ static void sun4d_disable_irq(unsigned int irq)
{
int tid = sbus_tid[(irq >> 5) - 1];
unsigned long flags;
-
+
if (irq < NR_IRQS)
return;
@@ -361,7 +378,7 @@ static void sun4d_enable_irq(unsigned int irq)
{
int tid = sbus_tid[(irq >> 5) - 1];
unsigned long flags;
-
+
if (irq < NR_IRQS)
return;
@@ -389,44 +406,6 @@ void __init sun4d_distribute_irqs(void)
{
struct device_node *dp;
-#ifdef DISTRIBUTE_IRQS
- cpumask_t sbus_serving_map;
-
- sbus_serving_map = cpu_present_map;
- for_each_node_by_name(dp, "sbi") {
- int board = of_getintprop_default(dp, "board#", 0);
-
- if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map))
- sbus_tid[board] = (board * 2 + 1);
- else if (cpu_isset(board * 2, cpu_present_map))
- sbus_tid[board] = (board * 2);
- else if (cpu_isset(board * 2 + 1, cpu_present_map))
- sbus_tid[board] = (board * 2 + 1);
- else
- sbus_tid[board] = 0xff;
- if (sbus_tid[board] != 0xff)
- cpu_clear(sbus_tid[board], sbus_serving_map);
- }
- for_each_node_by_name(dp, "sbi") {
- int board = of_getintprop_default(dp, "board#", 0);
- if (sbus_tid[board] == 0xff) {
- int i = 31;
-
- if (cpus_empty(sbus_serving_map))
- sbus_serving_map = cpu_present_map;
- while (cpu_isset(i, sbus_serving_map))
- i--;
- sbus_tid[board] = i;
- cpu_clear(i, sbus_serving_map);
- }
- }
- for_each_node_by_name(dp, "sbi") {
- int devid = of_getintprop_default(dp, "device-id", 0);
- int board = of_getintprop_default(dp, "board#", 0);
- printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]);
- set_sbi_tid(devid, sbus_tid[board] << 3);
- }
-#else
int cpuid = cpu_logical_map(1);
if (cpuid == -1)
@@ -437,11 +416,10 @@ void __init sun4d_distribute_irqs(void)
sbus_tid[board] = cpuid;
set_sbi_tid(devid, cpuid << 3);
}
- printk("All sbus IRQs directed to CPU%d\n", cpuid);
-#endif
+ printk(KERN_INFO "All sbus IRQs directed to CPU%d\n", cpuid);
}
#endif
-
+
static void sun4d_clear_clock_irq(void)
{
sbus_readl(&sun4d_timers->l10_timer_limit);
@@ -462,14 +440,61 @@ static void __init sun4d_load_profile_irqs(void)
}
}
+unsigned int sun4d_build_device_irq(struct platform_device *op,
+ unsigned int real_irq)
+{
+ static int pil_to_sbus[] = {
+ 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
+ };
+ struct device_node *dp = op->dev.of_node;
+ struct device_node *io_unit, *sbi = dp->parent;
+ const struct linux_prom_registers *regs;
+ int board, slot;
+ int sbusl;
+
+ while (sbi) {
+ if (!strcmp(sbi->name, "sbi"))
+ break;
+
+ sbi = sbi->parent;
+ }
+ if (!sbi)
+ goto err_out;
+
+ regs = of_get_property(dp, "reg", NULL);
+ if (!regs)
+ goto err_out;
+
+ slot = regs->which_io;
+
+ /*
+ * If SBI's parent is not io-unit or the io-unit lacks
+ * a "board#" property, something is very wrong.
+ */
+ if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
+ printk("%s: Error, parent is not io-unit.\n", sbi->full_name);
+ goto err_out;
+ }
+ io_unit = sbi->parent;
+ board = of_getintprop_default(io_unit, "board#", -1);
+ if (board == -1) {
+ printk("%s: Error, lacks board# property.\n", io_unit->full_name);
+ goto err_out;
+ }
+
+ sbusl = pil_to_sbus[real_irq];
+ if (sbusl)
+ return (((board + 1) << 5) + (sbusl << 2) + slot);
+
+err_out:
+ return real_irq;
+}
+
static void __init sun4d_fixup_trap_table(void)
{
#ifdef CONFIG_SMP
unsigned long flags;
- extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
- extern unsigned int real_irq_entry[], smp4d_ticker[];
- extern unsigned int patchme_maybe_smp_msg[];
/* Adjust so that we jump directly to smp4d_ticker */
lvl14_save[2] += smp4d_ticker - real_irq_entry;
@@ -531,7 +556,8 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
(IRQF_DISABLED | SA_STATIC_ALLOC),
"timer", NULL);
if (err) {
- prom_printf("sun4d_init_timers: request_irq() failed with %d\n", err);
+ prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
+ err);
prom_halt();
}
sun4d_load_profile_irqs();
@@ -550,7 +576,7 @@ void __init sun4d_init_sbi_irq(void)
nsbi = 0;
for_each_node_by_name(dp, "sbi")
nsbi++;
- sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
+ sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
if (!sbus_actions) {
prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
prom_halt();
@@ -566,7 +592,8 @@ void __init sun4d_init_sbi_irq(void)
/* Get rid of pending irqs from PROM */
mask = acquire_sbi(devid, 0xffffffff);
if (mask) {
- printk ("Clearing pending IRQs %08x on SBI %d\n", mask, board);
+ printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n",
+ mask, board);
release_sbi(devid, mask);
}
}
@@ -580,7 +607,10 @@ void __init sun4d_init_IRQ(void)
BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
- sparc_init_timers = sun4d_init_timers;
+
+ sparc_irq_config.init_timers = sun4d_init_timers;
+ sparc_irq_config.build_device_irq = sun4d_build_device_irq;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
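sun4d_build_device_irq() packs the board, SBUS level, and slot into one integer as ((board + 1) << 5) + (sbusl << 2) + slot, with values below 32 left as plain CPU PILs. A stand-alone encode/decode sketch of that layout:

#include <stdio.h>

/* sun4d SBUS irq layout: bits [1:0] slot, bits [4:2] SBUS level,
 * remaining high bits board number plus one. Values below 32 are
 * plain CPU PILs and are passed through untranslated.
 */
static unsigned int sun4d_encode(int board, int sbusl, int slot)
{
        return ((board + 1) << 5) + (sbusl << 2) + slot;
}

static void sun4d_decode(unsigned int irq)
{
        printf("irq %u: board %u, sbus level %u, slot %u\n",
               irq, (irq >> 5) - 1, (irq >> 2) & 0x7, irq & 0x3);
}

int main(void)
{
        sun4d_decode(sun4d_encode(2, 3, 1));    /* irq 109 */
        return 0;
}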
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 482f2ab92692..475d50b96cd0 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -1,4 +1,4 @@
-/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support.
+/* Sparc SS1000/SC2000 SMP support.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*
@@ -6,59 +6,23 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/head.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
-#include <asm/irq_regs.h>
-
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
#include <asm/sbi.h>
+#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-#include <asm/cpudata.h>
+#include "kernel.h"
#include "irq.h"
-#define IRQ_CROSS_CALL 15
-extern ctxd_t *srmmu_ctx_table_phys;
+#define IRQ_CROSS_CALL 15
-static volatile int smp_processors_ready = 0;
+static volatile int smp_processors_ready;
static int smp_highest_cpu;
-extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern cpuinfo_sparc cpu_data[NR_CPUS];
-extern unsigned char boot_cpu_id;
-extern volatile int smp_process_available;
-
-extern cpumask_t smp_commenced_mask;
-
-extern int __smp4d_processor_id(void);
-
-/* #define SMP_DEBUG */
-
-#ifdef SMP_DEBUG
-#define SMP_PRINTK(x) printk x
-#else
-#define SMP_PRINTK(x)
-#endif
static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
@@ -69,8 +33,6 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon
}
static void smp_setup_percpu_timer(void);
-extern void cpu_probe(void);
-extern void sun4d_distribute_irqs(void);
static unsigned char cpu_leds[32];
@@ -86,9 +48,8 @@ static inline void show_leds(int cpuid)
void __cpuinit smp4d_callin(void)
{
int cpuid = hard_smp4d_processor_id();
- extern spinlock_t sun4d_imsk_lock;
unsigned long flags;
-
+
/* Show we are alive */
cpu_leds[cpuid] = 0x6;
show_leds(cpuid);
@@ -118,15 +79,15 @@ void __cpuinit smp4d_callin(void)
sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
local_flush_cache_all();
local_flush_tlb_all();
-
+
cpu_probe();
- while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
+ while ((unsigned long)current_set[cpuid] < PAGE_OFFSET)
barrier();
-
- while(current_set[cpuid]->cpu != cpuid)
+
+ while (current_set[cpuid]->cpu != cpuid)
barrier();
-
+
/* Fix idle thread fields. */
__asm__ __volatile__("ld [%0], %%g6\n\t"
: : "r" (&current_set[cpuid])
@@ -134,16 +95,16 @@ void __cpuinit smp4d_callin(void)
cpu_leds[cpuid] = 0x9;
show_leds(cpuid);
-
+
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
local_flush_cache_all();
local_flush_tlb_all();
-
+
local_irq_enable(); /* We don't allow PIL 14 yet */
-
+
while (!cpu_isset(cpuid, smp_commenced_mask))
barrier();
@@ -154,15 +115,9 @@ void __cpuinit smp4d_callin(void)
}
-extern void init_IRQ(void);
-extern void cpu_panic(void);
-
/*
* Cycle through the processors asking the PROM to start each one.
*/
-
-extern struct linux_prom_registers smp_penguin_ctable;
-
void __init smp4d_boot_cpus(void)
{
if (boot_cpu_id)
@@ -173,43 +128,42 @@ void __init smp4d_boot_cpus(void)
int __cpuinit smp4d_boot_one_cpu(int i)
{
- extern unsigned long sun4d_cpu_startup;
- unsigned long *entry = &sun4d_cpu_startup;
- struct task_struct *p;
- int timeout;
- int cpu_node;
+ unsigned long *entry = &sun4d_cpu_startup;
+ struct task_struct *p;
+ int timeout;
+ int cpu_node;
- cpu_find_by_instance(i, &cpu_node,NULL);
- /* Cook up an idler for this guy. */
- p = fork_idle(i);
- current_set[i] = task_thread_info(p);
+ cpu_find_by_instance(i, &cpu_node, NULL);
+ /* Cook up an idler for this guy. */
+ p = fork_idle(i);
+ current_set[i] = task_thread_info(p);
+
+ /*
+ * Initialize the contexts table
+ * Since the call to prom_startcpu() trashes the structure,
+ * we need to re-initialize it for each cpu
+ */
+ smp_penguin_ctable.which_io = 0;
+ smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+ smp_penguin_ctable.reg_size = 0;
+
+ /* whirrr, whirrr, whirrrrrrrrr... */
+ printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
+ local_flush_cache_all();
+ prom_startcpu(cpu_node,
+ &smp_penguin_ctable, 0, (char *)entry);
+
+ printk(KERN_INFO "prom_startcpu returned :)\n");
+
+ /* wheee... it's going... */
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_callin_map[i])
+ break;
+ udelay(200);
+ }
- /*
- * Initialize the contexts table
- * Since the call to prom_startcpu() trashes the structure,
- * we need to re-initialize it for each cpu
- */
- smp_penguin_ctable.which_io = 0;
- smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
- smp_penguin_ctable.reg_size = 0;
-
- /* whirrr, whirrr, whirrrrrrrrr... */
- SMP_PRINTK(("Starting CPU %d at %p\n", i, entry));
- local_flush_cache_all();
- prom_startcpu(cpu_node,
- &smp_penguin_ctable, 0, (char *)entry);
-
- SMP_PRINTK(("prom_startcpu returned :)\n"));
-
- /* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
- break;
- udelay(200);
- }
-
if (!(cpu_callin_map[i])) {
- printk("Processor %d is stuck.\n", i);
+ printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
@@ -255,14 +209,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
unsigned long arg2, unsigned long arg3,
unsigned long arg4)
{
- if(smp_processors_ready) {
+ if (smp_processors_ready) {
register int high = smp_highest_cpu;
unsigned long flags;
spin_lock_irqsave(&cross_call_lock, flags);
{
- /* If you make changes here, make sure gcc generates proper code... */
+ /*
+ * If you make changes here, make sure
+ * gcc generates proper code...
+ */
register smpfunc_t f asm("i0") = func;
register unsigned long a1 asm("i1") = arg1;
register unsigned long a2 asm("i2") = arg2;
@@ -284,7 +241,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpu_clear(smp_processor_id(), mask);
cpus_and(mask, cpu_online_map, mask);
- for(i = 0; i <= high; i++) {
+ for (i = 0; i <= high; i++) {
if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
@@ -300,17 +257,17 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_in[i])
+ while (!ccall_info.processors_in[i])
barrier();
- } while(++i <= high);
+ } while (++i <= high);
i = 0;
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_out[i])
+ while (!ccall_info.processors_out[i])
barrier();
- } while(++i <= high);
+ } while (++i <= high);
}
spin_unlock_irqrestore(&cross_call_lock, flags);
@@ -336,7 +293,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd };
old_regs = set_irq_regs(regs);
- bw_get_prof_limit(cpu);
+ bw_get_prof_limit(cpu);
bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */
cpu_tick[cpu]++;
@@ -349,7 +306,7 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
- if(!--prof_counter(cpu)) {
+ if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
@@ -361,8 +318,6 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-extern unsigned int lvl14_resolution;
-
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = hard_smp4d_processor_id();
@@ -374,16 +329,16 @@ static void __cpuinit smp_setup_percpu_timer(void)
void __init smp4d_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
-
+
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
- addr[1] = 0x01000000; /* nop */
- addr[2] = 0x01000000; /* nop */
+ addr[1] = 0x01000000; /* nop */
+ addr[2] = 0x01000000; /* nop */
}
void __init smp4d_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
-
+
addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */
addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */
addr[4] = 0x01000000; /* nop */
@@ -392,17 +347,16 @@ void __init smp4d_blackbox_current(unsigned *addr)
void __init sun4d_init_smp(void)
{
int i;
- extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[];
/* Patch ipi15 trap table */
t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m);
-
+
/* And set btfixup... */
BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
-
+
for (i = 0; i < NR_CPUS; i++) {
ccall_info.processors_in[i] = 1;
ccall_info.processors_out[i] = 1;
diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c
index 7f3b97ff62c1..69df6257a32e 100644
--- a/arch/sparc/kernel/sun4m_irq.c
+++ b/arch/sparc/kernel/sun4m_irq.c
@@ -1,5 +1,5 @@
-/* sun4m_irq.c
- * arch/sparc/kernel/sun4m_irq.c:
+/*
+ * sun4m irq support
*
* djhr: Hacked out of irq.c into a CPU dependent version.
*
@@ -9,101 +9,44 @@
* Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
*/
-#include <linux/errno.h>
-#include <linux/linkage.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/psr.h>
-#include <asm/vaddrs.h>
#include <asm/timer.h>
-#include <asm/openprom.h>
-#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include "irq.h"
+#include "kernel.h"
-struct sun4m_irq_percpu {
- u32 pending;
- u32 clear;
- u32 set;
-};
-
-struct sun4m_irq_global {
- u32 pending;
- u32 mask;
- u32 mask_clear;
- u32 mask_set;
- u32 interrupt_target;
-};
-
-/* Code in entry.S needs to get at these register mappings. */
-struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
-struct sun4m_irq_global __iomem *sun4m_irq_global;
-
-/* Dave Redman (djhr@tadpole.co.uk)
- * The sun4m interrupt registers.
- */
-#define SUN4M_INT_ENABLE 0x80000000
-#define SUN4M_INT_E14 0x00000080
-#define SUN4M_INT_E10 0x00080000
-
-#define SUN4M_HARD_INT(x) (0x000000001 << (x))
-#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
-
-#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
-#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
-#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
-#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
-#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
-#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
-#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
-#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
-#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
-#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
-#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
-#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
-#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
-#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
-#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
-#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
-
-#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
- SUN4M_INT_M2S_WRITE_ERR | \
- SUN4M_INT_ECC_ERR | \
- SUN4M_INT_VME_ERR)
-
-#define SUN4M_INT_SBUS(x) (1 << (x+7))
-#define SUN4M_INT_VME(x) (1 << (x))
-
-/* Interrupt levels used by OBP */
-#define OBP_INT_LEVEL_SOFT 0x10
-#define OBP_INT_LEVEL_ONBOARD 0x20
-#define OBP_INT_LEVEL_SBUS 0x30
-#define OBP_INT_LEVEL_VME 0x40
-
-/* Interrupt level assignment on sun4m:
+/* Sample sun4m IRQ layout:
+ *
+ * 0x22 - Power
+ * 0x24 - ESP SCSI
+ * 0x26 - Lance ethernet
+ * 0x2b - Floppy
+ * 0x2c - Zilog uart
+ * 0x32 - SBUS level 0
+ * 0x33 - Parallel port, SBUS level 1
+ * 0x35 - SBUS level 2
+ * 0x37 - SBUS level 3
+ * 0x39 - Audio, Graphics card, SBUS level 4
+ * 0x3b - SBUS level 5
+ * 0x3d - SBUS level 6
+ *
+ * Each interrupt source has a mask bit in the interrupt registers.
+ * When the mask bit is set, interrupt delivery is blocked. So you
+ * clear the bit to enable the interrupt.
+ *
+ * Interrupts numbered less than 0x10 are software-triggered interrupts
+ * and are unused by Linux.
+ *
+ * Interrupt level assignment on sun4m:
*
* level source
* ------------------------------------------------------------
- * 1 softint-1
+ * 1 softint-1
* 2 softint-2, VME/SBUS level 1
* 3 softint-3, VME/SBUS level 2
* 4 softint-4, onboard SCSI
@@ -138,10 +81,10 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
* 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and
* Tadpole S3 GX systems.
*
- * esp: 0x24 onboard ESP SCSI
- * le: 0x26 onboard Lance ETHERNET
+ * esp: 0x24 onboard ESP SCSI
+ * le: 0x26 onboard Lance ETHERNET
* p9100: 0x32 SBUS level 1 P9100 video
- * bpp: 0x33 SBUS level 2 BPP parallel port device
+ * bpp: 0x33 SBUS level 2 BPP parallel port device
* DBRI: 0x39 SBUS level 5 DBRI ISDN audio
* SUNW,leo: 0x39 SBUS level 5 LEO video
* pcmcia: 0x3b SBUS level 6 PCMCIA controller
@@ -152,8 +95,57 @@ struct sun4m_irq_global __iomem *sun4m_irq_global;
* power: 0x22 onboard power device (XXX unknown mask bit XXX)
*/
+
+/* Code in entry.S needs to get at these register mappings. */
+struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS];
+struct sun4m_irq_global __iomem *sun4m_irq_global;
+
+/* Dave Redman (djhr@tadpole.co.uk)
+ * The sun4m interrupt registers.
+ */
+#define SUN4M_INT_ENABLE 0x80000000
+#define SUN4M_INT_E14 0x00000080
+#define SUN4M_INT_E10 0x00080000
+
+#define SUN4M_HARD_INT(x) (0x000000001 << (x))
+#define SUN4M_SOFT_INT(x) (0x000010000 << (x))
+
+#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */
+#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */
+#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */
+#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */
+#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */
+#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */
+#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */
+#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */
+#define SUN4M_INT_REALTIME 0x00080000 /* system timer */
+#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */
+#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */
+#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */
+#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */
+#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */
+#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */
+#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */
+
+#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \
+ SUN4M_INT_M2S_WRITE_ERR | \
+ SUN4M_INT_ECC_ERR | \
+ SUN4M_INT_VME_ERR)
+
+#define SUN4M_INT_SBUS(x) (1 << (x+7))
+#define SUN4M_INT_VME(x) (1 << (x))
+
+/* Interrupt levels used by OBP */
+#define OBP_INT_LEVEL_SOFT 0x10
+#define OBP_INT_LEVEL_ONBOARD 0x20
+#define OBP_INT_LEVEL_SBUS 0x30
+#define OBP_INT_LEVEL_VME 0x40
+
+#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
+#define SUM4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14)
+
static unsigned long irq_mask[0x50] = {
- /* SMP */
+ /* 0x00 - SMP */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -162,7 +154,7 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
- /* soft */
+ /* 0x10 - soft */
0, SUN4M_SOFT_INT(1),
SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3),
SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5),
@@ -171,19 +163,19 @@ static unsigned long irq_mask[0x50] = {
SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11),
SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13),
SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15),
- /* onboard */
+ /* 0x20 - onboard */
0, 0, 0, 0,
SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0,
SUN4M_INT_VIDEO, SUN4M_INT_MODULE,
SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY,
(SUN4M_INT_SERIAL | SUN4M_INT_KBDMS),
SUN4M_INT_AUDIO, 0, SUN4M_INT_MODULE_ERR,
- /* sbus */
+ /* 0x30 - sbus */
0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1),
0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3),
0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5),
0, SUN4M_INT_SBUS(6), 0, 0,
- /* vme */
+ /* 0x40 - vme */
0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1),
0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3),
0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5),
@@ -193,7 +185,7 @@ static unsigned long irq_mask[0x50] = {
static unsigned long sun4m_get_irqmask(unsigned int irq)
{
unsigned long mask;
-
+
if (irq < 0x50)
mask = irq_mask[irq];
else
@@ -217,7 +209,7 @@ static void sun4m_disable_irq(unsigned int irq_nr)
sbus_writel(mask, &sun4m_irq_global->mask_set);
else
sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
- local_irq_restore(flags);
+ local_irq_restore(flags);
}
static void sun4m_enable_irq(unsigned int irq_nr)
@@ -226,17 +218,17 @@ static void sun4m_enable_irq(unsigned int irq_nr)
int cpu = smp_processor_id();
/* Dreadful floppy hack. When we use 0x2b instead of
- * 0x0b the system blows (it starts to whistle!).
- * So we continue to use 0x0b. Fixme ASAP. --P3
- */
- if (irq_nr != 0x0b) {
+ * 0x0b the system blows (it starts to whistle!).
+ * So we continue to use 0x0b. Fixme ASAP. --P3
+ */
+ if (irq_nr != 0x0b) {
mask = sun4m_get_irqmask(irq_nr);
local_irq_save(flags);
if (irq_nr > 15)
sbus_writel(mask, &sun4m_irq_global->mask_clear);
else
sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
- local_irq_restore(flags);
+ local_irq_restore(flags);
} else {
local_irq_save(flags);
sbus_writel(SUN4M_INT_FLOPPY, &sun4m_irq_global->mask_clear);
@@ -260,7 +252,7 @@ static unsigned long cpu_pil_to_imask[16] = {
/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS,
/*13*/ SUN4M_INT_SBUS(6) | SUN4M_INT_VME(6) | SUN4M_INT_AUDIO,
/*14*/ SUN4M_INT_E14,
-/*15*/ SUN4M_INT_ERROR
+/*15*/ SUN4M_INT_ERROR,
};
/* We assume the caller has disabled local interrupts when these are called,
@@ -280,12 +272,14 @@ static void sun4m_enable_pil_irq(unsigned int pil)
static void sun4m_send_ipi(int cpu, int level)
{
unsigned long mask = sun4m_get_irqmask(level);
+
sbus_writel(mask, &sun4m_irq_percpu[cpu]->set);
}
static void sun4m_clear_ipi(int cpu, int level)
{
unsigned long mask = sun4m_get_irqmask(level);
+
sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
}
@@ -314,7 +308,6 @@ struct sun4m_timer_global {
static struct sun4m_timer_global __iomem *timers_global;
-#define TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10)
unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10);
@@ -391,7 +384,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
master_l10_counter = &timers_global->l10_count;
- err = request_irq(TIMER_IRQ, counter_fn,
+ err = request_irq(SUN4M_TIMER_IRQ, counter_fn,
(IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL);
if (err) {
printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n",
@@ -407,7 +400,6 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
#ifdef CONFIG_SMP
{
unsigned long flags;
- extern unsigned long lvl14_save[4];
struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)];
/* For SMP we use the level 14 ticker, however the bootup code
@@ -466,7 +458,9 @@ void __init sun4m_init_IRQ(void)
BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM);
- sparc_init_timers = sun4m_init_timers;
+
+ sparc_irq_config.init_timers = sun4m_init_timers;
+
#ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM);
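For reference, the enable/disable paths above reduce to a mask lookup plus a
single register write: IRQs above 15 are machine-wide and go through the
global mask registers, while 0-15 are per-CPU. A condensed C sketch of
sun4m_enable_irq() with the local_irq_save()/restore() pair and the floppy
quirk omitted (a sketch only, not a drop-in replacement):

    static void sun4m_enable_irq_sketch(unsigned int irq_nr, int cpu)
    {
            unsigned long mask = sun4m_get_irqmask(irq_nr);

            if (irq_nr > 15)        /* machine-wide interrupt */
                    sbus_writel(mask, &sun4m_irq_global->mask_clear);
            else                    /* per-CPU soft interrupt */
                    sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear);
    }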
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 762d6eedd944..5cc7dc51de3d 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -1,59 +1,22 @@
-/* sun4m_smp.c: Sparc SUN4M SMP support.
+/*
+ * sun4m SMP support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/head.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-#include <asm/irq_regs.h>
-
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
-
-#include <asm/irq.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
-#include <asm/cpudata.h>
#include "irq.h"
+#include "kernel.h"
#define IRQ_CROSS_CALL 15
-extern ctxd_t *srmmu_ctx_table_phys;
-
-extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern unsigned char boot_cpu_id;
-
-extern cpumask_t smp_commenced_mask;
-
-extern int __smp4m_processor_id(void);
-
-/*#define SMP_DEBUG*/
-
-#ifdef SMP_DEBUG
-#define SMP_PRINTK(x) printk x
-#else
-#define SMP_PRINTK(x)
-#endif
-
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
@@ -64,7 +27,6 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
}
static void smp_setup_percpu_timer(void);
-extern void cpu_probe(void);
void __cpuinit smp4m_callin(void)
{
@@ -96,7 +58,7 @@ void __cpuinit smp4m_callin(void)
/* XXX: What's up with all the flushes? */
local_flush_cache_all();
local_flush_tlb_all();
-
+
cpu_probe();
/* Fix idle thread fields. */
@@ -119,9 +81,6 @@ void __cpuinit smp4m_callin(void)
/*
* Cycle through the processors asking the PROM to start each one.
*/
-
-extern struct linux_prom_registers smp_penguin_ctable;
-
void __init smp4m_boot_cpus(void)
{
smp_setup_percpu_timer();
@@ -130,7 +89,6 @@ void __init smp4m_boot_cpus(void)
int __cpuinit smp4m_boot_one_cpu(int i)
{
- extern unsigned long sun4m_cpu_startup;
unsigned long *entry = &sun4m_cpu_startup;
struct task_struct *p;
int timeout;
@@ -142,7 +100,7 @@ int __cpuinit smp4m_boot_one_cpu(int i)
p = fork_idle(i);
current_set[i] = task_thread_info(p);
/* See trampoline.S for details... */
- entry += ((i-1) * 3);
+ entry += ((i - 1) * 3);
/*
* Initialize the contexts table
@@ -154,20 +112,19 @@ int __cpuinit smp4m_boot_one_cpu(int i)
smp_penguin_ctable.reg_size = 0;
/* whirrr, whirrr, whirrrrrrrrr... */
- printk("Starting CPU %d at %p\n", i, entry);
+ printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
local_flush_cache_all();
- prom_startcpu(cpu_node,
- &smp_penguin_ctable, 0, (char *)entry);
+ prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);
/* wheee... it's going... */
- for(timeout = 0; timeout < 10000; timeout++) {
- if(cpu_callin_map[i])
+ for (timeout = 0; timeout < 10000; timeout++) {
+ if (cpu_callin_map[i])
break;
udelay(200);
}
if (!(cpu_callin_map[i])) {
- printk("Processor %d is stuck.\n", i);
+ printk(KERN_ERR "Processor %d is stuck.\n", i);
return -ENODEV;
}
@@ -202,6 +159,7 @@ void __init smp4m_smp_done(void)
void smp4m_irq_rotate(int cpu)
{
int next = cpu_data(cpu).next;
+
if (next != cpu)
set_irq_udt(next);
}
@@ -243,7 +201,7 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
cpu_clear(smp_processor_id(), mask);
cpus_and(mask, cpu_online_map, mask);
- for(i = 0; i < ncpus; i++) {
+ for (i = 0; i < ncpus; i++) {
if (cpu_isset(i, mask)) {
ccall_info.processors_in[i] = 0;
ccall_info.processors_out[i] = 0;
@@ -262,19 +220,18 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1,
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_in[i])
+ while (!ccall_info.processors_in[i])
barrier();
- } while(++i < ncpus);
+ } while (++i < ncpus);
i = 0;
do {
if (!cpu_isset(i, mask))
continue;
- while(!ccall_info.processors_out[i])
+ while (!ccall_info.processors_out[i])
barrier();
- } while(++i < ncpus);
+ } while (++i < ncpus);
}
-
spin_unlock_irqrestore(&cross_call_lock, flags);
}
@@ -289,8 +246,6 @@ void smp4m_cross_call_irq(void)
ccall_info.processors_out[i] = 1;
}
-extern void sun4m_clear_profile_irq(int cpu);
-
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs;
@@ -302,7 +257,7 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
profile_tick(CPU_PROFILING);
- if(!--prof_counter(cpu)) {
+ if (!--prof_counter(cpu)) {
int user = user_mode(regs);
irq_enter();
@@ -314,8 +269,6 @@ void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-extern unsigned int lvl14_resolution;
-
static void __cpuinit smp_setup_percpu_timer(void)
{
int cpu = smp_processor_id();
@@ -323,7 +276,7 @@ static void __cpuinit smp_setup_percpu_timer(void)
prof_counter(cpu) = prof_multiplier(cpu) = 1;
load_profile_irq(cpu, lvl14_resolution);
- if(cpu == boot_cpu_id)
+ if (cpu == boot_cpu_id)
enable_pil_irq(14);
}
@@ -331,9 +284,9 @@ static void __init smp4m_blackbox_id(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
-
+
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
- addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
+ addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */
addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */
}
@@ -341,9 +294,9 @@ static void __init smp4m_blackbox_current(unsigned *addr)
{
int rd = *addr & 0x3e000000;
int rs1 = rd >> 11;
-
+
addr[0] = 0x81580000 | rd; /* rd %tbr, reg */
- addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
+ addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */
addr[4] = 0x8008200c | rd | rs1; /* and reg, 0xc, reg */
}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index f836f4e93afe..96082d30def0 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -360,20 +360,25 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
}
EXPORT_SYMBOL(get_fb_unmapped_area);
-/* Essentially the same as PowerPC... */
-void arch_pick_mmap_layout(struct mm_struct *mm)
+/* Essentially the same as PowerPC. */
+static unsigned long mmap_rnd(void)
{
- unsigned long random_factor = 0UL;
- unsigned long gap;
+ unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
- random_factor = get_random_int();
+ unsigned long val = get_random_int();
if (test_thread_flag(TIF_32BIT))
- random_factor &= ((1 * 1024 * 1024) - 1);
+ rnd = (val % (1UL << (22UL-PAGE_SHIFT)));
else
- random_factor = ((random_factor << PAGE_SHIFT) &
- 0xffffffffUL);
+ rnd = (val % (1UL << (29UL-PAGE_SHIFT)));
}
+ return (rnd << PAGE_SHIFT) * 2;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+ unsigned long random_factor = mmap_rnd();
+ unsigned long gap;
/*
* Fall back to the standard layout if the personality
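With sparc64's 8 KiB pages (PAGE_SHIFT = 13), the new mmap_rnd() above yields
a page-aligned offset below 2^23 (8 MiB) for 32-bit tasks and below 2^30
(1 GiB) for 64-bit tasks. A standalone sketch of the same arithmetic (the
random source and the PAGE_SHIFT definition are stand-ins for the kernel's):

    #define PAGE_SHIFT 13UL         /* sparc64 page size: 8 KiB */

    static unsigned long mmap_rnd_sketch(unsigned long val, int is_32bit)
    {
            unsigned long rnd;

            if (is_32bit)
                    rnd = val % (1UL << (22UL - PAGE_SHIFT));
            else
                    rnd = val % (1UL << (29UL - PAGE_SHIFT));
            return (rnd << PAGE_SHIFT) * 2;  /* page-aligned, doubled */
    }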
diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c
deleted file mode 100644
index 138bbf5f8724..000000000000
--- a/arch/sparc/kernel/tick14.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/* tick14.c
- *
- * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
- *
- * This file handles the Sparc specific level14 ticker
- * This is really useful for profiling OBP uses it for keyboard
- * aborts and other stuff.
- */
-#include <linux/kernel.h>
-
-extern unsigned long lvl14_save[5];
-static unsigned long *linux_lvl14 = NULL;
-static unsigned long obp_lvl14[4];
-
-/*
- * Call with timer IRQ closed.
- * First time we do it with disable_irq, later prom code uses spin_lock_irq().
- */
-void install_linux_ticker(void)
-{
-
- if (!linux_lvl14)
- return;
- linux_lvl14[0] = lvl14_save[0];
- linux_lvl14[1] = lvl14_save[1];
- linux_lvl14[2] = lvl14_save[2];
- linux_lvl14[3] = lvl14_save[3];
-}
-
-void install_obp_ticker(void)
-{
-
- if (!linux_lvl14)
- return;
- linux_lvl14[0] = obp_lvl14[0];
- linux_lvl14[1] = obp_lvl14[1];
- linux_lvl14[2] = obp_lvl14[2];
- linux_lvl14[3] = obp_lvl14[3];
-}
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 19ab42a932db..8237dd4dfeb4 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -219,7 +219,7 @@ static void __init sbus_time_init(void)
btfixup();
- sparc_init_timers(timer_interrupt);
+ sparc_irq_config.init_timers(timer_interrupt);
}
void __init time_init(void)
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index e1862793a61d..95ec25faba39 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -816,14 +816,12 @@ void __init time_init(void)
clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT);
clocksource_tick.name = tick_ops->name;
- clocksource_calc_mult_shift(&clocksource_tick, freq, 4);
clocksource_tick.read = clocksource_tick_read;
+ clocksource_register_hz(&clocksource_tick, freq);
printk("clocksource: mult[%x] shift[%d]\n",
clocksource_tick.mult, clocksource_tick.shift);
- clocksource_register(&clocksource_tick);
-
sparc64_clockevent.name = tick_ops->name;
clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4);
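The conversion being set up here is the standard ns = (cycles * mult) >> shift;
clocksource_register_hz() now derives mult and shift from the tick frequency
internally, replacing the open-coded clocksource_calc_mult_shift() call, with
mult chosen so that (freq * mult) >> shift approximates NSEC_PER_SEC. A
one-line sketch of the conversion (illustrative parameters, not the values
the core computes):

    /* Sketch: cycle-to-nanosecond conversion of a registered clocksource. */
    static inline unsigned long long cyc2ns(unsigned long long cyc,
                                            unsigned int mult,
                                            unsigned int shift)
    {
            return (cyc * mult) >> shift;
    }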
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 1e9770936c3b..1ed547bd850f 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2152,7 +2152,7 @@ static void user_instruction_dump(unsigned int __user *pc)
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
- unsigned long fp, thread_base, ksp;
+ unsigned long fp, ksp;
struct thread_info *tp;
int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -2173,7 +2173,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
flushw_all();
fp = ksp + STACK_BIAS;
- thread_base = (unsigned long) tp;
printk("Call Trace:\n");
do {
diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
index be183fe41443..1c8d33228b2a 100644
--- a/arch/sparc/kernel/una_asm_64.S
+++ b/arch/sparc/kernel/una_asm_64.S
@@ -127,7 +127,7 @@ do_int_load:
wr %o5, 0x0, %asi
retl
mov 0, %o0
- .size __do_int_load, .-__do_int_load
+ .size do_int_load, .-do_int_load
.section __ex_table,"a"
.word 4b, __retl_efault
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 5b836f5aea90..b10ac4d62378 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -240,11 +240,10 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* only copy the information from the master page table,
* nothing more.
*/
+ code = SEGV_MAPERR;
if (!ARCH_SUN4C && address >= TASK_SIZE)
goto vmalloc_fault;
- code = SEGV_MAPERR;
-
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
diff --git a/arch/sparc/prom/misc_32.c b/arch/sparc/prom/misc_32.c
index 8c278c311ba4..677b6a10fbde 100644
--- a/arch/sparc/prom/misc_32.c
+++ b/arch/sparc/prom/misc_32.c
@@ -54,15 +54,11 @@ EXPORT_SYMBOL(prom_feval);
void
prom_cmdline(void)
{
- extern void install_obp_ticker(void);
- extern void install_linux_ticker(void);
unsigned long flags;
spin_lock_irqsave(&prom_lock, flags);
- install_obp_ticker();
(*(romvec->pv_abort))();
restore_current();
- install_linux_ticker();
spin_unlock_irqrestore(&prom_lock, flags);
set_auxio(AUXIO_LED, 0);
}
diff --git a/arch/unicore32/.gitignore b/arch/unicore32/.gitignore
new file mode 100644
index 000000000000..947e99c2a957
--- /dev/null
+++ b/arch/unicore32/.gitignore
@@ -0,0 +1,21 @@
+#
+# Generated include files
+#
+include/generated
+#
+# Generated ld script file
+#
+kernel/vmlinux.lds
+#
+# Generated images in boot
+#
+boot/Image
+boot/zImage
+boot/uImage
+#
+# Generated files in boot/compressed
+#
+boot/compressed/piggy.S
+boot/compressed/piggy.gzip
+boot/compressed/vmlinux
+boot/compressed/vmlinux.lds
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
new file mode 100644
index 000000000000..4a36db45fb3d
--- /dev/null
+++ b/arch/unicore32/Kconfig
@@ -0,0 +1,275 @@
+config UNICORE32
+ def_bool y
+ select HAVE_MEMBLOCK
+ select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_DMA_ATTRS
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select HAVE_KERNEL_LZO
+ select HAVE_KERNEL_LZMA
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_IRQ_PROBE
+ select GENERIC_HARDIRQS_NO_DEPRECATED
+ select ARCH_WANT_FRAME_POINTERS
+ help
+ UniCore-32 is a 32-bit Instruction Set Architecture,
+ including a series of low-power-consumption RISC chip
+ designs licensed by PKUnity Ltd.
+ Please see web page at <http://www.pkunity.com/>.
+
+config HAVE_PWM
+ bool
+
+config GENERIC_GPIO
+ def_bool y
+
+config GENERIC_CLOCKEVENTS
+ bool
+
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_IOMAP
+ def_bool y
+
+config NO_IOPORT
+ bool
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config RWSEM_XCHGADD_ALGORITHM
+ bool
+
+config ARCH_HAS_ILOG2_U32
+ bool
+
+config ARCH_HAS_ILOG2_U64
+ bool
+
+config ARCH_HAS_CPUFREQ
+ bool
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config ARCH_MAY_HAVE_PC_FDC
+ bool
+
+config NEED_DMA_MAP_STATE
+ def_bool y
+
+source "init/Kconfig"
+
+source "kernel/Kconfig.freezer"
+
+menu "System Type"
+
+config MMU
+ def_bool y
+
+config ARCH_FPGA
+ bool
+
+config ARCH_PUV3
+ def_bool y
+ select CPU_UCV2
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
+ select ARCH_REQUIRE_GPIOLIB
+ select ARCH_HAS_CPUFREQ
+
+# CONFIGs for ARCH_PUV3
+
+if ARCH_PUV3
+
+choice
+ prompt "Board Selection"
+ default PUV3_DB0913
+
+config PUV3_FPGA_DLX200
+ select ARCH_FPGA
+ bool "FPGA board"
+
+config PUV3_DB0913
+ bool "DEBUG board (0913)"
+
+config PUV3_NB0916
+ bool "NetBook board (0916)"
+ select HAVE_PWM
+
+config PUV3_SMW0919
+ bool "Security Mini-Workstation board (0919)"
+
+endchoice
+
+config PUV3_PM
+ def_bool y if !ARCH_FPGA
+
+endif
+
+source "arch/unicore32/mm/Kconfig"
+
+comment "Floating poing support"
+
+config UNICORE_FPU_F64
+ def_bool y if !ARCH_FPGA
+
+endmenu
+
+menu "Bus support"
+
+config PCI
+ bool "PCI Support"
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+ VESA. If you have PCI, say Y, otherwise N.
+
+source "drivers/pci/Kconfig"
+
+source "drivers/pcmcia/Kconfig"
+
+endmenu
+
+menu "Kernel Features"
+
+source "kernel/time/Kconfig"
+
+source "kernel/Kconfig.preempt"
+
+source "kernel/Kconfig.hz"
+
+source "mm/Kconfig"
+
+config LEDS
+ def_bool y
+ depends on GENERIC_GPIO
+
+config ALIGNMENT_TRAP
+ def_bool y
+ help
+ Unicore processors cannot fetch/store information which is not
+ naturally aligned on the bus, i.e., a 4 byte fetch must start at an
+ address divisible by 4. On 32-bit Unicore processors, these non-aligned
+ fetch/store instructions will be emulated in software if you say
+ here, which has a severe performance impact. This is necessary for
+ correct operation of some network protocols. With an IP-only
+ configuration it is safe to say N, otherwise say Y.
+
+endmenu
+
+menu "Boot options"
+
+config CMDLINE
+ string "Default kernel command string"
+ default ""
+
+config CMDLINE_FORCE
+ bool "Always use the default kernel command string"
+ depends on CMDLINE != ""
+ help
+ Always use the default kernel command string, even if the boot
+ loader passes other arguments to the kernel.
+ This is useful if you cannot or don't want to change the
+ command-line options your boot loader passes to the kernel.
+
+ If unsure, say N.
+
+endmenu
+
+menu "Userspace binary formats"
+
+source "fs/Kconfig.binfmt"
+
+endmenu
+
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+if ARCH_HAS_CPUFREQ
+source "drivers/cpufreq/Kconfig"
+endif
+
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y if !ARCH_FPGA
+
+config ARCH_HIBERNATION_POSSIBLE
+ def_bool y if !ARCH_FPGA
+
+endmenu
+
+source "net/Kconfig"
+
+if ARCH_PUV3
+
+config PUV3_GPIO
+ bool
+ depends on !ARCH_FPGA
+ select GENERIC_GPIO
+ select GPIO_SYSFS if EXPERIMENTAL
+ default y
+
+config PUV3_PWM
+ tristate
+ default BACKLIGHT_PWM
+ help
+ Enable support for NB0916 PWM controllers
+
+config PUV3_RTC
+ tristate "PKUnity v3 RTC Support"
+ depends on !ARCH_FPGA
+
+if PUV3_NB0916
+
+menu "PKUnity NetBook-0916 Features"
+
+config I2C_BATTERY_BQ27200
+ tristate "I2C Battery BQ27200 Support"
+ select PUV3_I2C
+ select POWER_SUPPLY
+ select BATTERY_BQ27x00
+
+config I2C_EEPROM_AT24
+ tristate "I2C EEPROMs AT24 support"
+ select PUV3_I2C
+ select MISC_DEVICES
+ select EEPROM_AT24
+
+config LCD_BACKLIGHT
+ tristate "LCD Backlight support"
+ select BACKLIGHT_LCD_SUPPORT
+ select BACKLIGHT_PWM
+
+endmenu
+
+endif
+
+endif
+
+source "drivers/Kconfig"
+
+source "fs/Kconfig"
+
+source "arch/unicore32/Kconfig.debug"
+
+source "security/Kconfig"
+
+source "crypto/Kconfig"
+
+source "lib/Kconfig"
diff --git a/arch/unicore32/Kconfig.debug b/arch/unicore32/Kconfig.debug
new file mode 100644
index 000000000000..3140151ede45
--- /dev/null
+++ b/arch/unicore32/Kconfig.debug
@@ -0,0 +1,68 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU
+ ---help---
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel.
+
+ If this option is switched on, the /dev/mem file only allows
+ userspace access to memory mapped peripherals.
+
+ If in doubt, say Y.
+
+config EARLY_PRINTK
+ def_bool DEBUG_OCD
+ help
+ Write kernel log output directly into the OCD or to a serial port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+ with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config DEBUG_STACK_USAGE
+ bool "Enable stack utilization instrumentation"
+ depends on DEBUG_KERNEL
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T output.
+
+# These options are only for real kernel hackers who want to get their hands dirty.
+config DEBUG_LL
+ bool "Kernel low-level debugging functions"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to include definitions of printascii, printch, printhex
+ in the kernel. This is helpful if you are debugging code that
+ executes before the console is initialized.
+
+config DEBUG_OCD
+ bool "Kernel low-level debugging via On-Chip-Debugger"
+ depends on DEBUG_LL
+ default y
+ help
+ Say Y here if you want the debug print routines to direct their
+ output to the UniCore On-Chip-Debugger channel using CP #1.
+
+config DEBUG_OCD_BREAKPOINT
+ bool "Breakpoint support via On-Chip-Debugger"
+ depends on DEBUG_OCD
+
+config DEBUG_UART
+ int "Kernel low-level debugging messages via serial port"
+ depends on DEBUG_LL
+ range 0 1
+ default "0"
+ help
+ Choice of UART for kernel low-level debugging messages, using the
+ PKUnity UARTs; must be either zero or one. The port must have been
+ initialised by the boot-loader before use.
+
+endmenu
diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile
new file mode 100644
index 000000000000..e08d6d370a8a
--- /dev/null
+++ b/arch/unicore32/Makefile
@@ -0,0 +1,95 @@
+#
+# arch/unicore32/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2002~2010 by Guan Xue-tao
+#
+ifneq ($(SUBARCH),$(ARCH))
+ ifeq ($(CROSS_COMPILE),)
+ CROSS_COMPILE := $(call cc-cross-prefix, unicore32-linux-)
+ endif
+endif
+
+LDFLAGS_vmlinux := -p --no-undefined -X
+
+OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+# Never generate .eh_frame
+KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+
+# Never use hard float in kernel
+KBUILD_CFLAGS += -msoft-float
+
+ifeq ($(CONFIG_FRAME_POINTER),y)
+KBUILD_CFLAGS += -mno-sched-prolog
+endif
+
+CHECKFLAGS += -D__unicore32__
+
+head-y := arch/unicore32/kernel/head.o
+head-y += arch/unicore32/kernel/init_task.o
+
+core-y += arch/unicore32/kernel/
+core-y += arch/unicore32/mm/
+
+libs-y += arch/unicore32/lib/
+
+ASM_GENERATED_DIR := $(srctree)/arch/unicore32/include/generated
+LINUXINCLUDE += -I$(ASM_GENERATED_DIR)
+
+ASM_GENERIC_HEADERS := atomic.h auxvec.h
+ASM_GENERIC_HEADERS += bitsperlong.h bug.h bugs.h
+ASM_GENERIC_HEADERS += cputime.h current.h
+ASM_GENERIC_HEADERS += device.h div64.h
+ASM_GENERIC_HEADERS += emergency-restart.h errno.h
+ASM_GENERIC_HEADERS += fb.h fcntl.h ftrace.h
+ASM_GENERIC_HEADERS += hardirq.h hw_irq.h
+ASM_GENERIC_HEADERS += ioctl.h ioctls.h ipcbuf.h irq_regs.h
+ASM_GENERIC_HEADERS += kdebug.h kmap_types.h
+ASM_GENERIC_HEADERS += local.h
+ASM_GENERIC_HEADERS += mman.h module.h msgbuf.h
+ASM_GENERIC_HEADERS += param.h parport.h percpu.h poll.h posix_types.h
+ASM_GENERIC_HEADERS += resource.h
+ASM_GENERIC_HEADERS += scatterlist.h sections.h segment.h sembuf.h serial.h
+ASM_GENERIC_HEADERS += setup.h shmbuf.h shmparam.h
+ASM_GENERIC_HEADERS += siginfo.h signal.h sizes.h
+ASM_GENERIC_HEADERS += socket.h sockios.h stat.h statfs.h swab.h syscalls.h
+ASM_GENERIC_HEADERS += termbits.h termios.h topology.h types.h
+ASM_GENERIC_HEADERS += ucontext.h unaligned.h user.h
+ASM_GENERIC_HEADERS += vga.h
+ASM_GENERIC_HEADERS += xor.h
+
+archprepare:
+ifneq ($(ASM_GENERATED_DIR), $(wildcard $(ASM_GENERATED_DIR)))
+ $(Q)mkdir -p $(ASM_GENERATED_DIR)/asm
+ $(Q)$(foreach a, $(ASM_GENERIC_HEADERS), \
+ echo '#include <asm-generic/$a>' \
+ > $(ASM_GENERATED_DIR)/asm/$a; )
+endif
+
+boot := arch/unicore32/boot
+
+# Default target when executing plain make
+KBUILD_IMAGE := zImage
+
+all: $(KBUILD_IMAGE)
+
+zImage Image uImage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+MRPROPER_DIRS += $(ASM_GENERATED_DIR)
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+define archhelp
+ echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
+ echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
+ echo ' uImage - U-Boot wrapped zImage'
+endef
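Each wrapper emitted by the archprepare loop above is a one-line header that
forwards to the asm-generic version; for example, the generated
include/generated/asm/div64.h would contain just:

    /* generated by archprepare -- forwards to the asm-generic header */
    #include <asm-generic/div64.h>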
diff --git a/arch/unicore32/boot/Makefile b/arch/unicore32/boot/Makefile
new file mode 100644
index 000000000000..79e5f88845d9
--- /dev/null
+++ b/arch/unicore32/boot/Makefile
@@ -0,0 +1,47 @@
+#
+# arch/unicore32/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2001~2010 GUAN Xue-tao
+#
+
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+targets := Image zImage uImage
+
+$(obj)/Image: vmlinux FORCE
+ $(call if_changed,objcopy)
+ @echo ' Kernel: $@ is ready'
+
+$(obj)/compressed/vmlinux: $(obj)/Image FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/compressed $@
+
+$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
+ $(call if_changed,objcopy)
+ @echo ' Kernel: $@ is ready'
+
+quiet_cmd_uimage = UIMAGE $@
+ cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A unicore -O linux -T kernel \
+ -C none -a $(LOADADDR) -e $(STARTADDR) \
+ -n 'Linux-$(KERNELRELEASE)' -d $< $@
+
+$(obj)/uImage: LOADADDR=0x0
+
+$(obj)/uImage: STARTADDR=$(LOADADDR)
+
+$(obj)/uImage: $(obj)/zImage FORCE
+ $(call if_changed,uimage)
+ @echo ' Image $@ is ready'
+
+PHONY += initrd FORCE
+initrd:
+ @test "$(INITRD)" != "" || \
+ (echo You must specify INITRD; exit -1)
+
+subdir- := compressed
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
new file mode 100644
index 000000000000..95373428cb3d
--- /dev/null
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -0,0 +1,68 @@
+#
+# linux/arch/unicore32/boot/compressed/Makefile
+#
+# create a compressed vmlinuz image from the original vmlinux
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2001~2010 GUAN Xue-tao
+#
+
+EXTRA_CFLAGS := -fpic -fno-builtin
+EXTRA_AFLAGS := -Wa,-march=all
+
+OBJS := misc.o
+
+# font.c and font.o
+CFLAGS_font.o := -Dstatic=
+$(obj)/font.c: $(srctree)/drivers/video/console/font_8x8.c
+ $(call cmd,shipped)
+
+# piggy.S and piggy.o
+suffix_$(CONFIG_KERNEL_GZIP) := gzip
+suffix_$(CONFIG_KERNEL_BZIP2) := bz2
+suffix_$(CONFIG_KERNEL_LZO) := lzo
+suffix_$(CONFIG_KERNEL_LZMA) := lzma
+
+$(obj)/piggy.$(suffix_y): $(obj)/../Image FORCE
+ $(call if_changed,$(suffix_y))
+
+SEDFLAGS_piggy = s/DECOMP_SUFFIX/$(suffix_y)/
+$(obj)/piggy.S: $(obj)/piggy.S.in
+ @sed "$(SEDFLAGS_piggy)" < $< > $@
+
+$(obj)/piggy.o: $(obj)/piggy.$(suffix_y) $(obj)/piggy.S FORCE
+
+targets := vmlinux vmlinux.lds font.o font.c head.o misc.o \
+ piggy.$(suffix_y) piggy.o piggy.S \
+
+# Make sure files are removed during clean
+extra-y += piggy.gzip piggy.bz2 piggy.lzo piggy.lzma
+
+# ?
+LDFLAGS_vmlinux += -p
+# Report unresolved symbol references
+LDFLAGS_vmlinux += --no-undefined
+# Delete all temporary local symbols
+LDFLAGS_vmlinux += -X
+# Next argument is a linker script
+LDFLAGS_vmlinux += -T
+
+# For uidivmod
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
+ $(obj)/misc.o FORCE
+ $(call if_changed,ld)
+ @:
+
+# We now have a PIC decompressor implementation. Decompressors running
+# from RAM should not define ZTEXTADDR. Decompressors running directly
+# from ROM or Flash must define ZTEXTADDR (preferably via the config)
+ZTEXTADDR := 0
+ZBSSADDR := ALIGN(4)
+
+SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
+$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/unicore32/boot/Makefile $(KCONFIG_CONFIG)
+ @sed "$(SEDFLAGS_lds)" < $< > $@
+
diff --git a/arch/unicore32/boot/compressed/head.S b/arch/unicore32/boot/compressed/head.S
new file mode 100644
index 000000000000..fbd1e374c685
--- /dev/null
+++ b/arch/unicore32/boot/compressed/head.S
@@ -0,0 +1,204 @@
+/*
+ * linux/arch/unicore32/boot/compressed/head.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <mach/memory.h>
+
+#define csub cmpsub
+#define cand cmpand
+#define nop8 nop; nop; nop; nop; nop; nop; nop; nop
+
+ .section ".start", #alloc, #execinstr
+ .text
+start:
+ .type start,#function
+
+ /* Initialize ASR, PRIV mode and INTR off */
+ mov r0, #0xD3
+ mov.a asr, r0
+
+ adr r0, LC0
+ ldm (r1, r2, r3, r5, r6, r7, r8), [r0]+
+ ldw sp, [r0+], #28
+ sub.a r0, r0, r1 @ calculate the delta offset
+
+ /*
+ * if delta is zero, we are running at the address
+ * we were linked at.
+ */
+ beq not_relocated
+
+ /*
+ * We're running at a different address. We need to fix
+ * up various pointers:
+ * r5 - zImage base address (_start)
+ * r7 - GOT start
+ * r8 - GOT end
+ */
+ add r5, r5, r0
+ add r7, r7, r0
+ add r8, r8, r0
+
+ /*
+ * we need to fix up pointers into the BSS region.
+ * r2 - BSS start
+ * r3 - BSS end
+ * sp - stack pointer
+ */
+ add r2, r2, r0
+ add r3, r3, r0
+ add sp, sp, r0
+
+ /*
+ * Relocate all entries in the GOT table.
+ * This fixes up the C references.
+ * r7 - GOT start
+ * r8 - GOT end
+ */
+1001: ldw r1, [r7+], #0
+ add r1, r1, r0
+ stw.w r1, [r7]+, #4
+ csub.a r7, r8
+ bub 1001b
+
+not_relocated:
+ /*
+ * Clear BSS region.
+ * r2 - BSS start
+ * r3 - BSS end
+ */
+ mov r0, #0
+1002: stw.w r0, [r2]+, #4
+ csub.a r2, r3
+ bub 1002b
+
+ /*
+ * Turn on the cache.
+ */
+ mov r0, #0
+ movc p0.c5, r0, #28 @ cache invalidate all
+ nop8
+ movc p0.c6, r0, #6 @ tlb invalidate all
+ nop8
+
+ mov r0, #0x1c @ en icache and wb dcache
+ movc p0.c1, r0, #0
+ nop8
+
+ /*
+ * Set up some pointers, for starting decompressing.
+ */
+
+ mov r1, sp @ malloc space above stack
+ add r2, sp, #0x10000 @ 64k max
+
+ /*
+ * Check to see if we will overwrite ourselves.
+ * r4 = final kernel address
+ * r5 = start of this image
+ * r6 = size of decompressed image
+ * r2 = end of malloc space (and therefore this image)
+ * We basically want:
+ * r4 >= r2 -> OK
+ * r4 + image length <= r5 -> OK
+ */
+ ldw r4, =KERNEL_IMAGE_START
+ csub.a r4, r2
+ bea wont_overwrite
+ add r0, r4, r6
+ csub.a r0, r5
+ beb wont_overwrite
+
+ /*
+ * If overwrite, just print error message
+ */
+ b __error_overwrite
+
+ /*
+ * We're not in danger of overwriting ourselves.
+ * Do this the simple way.
+ */
+wont_overwrite:
+ /*
+ * decompress_kernel:
+ * r0: output_start
+ * r1: free_mem_ptr_p
+ * r2: free_mem_ptr_end_p
+ */
+ mov r0, r4
+ b.l decompress_kernel @ C functions
+
+ /*
+ * Clean and flush the cache to maintain consistency.
+ */
+ mov r0, #0
+ movc p0.c5, r0, #14 @ flush dcache
+ nop8
+ movc p0.c5, r0, #20 @ icache invalidate all
+ nop8
+
+ /*
+ * Turn off the Cache and MMU.
+ */
+ mov r0, #0 @ disable i/d cache and MMU
+ movc p0.c1, r0, #0
+ nop8
+
+ mov r0, #0 @ must be zero
+ ldw r4, =KERNEL_IMAGE_START
+ mov pc, r4 @ call kernel
+
+
+ .align 2
+ .type LC0, #object
+LC0: .word LC0 @ r1
+ .word __bss_start @ r2
+ .word _end @ r3
+ .word _start @ r5
+ .word _image_size @ r6
+ .word _got_start @ r7
+ .word _got_end @ r8
+ .word decompress_stack_end @ sp
+ .size LC0, . - LC0
+
+print_string:
+#ifdef CONFIG_DEBUG_OCD
+2001: ldb.w r1, [r0]+, #1
+ csub.a r1, #0
+ bne 2002f
+ mov pc, lr
+2002:
+ movc r2, p1.c0, #0
+ cand.a r2, #2
+ bne 2002b
+ movc p1.c1, r1, #1
+ csub.a r1, #'\n'
+ cmoveq r1, #'\r'
+ beq 2002b
+ b 2001b
+#else
+ mov pc, lr
+#endif
+
+__error_overwrite:
+ adr r0, str_error
+ b.l print_string
+2001: nop8
+ b 2001b
+str_error: .asciz "\nError: Kernel address OVERWRITE\n"
+ .align
+
+ .ltorg
+
+ .align 4
+ .section ".stack", "aw", %nobits
+decompress_stack: .space 4096
+decompress_stack_end:
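The wont_overwrite test above accepts either of two layouts: the kernel's
final address entirely above the decompressor's malloc area, or the final
address plus the decompressed length entirely below the running image. A C
restatement of the same predicate (a sketch for clarity, not kernel code):

    /* target = final kernel address (r4), image_end = end of malloc space
     * (r2), image_start = start of this image (r5), size = decompressed
     * image size (r6), matching the register comments in head.S above. */
    static int wont_overwrite(unsigned long target, unsigned long image_end,
                              unsigned long image_start, unsigned long size)
    {
            if (target >= image_end)            /* csub.a r4, r2; bea */
                    return 1;
            if (target + size <= image_start)   /* csub.a r0, r5; beb */
                    return 1;
            return 0;                           /* falls through to error */
    }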
diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c
new file mode 100644
index 000000000000..176d5bda3559
--- /dev/null
+++ b/arch/unicore32/boot/compressed/misc.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/unicore32/boot/compressed/misc.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/unaligned.h>
+#include <mach/uncompress.h>
+
+/*
+ * gzip declarations
+ */
+unsigned char *output_data;
+unsigned long output_ptr;
+
+unsigned int free_mem_ptr;
+unsigned int free_mem_end_ptr;
+
+#define STATIC static
+#define STATIC_RW_DATA /* non-static please */
+
+/*
+ * arch-dependent implementations
+ */
+#ifndef ARCH_HAVE_DECOMP_ERROR
+#define arch_decomp_error(x)
+#endif
+
+#ifndef ARCH_HAVE_DECOMP_SETUP
+#define arch_decomp_setup()
+#endif
+
+#ifndef ARCH_HAVE_DECOMP_PUTS
+#define arch_decomp_puts(p)
+#endif
+
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ int i = 0;
+ unsigned char *d = (unsigned char *)dest, *s = (unsigned char *)src;
+
+ for (i = n >> 3; i > 0; i--) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (n & 1 << 2) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (n & 1 << 1) {
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (n & 1)
+ *d++ = *s++;
+
+ return dest;
+}
+
+void error(char *x)
+{
+ arch_decomp_puts("\n\n");
+ arch_decomp_puts(x);
+ arch_decomp_puts("\n\n -- System halted");
+
+ arch_decomp_error(x);
+
+ for (;;)
+ ; /* Halt */
+}
+
+/* Heap size should be adjusted for different decompress method */
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+unsigned long decompress_kernel(unsigned long output_start,
+ unsigned long free_mem_ptr_p,
+ unsigned long free_mem_ptr_end_p)
+{
+ unsigned char *tmp;
+
+ output_data = (unsigned char *)output_start;
+ free_mem_ptr = free_mem_ptr_p;
+ free_mem_end_ptr = free_mem_ptr_end_p;
+
+ arch_decomp_setup();
+
+ tmp = (unsigned char *) (((unsigned long)input_data_end) - 4);
+ output_ptr = get_unaligned_le32(tmp);
+
+ arch_decomp_puts("Uncompressing Linux...");
+ decompress(input_data, input_data_end - input_data, NULL, NULL,
+ output_data, NULL, error);
+ arch_decomp_puts(" done, booting the kernel.\n");
+ return output_ptr;
+}
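Note how decompress_kernel() above recovers the decompressed length: the last
four bytes of the payload hold it in little-endian form (for a gzip payload
this is the ISIZE trailer; the other formats presumably carry an equivalent
footer here), and get_unaligned_le32() reads it back. An equivalent
open-coded read, as a sketch:

    /* Sketch: read the 32-bit little-endian length stored at the end of
     * the compressed payload, as get_unaligned_le32(input_data_end - 4)
     * does above. */
    static unsigned long payload_output_len(const unsigned char *end)
    {
            const unsigned char *p = end - 4;

            return p[0] | (p[1] << 8) | (p[2] << 16) |
                   ((unsigned long)p[3] << 24);
    }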
diff --git a/arch/unicore32/boot/compressed/piggy.S.in b/arch/unicore32/boot/compressed/piggy.S.in
new file mode 100644
index 000000000000..b79704d58026
--- /dev/null
+++ b/arch/unicore32/boot/compressed/piggy.S.in
@@ -0,0 +1,6 @@
+ .section .piggydata,#alloc
+ .globl input_data
+input_data:
+ .incbin "arch/unicore32/boot/compressed/piggy.DECOMP_SUFFIX"
+ .globl input_data_end
+input_data_end:
diff --git a/arch/unicore32/boot/compressed/vmlinux.lds.in b/arch/unicore32/boot/compressed/vmlinux.lds.in
new file mode 100644
index 000000000000..d5a3ce296239
--- /dev/null
+++ b/arch/unicore32/boot/compressed/vmlinux.lds.in
@@ -0,0 +1,61 @@
+/*
+ * linux/arch/unicore/boot/compressed/vmlinux.lds.in
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+OUTPUT_ARCH(unicore32)
+ENTRY(_start)
+SECTIONS
+{
+ /DISCARD/ : {
+ /*
+ * Discard any r/w data - this produces a link error if we have any,
+ * which is required for PIC decompression. Local data generates
+ * GOTOFF relocations, which prevents it being relocated independently
+ * of the text/got segments.
+ */
+ *(.data)
+ }
+
+ . = TEXT_START;
+ _text = .;
+
+ .text : {
+ _start = .;
+ *(.start)
+ *(.text)
+ *(.text.*)
+ *(.fixup)
+ *(.gnu.warning)
+ *(.rodata)
+ *(.rodata.*)
+ *(.piggydata)
+ . = ALIGN(4);
+ }
+
+ _etext = .;
+
+ /* Assume size of decompressed image is 4x the compressed image */
+ _image_size = (_etext - _text) * 4;
+
+ _got_start = .;
+ .got : { *(.got) }
+ _got_end = .;
+ .got.plt : { *(.got.plt) }
+ _edata = .;
+
+ . = BSS_START;
+ __bss_start = .;
+ .bss : { *(.bss) }
+ _end = .;
+
+ .stack : { *(.stack) }
+ .comment 0 : { *(.comment) }
+}
+
diff --git a/arch/unicore32/configs/debug_defconfig b/arch/unicore32/configs/debug_defconfig
new file mode 100644
index 000000000000..b5fbde9f1cb2
--- /dev/null
+++ b/arch/unicore32/configs/debug_defconfig
@@ -0,0 +1,215 @@
+### General setup
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCALVERSION="-debug"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HOTPLUG=y
+# Initial RAM filesystem and RAM disk (initramfs/initrd) support
+#CONFIG_BLK_DEV_INITRD=y
+#CONFIG_INITRAMFS_SOURCE="arch/unicore/ramfs/ramfs_config"
+
+### Enable loadable module support
+CONFIG_MODULES=n
+CONFIG_MODULE_UNLOAD=y
+
+### System Type
+CONFIG_ARCH_PUV3=y
+# Board Selection
+CONFIG_PUV3_NB0916=y
+# Processor Features
+CONFIG_CPU_DCACHE_LINE_DISABLE=y
+CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE=n
+
+### Bus support
+CONFIG_PCI=y
+CONFIG_PCI_LEGACY=n
+
+### Boot options
+# for debug, adding: earlyprintk=ocd,keep initcall_debug
+# others support: test_suspend=mem root=/dev/sda
+# hibernate support: resume=/dev/sda3
+CONFIG_CMDLINE="earlyprintk=ocd,keep ignore_loglevel"
+# TODO: mem=512M video=unifb:1024x600-16@75
+# for nfs: root=/dev/nfs rw nfsroot=192.168.10.88:/home/udb/nfs/,rsize=1024,wsize=1024
+# ip=192.168.10.83:192.168.10.88:192.168.10.1:255.255.255.0::eth0:off
+CONFIG_CMDLINE_FORCE=y
+
+### Power management options
+CONFIG_PM=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/sda3"
+CONFIG_CPU_FREQ=n
+CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
+
+### Networking support
+CONFIG_NET=y
+# Networking options
+CONFIG_PACKET=m
+CONFIG_UNIX=m
+# TCP/IP networking
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IPV6=n
+# Wireless
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_MAC80211=m
+
+### PKUnity SoC Features
+CONFIG_USB_WLAN_HED_AQ3=n
+CONFIG_USB_CMMB_INNOFIDEI=n
+CONFIG_I2C_BATTERY_BQ27200=n
+CONFIG_I2C_EEPROM_AT24=n
+CONFIG_LCD_BACKLIGHT=n
+
+CONFIG_PUV3_RTC=y
+CONFIG_PUV3_UMAL=y
+CONFIG_PUV3_MUSB=n
+CONFIG_PUV3_AC97=n
+CONFIG_PUV3_NAND=n
+CONFIG_PUV3_MMC=n
+CONFIG_PUV3_UART=n
+
+### Device Drivers
+# Memory Technology Device (MTD) support
+CONFIG_MTD=m
+CONFIG_MTD_UBI=m
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLKDEVS=m
+# RAM/ROM/Flash chip drivers
+CONFIG_MTD_CFI=m
+CONFIG_MTD_JEDECPROBE=m
+CONFIG_MTD_CFI_AMDSTD=m
+# Mapping drivers for chip access
+CONFIG_MTD_PHYSMAP=m
+
+# Block devices
+CONFIG_BLK_DEV_LOOP=m
+
+# SCSI device support
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+
+# Serial ATA (prod) and Parallel ATA (experimental) drivers
+CONFIG_ATA=y
+CONFIG_SATA_VIA=y
+
+# Network device support
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_NETDEV_1000=y
+# Wireless LAN
+CONFIG_WLAN_80211=n
+CONFIG_RT2X00=n
+CONFIG_RT73USB=n
+
+# Input device support
+CONFIG_INPUT_EVDEV=m
+# Keyboards
+CONFIG_KEYBOARD_GPIO=m
+
+# I2C support
+CONFIG_I2C=y
+CONFIG_I2C_PUV3=y
+
+# Hardware Monitoring support
+#CONFIG_SENSORS_LM75=m
+# Generic Thermal sysfs driver
+#CONFIG_THERMAL=m
+#CONFIG_THERMAL_HWMON=y
+
+# Multimedia support
+CONFIG_MEDIA_SUPPORT=n
+CONFIG_VIDEO_DEV=n
+CONFIG_USB_VIDEO_CLASS=n
+
+# Graphics support
+CONFIG_FB=y
+CONFIG_FB_PUV3_UNIGFX=y
+# Console display driver support
+CONFIG_VGA_CONSOLE=n
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+# Bootup logo
+CONFIG_LOGO=n
+
+# Sound card support
+CONFIG_SOUND=m
+# Advanced Linux Sound Architecture
+CONFIG_SND=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+
+# USB support
+CONFIG_USB_ARCH_HAS_HCD=n
+CONFIG_USB=n
+CONFIG_USB_DEVICEFS=n
+CONFIG_USB_PRINTER=n
+CONFIG_USB_STORAGE=n
+# Inventra Highspeed Dual Role Controller
+CONFIG_USB_MUSB_HDRC=n
+
+# LED Support
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+# LED Triggers
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+
+# Real Time Clock
+CONFIG_RTC_LIB=m
+CONFIG_RTC_CLASS=m
+
+### File systems
+CONFIG_EXT2_FS=m
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_FUSE_FS=m
+# CD-ROM/DVD Filesystems
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+# DOS/FAT/NT Filesystems
+CONFIG_VFAT_FS=m
+# Pseudo filesystems
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# Miscellaneous filesystems
+CONFIG_MISC_FILESYSTEMS=y
+CONFIG_JFFS2_FS=m
+CONFIG_UBIFS_FS=m
+# Network File Systems
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+# Partition Types
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MSDOS_PARTITION=y
+# Native language support
+CONFIG_NLS=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_UTF8=m
+
+### Kernel hacking
+CONFIG_FRAME_WARN=8096
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_PROVE_LOCKING=n
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_FRAME_POINTER=y
+CONFIG_DEBUG_LL=y
+
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
new file mode 100644
index 000000000000..b200fdaca44d
--- /dev/null
+++ b/arch/unicore32/include/asm/Kbuild
@@ -0,0 +1,2 @@
+include include/asm-generic/Kbuild.asm
+
diff --git a/arch/unicore32/include/asm/assembler.h b/arch/unicore32/include/asm/assembler.h
new file mode 100644
index 000000000000..8e87ed7faeba
--- /dev/null
+++ b/arch/unicore32/include/asm/assembler.h
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/unicore32/include/asm/assembler.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Do not include any C declarations in this file - it is included by
+ * assembler source.
+ */
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#include <asm/ptrace.h>
+
+/*
+ * Little Endian independent macros for shifting bytes within registers.
+ */
+#define pull >>
+#define push <<
+#define get_byte_0 << #0
+#define get_byte_1 >> #8
+#define get_byte_2 >> #16
+#define get_byte_3 >> #24
+#define put_byte_0 << #0
+#define put_byte_1 << #8
+#define put_byte_2 << #16
+#define put_byte_3 << #24
+
+#define cadd cmpadd
+#define cand cmpand
+#define csub cmpsub
+#define cxor cmpxor
+
+/*
+ * Enable and disable interrupts
+ */
+ .macro disable_irq, temp
+ mov \temp, asr
+ andn \temp, \temp, #0xFF
+ or \temp, \temp, #PSR_I_BIT | PRIV_MODE
+ mov.a asr, \temp
+ .endm
+
+ .macro enable_irq, temp
+ mov \temp, asr
+ andn \temp, \temp, #0xFF
+ or \temp, \temp, #PRIV_MODE
+ mov.a asr, \temp
+ .endm
+
+#define USER(x...) \
+9999: x; \
+ .pushsection __ex_table, "a"; \
+ .align 3; \
+ .long 9999b, 9001f; \
+ .popsection
+
+ .macro notcond, cond, nexti = .+8
+ .ifc \cond, eq
+ bne \nexti
+ .else; .ifc \cond, ne
+ beq \nexti
+ .else; .ifc \cond, ea
+ bub \nexti
+ .else; .ifc \cond, ub
+ bea \nexti
+ .else; .ifc \cond, fs
+ bns \nexti
+ .else; .ifc \cond, ns
+ bfs \nexti
+ .else; .ifc \cond, fv
+ bnv \nexti
+ .else; .ifc \cond, nv
+ bfv \nexti
+ .else; .ifc \cond, ua
+ beb \nexti
+ .else; .ifc \cond, eb
+ bua \nexti
+ .else; .ifc \cond, eg
+ bsl \nexti
+ .else; .ifc \cond, sl
+ beg \nexti
+ .else; .ifc \cond, sg
+ bel \nexti
+ .else; .ifc \cond, el
+ bsg \nexti
+ .else; .ifnc \cond, al
+ .error "Unknown cond in notcond macro argument"
+ .endif; .endif; .endif; .endif; .endif; .endif; .endif
+ .endif; .endif; .endif; .endif; .endif; .endif; .endif
+ .endif
+ .endm
+
+ .macro usracc, instr, reg, ptr, inc, cond, rept, abort
+ .rept \rept
+ notcond \cond, .+8
+9999 :
+ .if \inc == 1
+ \instr\()b.u \reg, [\ptr], #\inc
+ .elseif \inc == 4
+ \instr\()w.u \reg, [\ptr], #\inc
+ .else
+ .error "Unsupported inc macro argument"
+ .endif
+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 9999b, \abort
+ .popsection
+ .endr
+ .endm
+
+ .macro strusr, reg, ptr, inc, cond = al, rept = 1, abort = 9001f
+ usracc st, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ .macro ldrusr, reg, ptr, inc, cond = al, rept = 1, abort = 9001f
+ usracc ld, \reg, \ptr, \inc, \cond, \rept, \abort
+ .endm
+
+ .macro nop8
+ .rept 8
+ nop
+ .endr
+ .endm
diff --git a/arch/unicore32/include/asm/bitops.h b/arch/unicore32/include/asm/bitops.h
new file mode 100644
index 000000000000..1628a6328994
--- /dev/null
+++ b/arch/unicore32/include/asm/bitops.h
@@ -0,0 +1,47 @@
+/*
+ * linux/arch/unicore32/include/asm/bitops.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_BITOPS_H__
+#define __UNICORE_BITOPS_H__
+
+#define find_next_bit __uc32_find_next_bit
+#define find_next_zero_bit __uc32_find_next_zero_bit
+
+#define find_first_bit __uc32_find_first_bit
+#define find_first_zero_bit __uc32_find_first_zero_bit
+
+#define _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS_FFS_H_
+#define _ASM_GENERIC_BITOPS___FFS_H_
+/*
+ * On UNICORE, those functions can be implemented around
+ * the cntlz instruction for much better code efficiency.
+ */
+
+static inline int fls(int x)
+{
+ int ret;
+
+ asm("cntlz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
+ ret = 32 - ret;
+
+ return ret;
+}
+
+#define __fls(x) (fls(x) - 1)
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+#define __ffs(x) (ffs(x) - 1)
+
+#include <asm-generic/bitops.h>
+
+#endif /* __UNICORE_BITOPS_H__ */
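The cntlz-based fls() above uses the count-leading-zeros identity
fls(x) = 32 - clz(x), with cntlz presumably returning 32 for x == 0 so that
fls(0) == 0; ffs() is then derived by isolating the lowest set bit with
x & -x. A portable sketch of the same identities (__builtin_clz(0) is
undefined, so zero needs an explicit special case):

    static inline int fls_sketch(unsigned int x)
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    /* x & -x isolates the lowest set bit, so fls() of it gives ffs() */
    #define ffs_sketch(x) ({ unsigned int t = (x); fls_sketch(t & -t); })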
diff --git a/arch/unicore32/include/asm/byteorder.h b/arch/unicore32/include/asm/byteorder.h
new file mode 100644
index 000000000000..ebe1b3fef3e3
--- /dev/null
+++ b/arch/unicore32/include/asm/byteorder.h
@@ -0,0 +1,24 @@
+/*
+ * linux/arch/unicore32/include/asm/byteorder.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UniCore ONLY support Little Endian mode, the data bus is connected such
+ * that byte accesses appear as:
+ * 0 = d0...d7, 1 = d8...d15, 2 = d16...d23, 3 = d24...d31
+ * and word accesses (data or instruction) appear as:
+ * d0...d31
+ */
+#ifndef __UNICORE_BYTEORDER_H__
+#define __UNICORE_BYTEORDER_H__
+
+#include <linux/byteorder/little_endian.h>
+
+#endif
+
diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
new file mode 100644
index 000000000000..ad8f795d86ca
--- /dev/null
+++ b/arch/unicore32/include/asm/cache.h
@@ -0,0 +1,27 @@
+/*
+ * linux/arch/unicore32/include/asm/cache.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CACHE_H__
+#define __UNICORE_CACHE_H__
+
+#define L1_CACHE_SHIFT (5)
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+#endif
diff --git a/arch/unicore32/include/asm/cacheflush.h b/arch/unicore32/include/asm/cacheflush.h
new file mode 100644
index 000000000000..c0301e6c8b81
--- /dev/null
+++ b/arch/unicore32/include/asm/cacheflush.h
@@ -0,0 +1,211 @@
+/*
+ * linux/arch/unicore32/include/asm/cacheflush.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CACHEFLUSH_H__
+#define __UNICORE_CACHEFLUSH_H__
+
+#include <linux/mm.h>
+
+#include <asm/shmparam.h>
+
+#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
+/*
+ * MM Cache Management
+ * ===================
+ *
+ * The arch/unicore32/mm/cache.S files implement these methods.
+ *
+ * Start addresses are inclusive and end addresses are exclusive;
+ * start addresses should be rounded down, end addresses up.
+ *
+ * See Documentation/cachetlb.txt for more information.
+ * Please note that the implementation of these, and the required
+ * effects are cache-type (VIVT/VIPT/PIPT) specific.
+ *
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ * Currently only needed for cache-v6.S and cache-v7.S, see
+ * __flush_icache_all for the generic implementation.
+ *
+ * flush_kern_all()
+ *
+ * Unconditionally clean and invalidate the entire cache.
+ *
+ * flush_user_all()
+ *
+ * Clean and invalidate all user space cache entries
+ * before a change of page tables.
+ *
+ * flush_user_range(start, end, flags)
+ *
+ * Clean and invalidate a range of cache entries in the
+ * specified address space before a change of page tables.
+ * - start - user start address (inclusive, page aligned)
+ * - end - user end address (exclusive, page aligned)
+ * - flags - vma->vm_flags field
+ *
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * coherent_user_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start, end. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
+ * flush_kern_dcache_area(kaddr, size)
+ *
+ * Ensure that the data held in page is written back.
+ * - kaddr - page address
+ * - size - region size
+ *
+ * DMA Cache Coherency
+ * ===================
+ *
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+
+extern void __cpuc_flush_icache_all(void);
+extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_user_all(void);
+extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
+extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
+extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
+extern void __cpuc_flush_dcache_area(void *, size_t);
+extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
+extern void __cpuc_dma_flush_range(unsigned long, unsigned long);
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * process's address space. Really, we want to allow our "user
+ * space" model to handle this.
+ */
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ unsigned long, void *, const void *, unsigned long);
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ } while (0)
+
+/*
+ * Convert calls to our calling convention.
+ */
+/* Invalidate I-cache */
+static inline void __flush_icache_all(void)
+{
+ asm("movc p0.c5, %0, #20;\n"
+ "nop; nop; nop; nop; nop; nop; nop; nop\n"
+ :
+ : "r" (0));
+}
+
+#define flush_cache_all() __cpuc_flush_kern_all()
+
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+extern void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long user_addr, unsigned long pfn);
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/*
+ * flush_cache_user_range is used when we want to ensure that the
+ * Harvard caches are synchronised for the user space address range.
+ * This is used for the UniCore private sys_cacheflush system call.
+ */
+#define flush_cache_user_range(vma, start, end) \
+ __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+
+/*
+ * Perform necessary cache operations to ensure that data previously
+ * stored within this range of addresses can be executed by the CPU.
+ */
+#define flush_icache_range(s, e) __cpuc_coherent_kern_range(s, e)
+
+/*
+ * Perform necessary cache operations to ensure that the TLB will
+ * see data written in the specified area.
+ */
+#define clean_dcache_area(start, size) cpu_dcache_clean_area(start, size)
+
+/*
+ * flush_dcache_page is used when the kernel has written to the page
+ * cache page at virtual address page->virtual.
+ *
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
+ *
+ * Otherwise we can defer the operation, and clean the cache when we are
+ * about to change to user space. This is the same method as used on SPARC64.
+ * See update_mmu_cache for the user space part.
+ */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *);
+
+#define flush_dcache_mmap_lock(mapping) \
+ spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+ spin_unlock_irq(&(mapping)->tree_lock)
+
+#define flush_icache_user_range(vma, page, addr, len) \
+ flush_dcache_page(page)
+
+/*
+ * We don't appear to need to do anything here. In fact, if we did, we'd
+ * duplicate cache flushing elsewhere performed by flush_dcache_page().
+ */
+#define flush_icache_page(vma, page) do { } while (0)
+
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+
+#endif
diff --git a/arch/unicore32/include/asm/checksum.h b/arch/unicore32/include/asm/checksum.h
new file mode 100644
index 000000000000..f55c3f937c3e
--- /dev/null
+++ b/arch/unicore32/include/asm/checksum.h
@@ -0,0 +1,41 @@
+/*
+ * linux/arch/unicore32/include/asm/checksum.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * IP checksum routines
+ */
+#ifndef __UNICORE_CHECKSUM_H__
+#define __UNICORE_CHECKSUM_H__
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__(
+ "add.a %0, %1, %2\n"
+ "addc.a %0, %0, %3\n"
+ "addc.a %0, %0, %4 << #8\n"
+ "addc.a %0, %0, %5\n"
+ "addc %0, %0, #0\n"
+ : "=&r"(sum)
+ : "r" (sum), "r" (daddr), "r" (saddr), "r" (len), "Ir" (htons(proto))
+ : "cc");
+ return sum;
+}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif
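The inline assembly above accumulates the IPv4 pseudo-header with an
add-with-carry chain; len << 8 contributes the same value as htons(len) once
the 32-bit sum is folded down to 16 bits, which is why the length needs no
explicit byte swap. A portable sketch of the same computation, with a wide
accumulator standing in for the carry chain:

    #include <arpa/inet.h>  /* htons() -- userspace stand-in */

    static unsigned int pseudo_hdr_sum(unsigned int saddr, unsigned int daddr,
                                       unsigned short len,
                                       unsigned short proto, unsigned int sum)
    {
            unsigned long long acc = sum;

            acc += daddr;
            acc += saddr;
            acc += (unsigned int)len << 8;
            acc += htons(proto);
            while (acc >> 32)                   /* fold carries back in */
                    acc = (acc & 0xffffffffULL) + (acc >> 32);
            return (unsigned int)acc;
    }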
diff --git a/arch/unicore32/include/asm/cpu-single.h b/arch/unicore32/include/asm/cpu-single.h
new file mode 100644
index 000000000000..0f55d1823439
--- /dev/null
+++ b/arch/unicore32/include/asm/cpu-single.h
@@ -0,0 +1,45 @@
+/*
+ * linux/arch/unicore32/include/asm/cpu-single.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CPU_SINGLE_H__
+#define __UNICORE_CPU_SINGLE_H__
+
+#include <asm/page.h>
+#include <asm/memory.h>
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#define cpu_switch_mm(pgd, mm) cpu_do_switch_mm(virt_to_phys(pgd), mm)
+
+#define cpu_get_pgd() \
+ ({ \
+ unsigned long pg; \
+ __asm__("movc %0, p0.c2, #0" \
+ : "=r" (pg) : : "cc"); \
+ pg &= ~0x0fff; \
+ (pgd_t *)phys_to_virt(pg); \
+ })
+
+struct mm_struct;
+
+/* declare all the functions as extern */
+extern void cpu_proc_fin(void);
+extern int cpu_do_idle(void);
+extern void cpu_dcache_clean_area(void *, int);
+extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
+extern void cpu_set_pte(pte_t *ptep, pte_t pte);
+extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+
+#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* __UNICORE_CPU_SINGLE_H__ */
diff --git a/arch/unicore32/include/asm/cputype.h b/arch/unicore32/include/asm/cputype.h
new file mode 100644
index 000000000000..ec1a30f98077
--- /dev/null
+++ b/arch/unicore32/include/asm/cputype.h
@@ -0,0 +1,33 @@
+/*
+ * linux/arch/unicore32/include/asm/cputype.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_CPUTYPE_H__
+#define __UNICORE_CPUTYPE_H__
+
+#include <linux/stringify.h>
+
+#define CPUID_CPUID 0
+#define CPUID_CACHETYPE 1
+
+#define read_cpuid(reg) \
+ ({ \
+ unsigned int __val; \
+ asm("movc %0, p0.c0, #" __stringify(reg) \
+ : "=r" (__val) \
+ : \
+ : "cc"); \
+ __val; \
+ })
+
+#define uc32_cpuid read_cpuid(CPUID_CPUID)
+#define uc32_cachetype read_cpuid(CPUID_CACHETYPE)
+
+#endif
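For reference, __stringify() pastes the constant register index into the
mnemonic, so each accessor above is a single coprocessor read; e.g.
(illustrative expansion):

    unsigned int ctype = read_cpuid(CPUID_CACHETYPE);
    /* expands to: asm("movc %0, p0.c0, #1" : "=r" (__val) : : "cc"); */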
diff --git a/arch/unicore32/include/asm/delay.h b/arch/unicore32/include/asm/delay.h
new file mode 100644
index 000000000000..164ae61cd6f7
--- /dev/null
+++ b/arch/unicore32/include/asm/delay.h
@@ -0,0 +1,52 @@
+/*
+ * linux/arch/unicore32/include/asm/delay.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines, using a pre-computed "loops_per_second" value.
+ */
+#ifndef __UNICORE_DELAY_H__
+#define __UNICORE_DELAY_H__
+
+#include <asm/param.h> /* HZ */
+
+extern void __delay(int loops);
+
+/*
+ * This function intentionally does not exist; if you see references to
+ * it, it means that you're calling udelay() with an out of range value.
+ *
+ * With currently imposed limits, this means that we support a max delay
+ * of 2000us. Further limits: HZ<=1000 and bogomips<=3355
+ */
+extern void __bad_udelay(void);
+
+/*
+ * division by multiplication: you don't have to worry about
+ * loss of precision.
+ *
+ * Use only for very small delays ( < 1 msec). Should probably use a
+ * lookup table, really, as the multiplications take much too long with
+ * short delays. This is a "reasonable" implementation, though (and the
+ * first constant multiplication gets optimized away if the delay is
+ * a constant)
+ */
+extern void __udelay(unsigned long usecs);
+extern void __const_udelay(unsigned long);
+
+#define MAX_UDELAY_MS 2
+
+#define udelay(n) \
+ (__builtin_constant_p(n) ? \
+ ((n) > (MAX_UDELAY_MS * 1000) ? __bad_udelay() : \
+ __const_udelay((n) * ((2199023U*HZ)>>11))) : \
+ __udelay(n))
+
+#endif /* __UNICORE_DELAY_H__ */
+
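A worked sketch of the scale factor in the udelay() macro above; this
assumes __const_udelay() multiplies by loops_per_jiffy and shifts right
by 30 bits, as the ARM implementation it mirrors does:

    /*
     *  2199023              ~= 2^41 / 10^6
     *  (2199023 * HZ) >> 11 ~= 2^30 * HZ / 10^6
     *
     *  loops = usecs * (2^30 * HZ / 10^6) * loops_per_jiffy / 2^30
     *        = usecs * loops_per_jiffy * HZ / 10^6
     *        = usecs * loops_per_second / 10^6
     */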
diff --git a/arch/unicore32/include/asm/dma-mapping.h b/arch/unicore32/include/asm/dma-mapping.h
new file mode 100644
index 000000000000..9258e592f414
--- /dev/null
+++ b/arch/unicore32/include/asm/dma-mapping.h
@@ -0,0 +1,124 @@
+/*
+ * linux/arch/unicore32/include/asm/dma-mapping.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_DMA_MAPPING_H__
+#define __UNICORE_DMA_MAPPING_H__
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/swiotlb.h>
+
+#include <asm-generic/dma-coherent.h>
+
+#include <asm/memory.h>
+#include <asm/cacheflush.h>
+
+extern struct dma_map_ops swiotlb_dma_map_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return &swiotlb_dma_map_ops;
+}
+
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (unlikely(dma_ops == NULL))
+ return 0;
+
+ return dma_ops->dma_supported(dev, mask);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ if (dma_ops->mapping_error)
+ return dma_ops->mapping_error(dev, dma_addr);
+
+ return 0;
+}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (dev && dev->dma_mask)
+ return addr + size - 1 <= *dev->dma_mask;
+
+ return 1;
+}
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr;
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+}
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+static inline void dma_cache_sync(struct device *dev, void *vaddr,
+ size_t size, enum dma_data_direction direction)
+{
+ unsigned long start = (unsigned long)vaddr;
+ unsigned long end = start + size;
+
+ switch (direction) {
+ case DMA_NONE:
+ BUG();
+ case DMA_FROM_DEVICE:
+ case DMA_BIDIRECTIONAL: /* writeback and invalidate */
+ __cpuc_dma_flush_range(start, end);
+ break;
+ case DMA_TO_DEVICE: /* writeback only */
+ __cpuc_dma_clean_range(start, end);
+ break;
+ }
+}
+
+#endif /* __KERNEL__ */
+#endif
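A hedged usage sketch of this API from a hypothetical driver (the names,
dev pointer, and error handling are invented for illustration; not part
of the patch):

    void *cpu_buf;
    dma_addr_t bus_addr;

    if (dma_set_mask(dev, DMA_BIT_MASK(32)))
    	return -EIO;		/* swiotlb cannot satisfy the mask */

    cpu_buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
    if (!cpu_buf)
    	return -ENOMEM;
    /* ... hand bus_addr to the device, use cpu_buf from the CPU ... */
    dma_free_coherent(dev, PAGE_SIZE, cpu_buf, bus_addr);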
diff --git a/arch/unicore32/include/asm/dma.h b/arch/unicore32/include/asm/dma.h
new file mode 100644
index 000000000000..38dfff9df32f
--- /dev/null
+++ b/arch/unicore32/include/asm/dma.h
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/unicore32/include/asm/dma.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_DMA_H__
+#define __UNICORE_DMA_H__
+
+#include <asm/memory.h>
+#include <asm-generic/dma.h>
+
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#endif
+
+#endif /* __UNICORE_DMA_H__ */
diff --git a/arch/unicore32/include/asm/elf.h b/arch/unicore32/include/asm/elf.h
new file mode 100644
index 000000000000..829042d07722
--- /dev/null
+++ b/arch/unicore32/include/asm/elf.h
@@ -0,0 +1,94 @@
+/*
+ * linux/arch/unicore32/include/asm/elf.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_ELF_H__
+#define __UNICORE_ELF_H__
+
+#include <asm/hwcap.h>
+
+/*
+ * ELF register definitions..
+ */
+#include <asm/ptrace.h>
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_freg_t[3];
+
+#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef struct fp_state elf_fpregset_t;
+
+#define EM_UNICORE 110
+
+#define R_UNICORE_NONE 0
+#define R_UNICORE_PC24 1
+#define R_UNICORE_ABS32 2
+#define R_UNICORE_CALL 28
+#define R_UNICORE_JUMP24 29
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2LSB
+#define ELF_ARCH EM_UNICORE
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ *
+ */
+#define ELF_PLATFORM_SIZE 8
+#define ELF_PLATFORM (elf_platform)
+
+extern char elf_platform[];
+
+struct elf32_hdr;
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+struct task_struct;
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+#define ELF_CORE_COPY_TASK_REGS dump_task_regs
+
+#define ELF_EXEC_PAGESIZE 4096
+
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/* When the program starts, a1 contains a pointer to a function to be
+ registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ have no such handler. */
+#define ELF_PLAT_INIT(_r, load_addr) {(_r)->UCreg_00 = 0; }
+
+extern void elf_set_personality(const struct elf32_hdr *);
+#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+
+struct mm_struct;
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
+extern int vectors_user_mapping(void);
+#define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+
+#endif
diff --git a/arch/unicore32/include/asm/fpstate.h b/arch/unicore32/include/asm/fpstate.h
new file mode 100644
index 000000000000..ba97fac6220d
--- /dev/null
+++ b/arch/unicore32/include/asm/fpstate.h
@@ -0,0 +1,26 @@
+/*
+ * linux/arch/unicore32/include/asm/fpstate.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_FPSTATE_H__
+#define __UNICORE_FPSTATE_H__
+
+#ifndef __ASSEMBLY__
+
+#define FP_REGS_NUMBER 33
+
+struct fp_state {
+ unsigned int regs[FP_REGS_NUMBER];
+} __attribute__((aligned(8)));
+
+#endif
+
+#endif
diff --git a/arch/unicore32/include/asm/fpu-ucf64.h b/arch/unicore32/include/asm/fpu-ucf64.h
new file mode 100644
index 000000000000..16c1457882ee
--- /dev/null
+++ b/arch/unicore32/include/asm/fpu-ucf64.h
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/unicore32/include/asm/fpu-ucf64.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#define FPSCR s31
+
+/* FPSCR bits */
+#define FPSCR_DEFAULT_NAN (1<<25)
+
+#define FPSCR_CMPINSTR_BIT (1<<31)
+
+#define FPSCR_CON (1<<29)
+#define FPSCR_TRAP (1<<27)
+
+/* RND mode */
+#define FPSCR_ROUND_NEAREST (0<<0)
+#define FPSCR_ROUND_PLUSINF (2<<0)
+#define FPSCR_ROUND_MINUSINF (3<<0)
+#define FPSCR_ROUND_TOZERO (1<<0)
+#define FPSCR_RMODE_BIT (0)
+#define FPSCR_RMODE_MASK (7 << FPSCR_RMODE_BIT)
+
+/* trap enable */
+#define FPSCR_IOE (1<<16)
+#define FPSCR_OFE (1<<14)
+#define FPSCR_UFE (1<<13)
+#define FPSCR_IXE (1<<12)
+#define FPSCR_HIE (1<<11)
+#define FPSCR_NDE (1<<10) /* non-denormal */
+
+/* flags */
+#define FPSCR_IDC (1<<24)
+#define FPSCR_HIC (1<<23)
+#define FPSCR_IXC (1<<22)
+#define FPSCR_OFC (1<<21)
+#define FPSCR_UFC (1<<20)
+#define FPSCR_IOC (1<<19)
+
+/* stick bits */
+#define FPSCR_IOS (1<<9)
+#define FPSCR_OFS (1<<7)
+#define FPSCR_UFS (1<<6)
+#define FPSCR_IXS (1<<5)
+#define FPSCR_HIS (1<<4)
+#define FPSCR_NDS (1<<3) /* non-denormal */
diff --git a/arch/unicore32/include/asm/futex.h b/arch/unicore32/include/asm/futex.h
new file mode 100644
index 000000000000..07dea6170558
--- /dev/null
+++ b/arch/unicore32/include/asm/futex.h
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/unicore32/include/asm/futex.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_FUTEX_H__
+#define __UNICORE_FUTEX_H__
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+ __asm__ __volatile__( \
+ "1: ldw.u %1, [%2]\n" \
+ " " insn "\n" \
+ "2: stw.u %0, [%2]\n" \
+ " mov %0, #0\n" \
+ "3:\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4f, 2b, 4f\n" \
+ " .popsection\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ "4: mov %0, %4\n" \
+ " b 3b\n" \
+ " .popsection" \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int
+futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("and %0, %1, %3",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ int val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
+ "1: ldw.u %0, [%3]\n"
+ " cmpxor.a %0, %1\n"
+ " bne 3f\n"
+ "2: stw.u %2, [%3]\n"
+ "3:\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .long 1b, 4f, 2b, 4f\n"
+ " .popsection\n"
+ " .pushsection .fixup,\"ax\"\n"
+ "4: mov %0, %4\n"
+ " b 3b\n"
+ " .popsection"
+ : "=&r" (val)
+ : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ return val;
+}
+
+#endif /* __KERNEL__ */
+#endif /* __UNICORE_FUTEX_H__ */
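A worked example of the encoded_op unpacking in futex_atomic_op_inuser()
above (values chosen for illustration):

    /*
     * FUTEX_OP_ADD (1), oparg 1, FUTEX_OP_CMP_GT (5), cmparg 0:
     *
     *   encoded_op = (1 << 28) | (5 << 24) | (1 << 12) | 0
     *
     *   op     = (encoded_op >> 28) & 7;	-> 1 (FUTEX_OP_ADD)
     *   cmp    = (encoded_op >> 24) & 15;	-> 5 (FUTEX_OP_CMP_GT)
     *   oparg  = (encoded_op << 8) >> 20;	-> 1 (sign-extended 12-bit field)
     *   cmparg = (encoded_op << 20) >> 20;	-> 0 (sign-extended 12-bit field)
     */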
diff --git a/arch/unicore32/include/asm/gpio.h b/arch/unicore32/include/asm/gpio.h
new file mode 100644
index 000000000000..2716f14e3ff6
--- /dev/null
+++ b/arch/unicore32/include/asm/gpio.h
@@ -0,0 +1,104 @@
+/*
+ * linux/arch/unicore32/include/asm/gpio.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_GPIO_H__
+#define __UNICORE_GPIO_H__
+
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <asm-generic/gpio.h>
+
+#define GPI_OTP_INT 0
+#define GPI_PCI_INTA 1
+#define GPI_PCI_INTB 2
+#define GPI_PCI_INTC 3
+#define GPI_PCI_INTD 4
+#define GPI_BAT_DET 5
+#define GPI_SD_CD 6
+#define GPI_SOFF_REQ 7
+#define GPI_SD_WP 8
+#define GPI_LCD_CASE_OFF 9
+#define GPO_WIFI_EN 10
+#define GPO_HDD_LED 11
+#define GPO_VGA_EN 12
+#define GPO_LCD_EN 13
+#define GPO_LED_DATA 14
+#define GPO_LED_CLK 15
+#define GPO_CAM_PWR_EN 16
+#define GPO_LCD_VCC_EN 17
+#define GPO_SOFT_OFF 18
+#define GPO_BT_EN 19
+#define GPO_FAN_ON 20
+#define GPO_SPKR 21
+#define GPO_SET_V1 23
+#define GPO_SET_V2 24
+#define GPO_CPU_HEALTH 25
+#define GPO_LAN_SEL 26
+
+#ifdef CONFIG_PUV3_NB0916
+#define GPI_BTN_TOUCH 14
+#define GPIO_IN 0x000043ff /* 1 for input */
+#define GPIO_OUT 0x0fffbc00 /* 1 for output */
+#endif /* CONFIG_PUV3_NB0916 */
+
+#ifdef CONFIG_PUV3_SMW0919
+#define GPIO_IN 0x000003ff /* 1 for input */
+#define GPIO_OUT 0x0ffffc00 /* 1 for output */
+#endif /* CONFIG_PUV3_SMW0919 */
+
+#ifdef CONFIG_PUV3_DB0913
+#define GPIO_IN 0x000001df /* 1 for input */
+#define GPIO_OUT 0x03fee800 /* 1 for output */
+#endif /* CONFIG_PUV3_DB0913 */
+
+#define GPIO_DIR (~((GPIO_IN) | 0xf0000000))
+ /* 0 input, 1 output */
+
+static inline int gpio_get_value(unsigned gpio)
+{
+ if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
+ return readl(GPIO_GPLR) & GPIO_GPIO(gpio);
+ else
+ return __gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value(unsigned gpio, int value)
+{
+ if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
+ if (value)
+ writel(GPIO_GPIO(gpio), GPIO_GPSR);
+ else
+ writel(GPIO_GPIO(gpio), GPIO_GPCR);
+ else
+ __gpio_set_value(gpio, value);
+}
+
+#define gpio_cansleep __gpio_cansleep
+
+static inline unsigned gpio_to_irq(unsigned gpio)
+{
+ if ((gpio < IRQ_GPIOHIGH) && (FIELD(1, 1, gpio) & readl(GPIO_GPIR)))
+ return IRQ_GPIOLOW0 + gpio;
+ else
+ return IRQ_GPIO0 + gpio;
+}
+
+static inline unsigned irq_to_gpio(unsigned irq)
+{
+ if (irq < IRQ_GPIOHIGH)
+ return irq - IRQ_GPIOLOW0;
+ else
+ return irq - IRQ_GPIO0;
+}
+
+#endif /* __UNICORE_GPIO_H__ */
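Usage sketch: with a compile-time-constant GPIO number the accessors
above reduce to a single MMIO access (pin polarity is board-specific;
illustrative only):

    gpio_set_value(GPO_HDD_LED, 1);	/* one writel() to GPIO_GPSR */
    if (gpio_get_value(GPI_SD_CD))	/* one readl() of GPIO_GPLR */
    	; /* card-detect pin reads high */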
diff --git a/arch/unicore32/include/asm/hwcap.h b/arch/unicore32/include/asm/hwcap.h
new file mode 100644
index 000000000000..97bd40fdd4ac
--- /dev/null
+++ b/arch/unicore32/include/asm/hwcap.h
@@ -0,0 +1,32 @@
+/*
+ * linux/arch/unicore32/include/asm/hwcap.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_HWCAP_H__
+#define __UNICORE_HWCAP_H__
+
+/*
+ * HWCAP flags
+ */
+#define HWCAP_MSP 1
+#define HWCAP_UNICORE16 2
+#define HWCAP_CMOV 4
+#define HWCAP_UNICORE_F64 8
+#define HWCAP_TLS 0x80
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (HWCAP_CMOV | HWCAP_UNICORE_F64)
+#endif
+
+#endif
diff --git a/arch/unicore32/include/asm/io.h b/arch/unicore32/include/asm/io.h
new file mode 100644
index 000000000000..4bd87f3d13d4
--- /dev/null
+++ b/arch/unicore32/include/asm/io.h
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/unicore32/include/asm/io.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_IO_H__
+#define __UNICORE_IO_H__
+
+#ifdef __KERNEL__
+
+#include <asm/byteorder.h>
+#include <asm/memory.h>
+#include <asm/system.h>
+
+#define PCI_IOBASE PKUNITY_PCILIO_BASE
+#include <asm-generic/io.h>
+
+/*
+ * __uc32_ioremap and __uc32_ioremap_cached take a CPU physical address.
+ */
+extern void __iomem *__uc32_ioremap(unsigned long, size_t);
+extern void __iomem *__uc32_ioremap_cached(unsigned long, size_t);
+extern void __uc32_iounmap(volatile void __iomem *addr);
+
+/*
+ * ioremap and friends.
+ *
+ * ioremap takes a PCI memory address, as specified in
+ * Documentation/IO-mapping.txt.
+ *
+ */
+#define ioremap(cookie, size) __uc32_ioremap(cookie, size)
+#define ioremap_cached(cookie, size) __uc32_ioremap_cached(cookie, size)
+#define iounmap(cookie) __uc32_iounmap(cookie)
+
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+#undef xlate_dev_mem_ptr
+#define xlate_dev_mem_ptr(p) __va(p)
+
+#define HAVE_ARCH_PIO_SIZE
+#define PIO_OFFSET (unsigned int)(PCI_IOBASE)
+#define PIO_MASK (unsigned int)(IO_SPACE_LIMIT)
+#define PIO_RESERVED (PIO_OFFSET + PIO_MASK + 1)
+
+#endif /* __KERNEL__ */
+#endif /* __UNICORE_IO_H__ */
diff --git a/arch/unicore32/include/asm/irq.h b/arch/unicore32/include/asm/irq.h
new file mode 100644
index 000000000000..baea93e2a6e6
--- /dev/null
+++ b/arch/unicore32/include/asm/irq.h
@@ -0,0 +1,105 @@
+/*
+ * linux/arch/unicore32/include/asm/irq.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_IRQ_H__
+#define __UNICORE_IRQ_H__
+
+#include <asm-generic/irq.h>
+
+#define IRQ_GPIOLOW0 0x00
+#define IRQ_GPIOLOW1 0x01
+#define IRQ_GPIOLOW2 0x02
+#define IRQ_GPIOLOW3 0x03
+#define IRQ_GPIOLOW4 0x04
+#define IRQ_GPIOLOW5 0x05
+#define IRQ_GPIOLOW6 0x06
+#define IRQ_GPIOLOW7 0x07
+#define IRQ_GPIOHIGH 0x08
+#define IRQ_USB 0x09
+#define IRQ_SDC 0x0a
+#define IRQ_AC97 0x0b
+#define IRQ_SATA 0x0c
+#define IRQ_MME 0x0d
+#define IRQ_PCI_BRIDGE 0x0e
+#define IRQ_DDR 0x0f
+#define IRQ_SPI 0x10
+#define IRQ_UNIGFX 0x11
+#define IRQ_I2C 0x11
+#define IRQ_UART1 0x12
+#define IRQ_UART0 0x13
+#define IRQ_UMAL 0x14
+#define IRQ_NAND 0x15
+#define IRQ_PS2_KBD 0x16
+#define IRQ_PS2_AUX 0x17
+#define IRQ_DMA 0x18
+#define IRQ_DMAERR 0x19
+#define IRQ_TIMER0 0x1a
+#define IRQ_TIMER1 0x1b
+#define IRQ_TIMER2 0x1c
+#define IRQ_TIMER3 0x1d
+#define IRQ_RTC 0x1e
+#define IRQ_RTCAlarm 0x1f
+
+#define IRQ_GPIO0 0x20
+#define IRQ_GPIO1 0x21
+#define IRQ_GPIO2 0x22
+#define IRQ_GPIO3 0x23
+#define IRQ_GPIO4 0x24
+#define IRQ_GPIO5 0x25
+#define IRQ_GPIO6 0x26
+#define IRQ_GPIO7 0x27
+#define IRQ_GPIO8 0x28
+#define IRQ_GPIO9 0x29
+#define IRQ_GPIO10 0x2a
+#define IRQ_GPIO11 0x2b
+#define IRQ_GPIO12 0x2c
+#define IRQ_GPIO13 0x2d
+#define IRQ_GPIO14 0x2e
+#define IRQ_GPIO15 0x2f
+#define IRQ_GPIO16 0x30
+#define IRQ_GPIO17 0x31
+#define IRQ_GPIO18 0x32
+#define IRQ_GPIO19 0x33
+#define IRQ_GPIO20 0x34
+#define IRQ_GPIO21 0x35
+#define IRQ_GPIO22 0x36
+#define IRQ_GPIO23 0x37
+#define IRQ_GPIO24 0x38
+#define IRQ_GPIO25 0x39
+#define IRQ_GPIO26 0x3a
+#define IRQ_GPIO27 0x3b
+
+#ifdef CONFIG_ARCH_FPGA
+#define IRQ_PCIINTA IRQ_GPIOLOW2
+#define IRQ_PCIINTB IRQ_GPIOLOW1
+#define IRQ_PCIINTC IRQ_GPIOLOW0
+#define IRQ_PCIINTD IRQ_GPIOLOW6
+#endif
+
+#if defined(CONFIG_PUV3_DB0913) || defined(CONFIG_PUV3_NB0916) \
+ || defined(CONFIG_PUV3_SMW0919)
+#define IRQ_PCIINTA IRQ_GPIOLOW1
+#define IRQ_PCIINTB IRQ_GPIOLOW2
+#define IRQ_PCIINTC IRQ_GPIOLOW3
+#define IRQ_PCIINTD IRQ_GPIOLOW4
+#endif
+
+#define IRQ_SD_CD IRQ_GPIO6 /* falling or rising trigger */
+
+#ifndef __ASSEMBLY__
+struct pt_regs;
+
+extern void asm_do_IRQ(unsigned int, struct pt_regs *);
+
+#endif
+
+#endif
+
diff --git a/arch/unicore32/include/asm/irqflags.h b/arch/unicore32/include/asm/irqflags.h
new file mode 100644
index 000000000000..6d8a28dfdbae
--- /dev/null
+++ b/arch/unicore32/include/asm/irqflags.h
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/unicore32/include/asm/irqflags.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_IRQFLAGS_H__
+#define __UNICORE_IRQFLAGS_H__
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+
+#define ARCH_IRQ_DISABLED (PRIV_MODE | PSR_I_BIT)
+#define ARCH_IRQ_ENABLED (PRIV_MODE)
+
+/*
+ * Save the current interrupt enable state.
+ */
+static inline unsigned long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ asm volatile("mov %0, asr" : "=r" (temp) : : "memory", "cc");
+
+ return temp & PSR_c;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ unsigned long temp;
+
+ asm volatile(
+ "mov %0, asr\n"
+ "mov.a asr, %1\n"
+ "mov.f asr, %0"
+ : "=&r" (temp)
+ : "r" (flags)
+ : "memory", "cc");
+}
+
+#include <asm-generic/irqflags.h>
+
+#endif
+#endif
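The two primitives above are all <asm-generic/irqflags.h> needs; a
simplified sketch of what the generic header derives from them and from
ARCH_IRQ_DISABLED/ARCH_IRQ_ENABLED:

    static inline void arch_local_irq_disable(void)
    {
    	arch_local_irq_restore(ARCH_IRQ_DISABLED);
    }

    static inline void arch_local_irq_enable(void)
    {
    	arch_local_irq_restore(ARCH_IRQ_ENABLED);
    }

    static inline unsigned long arch_local_irq_save(void)
    {
    	unsigned long flags = arch_local_save_flags();

    	arch_local_irq_disable();
    	return flags;
    }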
diff --git a/arch/unicore32/include/asm/linkage.h b/arch/unicore32/include/asm/linkage.h
new file mode 100644
index 000000000000..d1618bd35b67
--- /dev/null
+++ b/arch/unicore32/include/asm/linkage.h
@@ -0,0 +1,22 @@
+/*
+ * linux/arch/unicore32/include/asm/linkage.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_LINKAGE_H__
+#define __UNICORE_LINKAGE_H__
+
+#define __ALIGN .align 0
+#define __ALIGN_STR ".align 0"
+
+#define ENDPROC(name) \
+ .type name, %function; \
+ END(name)
+
+#endif
diff --git a/arch/unicore32/include/asm/memblock.h b/arch/unicore32/include/asm/memblock.h
new file mode 100644
index 000000000000..a8a5d8d0a26e
--- /dev/null
+++ b/arch/unicore32/include/asm/memblock.h
@@ -0,0 +1,46 @@
+/*
+ * linux/arch/unicore32/include/asm/memblock.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_MEMBLOCK_H__
+#define __UNICORE_MEMBLOCK_H__
+
+/*
+ * Memory map description
+ */
+# define NR_BANKS 8
+
+struct membank {
+ unsigned long start;
+ unsigned long size;
+ unsigned int highmem;
+};
+
+struct meminfo {
+ int nr_banks;
+ struct membank bank[NR_BANKS];
+};
+
+extern struct meminfo meminfo;
+
+#define for_each_bank(iter, mi) \
+ for (iter = 0; iter < (mi)->nr_banks; iter++)
+
+#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
+#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
+#define bank_pfn_size(bank) ((bank)->size >> PAGE_SHIFT)
+#define bank_phys_start(bank) ((bank)->start)
+#define bank_phys_end(bank) ((bank)->start + (bank)->size)
+#define bank_phys_size(bank) ((bank)->size)
+
+extern void uc32_memblock_init(struct meminfo *);
+
+#endif
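Usage sketch (hypothetical caller, for illustration): walking the
registered banks with the helpers above:

    struct meminfo *mi = &meminfo;
    int i;

    for_each_bank(i, mi) {
    	struct membank *bank = &mi->bank[i];

    	pr_info("bank %d: %#lx-%#lx (%lu pages)\n", i,
    		bank_phys_start(bank), bank_phys_end(bank),
    		bank_pfn_size(bank));
    }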
diff --git a/arch/unicore32/include/asm/memory.h b/arch/unicore32/include/asm/memory.h
new file mode 100644
index 000000000000..5eddb997defe
--- /dev/null
+++ b/arch/unicore32/include/asm/memory.h
@@ -0,0 +1,123 @@
+/*
+ * linux/arch/unicore32/include/asm/memory.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Note: this file should not be included by non-asm/.h files
+ */
+#ifndef __UNICORE_MEMORY_H__
+#define __UNICORE_MEMORY_H__
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <asm/sizes.h>
+#include <mach/memory.h>
+
+/*
+ * Allow for constants defined here to be used from assembly code
+ * by prepending the UL suffix only with actual C code compilation.
+ */
+#define UL(x) _AC(x, UL)
+
+/*
+ * PAGE_OFFSET - the virtual address of the start of the kernel image
+ * TASK_SIZE - the maximum size of a user space task.
+ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
+ */
+#define PAGE_OFFSET UL(0xC0000000)
+#define TASK_SIZE (PAGE_OFFSET - UL(0x41000000))
+#define TASK_UNMAPPED_BASE (PAGE_OFFSET / 3)
+
+/*
+ * The module space lives between the addresses given by TASK_SIZE
+ * and PAGE_OFFSET - it must be within 32MB of the kernel text.
+ */
+#define MODULES_VADDR (PAGE_OFFSET - 16*1024*1024)
+#if TASK_SIZE > MODULES_VADDR
+#error Top of user space clashes with start of module space
+#endif
+
+#define MODULES_END (PAGE_OFFSET)
+
+/*
+ * Allow 16MB-aligned ioremap pages
+ */
+#define IOREMAP_MAX_ORDER 24
+
+/*
+ * Physical vs virtual RAM address space conversion. These are
+ * private definitions which should NOT be used outside memory.h
+ * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
+ */
+#ifndef __virt_to_phys
+#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
+#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
+
+/*
+ * Convert a physical address to a Page Frame Number and back
+ */
+#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
+#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
+#ifndef __ASSEMBLY__
+
+#ifndef arch_adjust_zones
+#define arch_adjust_zones(size, holes) do { } while (0)
+#endif
+
+/*
+ * PFNs are used to describe any physical page; this means
+ * PFN 0 == physical address 0.
+ *
+ * This is the PFN of the first RAM page in the kernel
+ * direct-mapped view. We assume this is the first page
+ * of RAM in the mem_map as well.
+ */
+#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
+
+/*
+ * Drivers should NOT use these either.
+ */
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+
+/*
+ * Conversion between a struct page and a physical address.
+ *
+ * Note: when converting an unknown physical address to a
+ * struct page, the resulting pointer must be validated
+ * using VALID_PAGE(). It must return an invalid struct page
+ * for any physical address not corresponding to a system
+ * RAM address.
+ *
+ * page_to_pfn(page) convert a struct page * to a PFN number
+ * pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
+ *
+ * virt_to_page(k) convert a _valid_ virtual address to struct page *
+ * virt_addr_valid(k) indicates whether a virtual address is valid
+ */
+#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
+
+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && \
+ (unsigned long)(kaddr) < (unsigned long)high_memory)
+
+#endif
+
+#include <asm-generic/memory_model.h>
+
+#endif
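A worked example of the linear mapping above, assuming a hypothetical
PHYS_OFFSET of 0x00000000 (the real value comes from <mach/memory.h>):

    /*
     *  __pa(0xC0100000) = 0xC0100000 - PAGE_OFFSET + PHYS_OFFSET
     *                   = 0x00100000
     *  __va(0x00100000) = 0xC0100000
     */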
diff --git a/arch/unicore32/include/asm/mmu.h b/arch/unicore32/include/asm/mmu.h
new file mode 100644
index 000000000000..66fa341dc2c6
--- /dev/null
+++ b/arch/unicore32/include/asm/mmu.h
@@ -0,0 +1,17 @@
+/*
+ * linux/arch/unicore32/include/asm/mmu.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_MMU_H__
+#define __UNICORE_MMU_H__
+
+typedef unsigned long mm_context_t;
+
+#endif
diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h
new file mode 100644
index 000000000000..fb5e4c658f7a
--- /dev/null
+++ b/arch/unicore32/include/asm/mmu_context.h
@@ -0,0 +1,87 @@
+/*
+ * linux/arch/unicore32/include/asm/mmu_context.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_MMU_CONTEXT_H__
+#define __UNICORE_MMU_CONTEXT_H__
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu-single.h>
+
+#define init_new_context(tsk, mm) 0
+
+#define destroy_context(mm) do { } while (0)
+
+/*
+ * This is called when "tsk" is about to enter lazy TLB mode.
+ *
+ * mm: describes the currently active mm context
+ * tsk: task which is entering lazy tlb
+ * cpu: cpu number which is entering lazy tlb
+ *
+ * tsk->mm will be NULL
+ */
+static inline void
+enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned. No registers are touched. We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned int cpu = smp_processor_id();
+
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
+ cpu_switch_mm(next->pgd, next);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+#define activate_mm(prev, next) switch_mm(prev, next, NULL)
+
+/*
+ * We are inserting a "fake" vma for the user-accessible vector page so
+ * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
+ * But we also want to remove it before the generic code gets to see it
+ * during process exit, or the unmapping of it would cause total havoc.
+ * (the macro is used as remove_vma() is static to mm/mmap.c)
+ */
+#define arch_exit_mmap(mm) \
+do { \
+ struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
+ if (high_vma) { \
+ BUG_ON(high_vma->vm_next); /* it should be last */ \
+ if (high_vma->vm_prev) \
+ high_vma->vm_prev->vm_next = NULL; \
+ else \
+ mm->mmap = NULL; \
+ rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
+ mm->mmap_cache = NULL; \
+ mm->map_count--; \
+ remove_vma(high_vma); \
+ } \
+} while (0)
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+}
+
+#endif
diff --git a/arch/unicore32/include/asm/mutex.h b/arch/unicore32/include/asm/mutex.h
new file mode 100644
index 000000000000..fab7d0e8adf6
--- /dev/null
+++ b/arch/unicore32/include/asm/mutex.h
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/unicore32/include/asm/mutex.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UniCore optimized mutex locking primitives
+ *
+ * Please look into asm-generic/mutex-xchg.h for a formal definition.
+ */
+#ifndef __UNICORE_MUTEX_H__
+#define __UNICORE_MUTEX_H__
+
+# include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/unicore32/include/asm/page.h b/arch/unicore32/include/asm/page.h
new file mode 100644
index 000000000000..594b3226250e
--- /dev/null
+++ b/arch/unicore32/include/asm/page.h
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/unicore32/include/asm/page.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PAGE_H__
+#define __UNICORE_PAGE_H__
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+struct page;
+struct vm_area_struct;
+
+#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+extern void copy_page(void *to, const void *from);
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+typedef struct page *pgtable_t;
+
+extern int pfn_valid(unsigned long);
+
+#include <asm/memory.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#include <asm-generic/getorder.h>
+
+#endif
diff --git a/arch/unicore32/include/asm/pci.h b/arch/unicore32/include/asm/pci.h
new file mode 100644
index 000000000000..c5b28b459535
--- /dev/null
+++ b/arch/unicore32/include/asm/pci.h
@@ -0,0 +1,46 @@
+/*
+ * linux/arch/unicore32/include/asm/pci.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PCI_H__
+#define __UNICORE_PCI_H__
+
+#ifdef __KERNEL__
+#include <asm-generic/pci-dma-compat.h>
+#include <asm-generic/pci.h>
+#include <mach/hardware.h> /* for PCIBIOS_MIN_* */
+
+static inline void pcibios_set_master(struct pci_dev *dev)
+{
+ /* No special bus mastering setup handling */
+}
+
+static inline void pcibios_penalize_isa_irq(int irq, int active)
+{
+ /* We don't do dynamic PCI IRQ allocation */
+}
+
+#ifdef CONFIG_PCI
+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
+ enum pci_dma_burst_strategy *strat,
+ unsigned long *strategy_parameter)
+{
+ *strat = PCI_DMA_BURST_INFINITY;
+ *strategy_parameter = ~0UL;
+}
+#endif
+
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
new file mode 100644
index 000000000000..0213e373a895
--- /dev/null
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -0,0 +1,110 @@
+/*
+ * linux/arch/unicore32/include/asm/pgalloc.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PGALLOC_H__
+#define __UNICORE_PGALLOC_H__
+
+#include <asm/pgtable-hwdef.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#define check_pgt_cache() do { } while (0)
+
+#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
+#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_PRESENT)
+
+extern pgd_t *get_pgd_slow(struct mm_struct *mm);
+extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
+
+#define pgd_alloc(mm) get_pgd_slow(mm)
+#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
+
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+
+/*
+ * Allocate one PTE table.
+ */
+static inline pte_t *
+pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
+{
+ pte_t *pte;
+
+ pte = (pte_t *)__get_free_page(PGALLOC_GFP);
+ if (pte)
+ clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));
+
+ return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ struct page *pte;
+
+ pte = alloc_pages(PGALLOC_GFP, 0);
+ if (pte) {
+ if (!PageHighMem(pte)) {
+ void *page = page_address(pte);
+ clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
+ }
+ pgtable_page_ctor(pte);
+ }
+
+ return pte;
+}
+
+/*
+ * Free one PTE table.
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ if (pte)
+ free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+ pgtable_page_dtor(pte);
+ __free_page(pte);
+}
+
+static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
+{
+ set_pmd(pmdp, __pmd(pmdval));
+ flush_pmd_entry(pmdp);
+}
+
+/*
+ * Populate the pmdp entry with a pointer to the pte. This pmd is part
+ * of the mm address space.
+ */
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
+{
+ unsigned long pte_ptr = (unsigned long)ptep;
+
+ /*
+ * The pmd must be loaded with the physical
+ * address of the PTE table
+ */
+ __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
+{
+ __pmd_populate(pmdp,
+ page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
+}
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#endif
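A sketch of how the halves above pair up at fault time (simplified,
error handling elided; illustrative only):

    /*
     *  pgtable_t new = pte_alloc_one(mm, addr);  // cleaned PTE page
     *  pmd_populate(mm, pmdp, new);              // phys addr | _PAGE_USER_TABLE
     *
     * The pmd holds the *physical* address of the PTE table, which is
     * why __pmd_populate() finishes with flush_pmd_entry() so the
     * hardware walker sees the update.
     */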
diff --git a/arch/unicore32/include/asm/pgtable-hwdef.h b/arch/unicore32/include/asm/pgtable-hwdef.h
new file mode 100644
index 000000000000..7314e859cca0
--- /dev/null
+++ b/arch/unicore32/include/asm/pgtable-hwdef.h
@@ -0,0 +1,55 @@
+/*
+ * linux/arch/unicore32/include/asm/pgtable-hwdef.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PGTABLE_HWDEF_H__
+#define __UNICORE_PGTABLE_HWDEF_H__
+
+/*
+ * Hardware page table definitions.
+ *
+ * + Level 1 descriptor (PMD)
+ * - common
+ */
+#define PMD_TYPE_MASK (3 << 0)
+#define PMD_TYPE_TABLE (0 << 0)
+/*#define PMD_TYPE_LARGE (1 << 0) */
+#define PMD_TYPE_INVALID (2 << 0)
+#define PMD_TYPE_SECT (3 << 0)
+
+#define PMD_PRESENT (1 << 2)
+#define PMD_YOUNG (1 << 3)
+
+/*#define PMD_SECT_DIRTY (1 << 4) */
+#define PMD_SECT_CACHEABLE (1 << 5)
+#define PMD_SECT_EXEC (1 << 6)
+#define PMD_SECT_WRITE (1 << 7)
+#define PMD_SECT_READ (1 << 8)
+
+/*
+ * + Level 2 descriptor (PTE)
+ * - common
+ */
+#define PTE_TYPE_MASK (3 << 0)
+#define PTE_TYPE_SMALL (0 << 0)
+#define PTE_TYPE_MIDDLE (1 << 0)
+#define PTE_TYPE_LARGE (2 << 0)
+#define PTE_TYPE_INVALID (3 << 0)
+
+#define PTE_PRESENT (1 << 2)
+#define PTE_FILE (1 << 3) /* only when !PRESENT */
+#define PTE_YOUNG (1 << 3)
+#define PTE_DIRTY (1 << 4)
+#define PTE_CACHEABLE (1 << 5)
+#define PTE_EXEC (1 << 6)
+#define PTE_WRITE (1 << 7)
+#define PTE_READ (1 << 8)
+
+#endif
diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h
new file mode 100644
index 000000000000..68b2f297ac97
--- /dev/null
+++ b/arch/unicore32/include/asm/pgtable.h
@@ -0,0 +1,317 @@
+/*
+ * linux/arch/unicore32/include/asm/pgtable.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PGTABLE_H__
+#define __UNICORE_PGTABLE_H__
+
+#include <asm-generic/pgtable-nopmd.h>
+#include <asm/cpu-single.h>
+
+#include <asm/memory.h>
+#include <asm/pgtable-hwdef.h>
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * Note that platforms may override VMALLOC_START, but they must provide
+ * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
+ * which may not overlap IO space.
+ */
+#ifndef VMALLOC_START
+#define VMALLOC_OFFSET SZ_8M
+#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) \
+ & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END (0xff000000UL)
+#endif
+
+#define PTRS_PER_PTE 1024
+#define PTRS_PER_PGD 1024
+
+/*
+ * PGDIR_SHIFT determines what a third-level page table entry can map
+ */
+#define PGDIR_SHIFT 22
+
+#ifndef __ASSEMBLY__
+extern void __pte_error(const char *file, int line, unsigned long val);
+extern void __pgd_error(const char *file, int line, unsigned long val);
+
+#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
+#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
+#endif /* !__ASSEMBLY__ */
+
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+/*
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
+ */
+#define FIRST_USER_ADDRESS PAGE_SIZE
+
+#define FIRST_USER_PGD_NR 1
+#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
+
+/*
+ * section address mask and size definitions.
+ */
+#define SECTION_SHIFT 22
+#define SECTION_SIZE (1UL << SECTION_SHIFT)
+#define SECTION_MASK (~(SECTION_SIZE-1))
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The pgprot_* and protection_map entries will be fixed up in runtime
+ * to include the cacheable bits based on memory policy, as well as any
+ * architecture dependent bits.
+ */
+#define _PTE_DEFAULT (PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE)
+
+extern pgprot_t pgprot_user;
+extern pgprot_t pgprot_kernel;
+
+#define PAGE_NONE pgprot_user
+#define PAGE_SHARED __pgprot(pgprot_val(pgprot_user | PTE_READ \
+ | PTE_WRITE))
+#define PAGE_SHARED_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
+ | PTE_WRITE \
+ | PTE_EXEC))
+#define PAGE_COPY __pgprot(pgprot_val(pgprot_user | PTE_READ))
+#define PAGE_COPY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
+ | PTE_EXEC))
+#define PAGE_READONLY __pgprot(pgprot_val(pgprot_user | PTE_READ))
+#define PAGE_READONLY_EXEC __pgprot(pgprot_val(pgprot_user | PTE_READ \
+ | PTE_EXEC))
+#define PAGE_KERNEL pgprot_kernel
+#define PAGE_KERNEL_EXEC __pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))
+
+#define __PAGE_NONE __pgprot(_PTE_DEFAULT)
+#define __PAGE_SHARED __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_WRITE)
+#define __PAGE_SHARED_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_WRITE \
+ | PTE_EXEC)
+#define __PAGE_COPY __pgprot(_PTE_DEFAULT | PTE_READ)
+#define __PAGE_COPY_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_EXEC)
+#define __PAGE_READONLY __pgprot(_PTE_DEFAULT | PTE_READ)
+#define __PAGE_READONLY_EXEC __pgprot(_PTE_DEFAULT | PTE_READ \
+ | PTE_EXEC)
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The table below defines the page protection levels that we insert into our
+ * Linux page table version. These get translated into the best that the
+ * architecture can perform. Note that on UniCore hardware:
+ * 1) We cannot do execute protection
+ * 2) If we could do execute protection, then read is implied
+ * 3) write implies read permissions
+ */
+#define __P000 __PAGE_NONE
+#define __P001 __PAGE_READONLY
+#define __P010 __PAGE_COPY
+#define __P011 __PAGE_COPY
+#define __P100 __PAGE_READONLY_EXEC
+#define __P101 __PAGE_READONLY_EXEC
+#define __P110 __PAGE_COPY_EXEC
+#define __P111 __PAGE_COPY_EXEC
+
+#define __S000 __PAGE_NONE
+#define __S001 __PAGE_READONLY
+#define __S010 __PAGE_SHARED
+#define __S011 __PAGE_SHARED
+#define __S100 __PAGE_READONLY_EXEC
+#define __S101 __PAGE_READONLY_EXEC
+#define __S110 __PAGE_SHARED_EXEC
+#define __S111 __PAGE_SHARED_EXEC
+
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr) (empty_zero_page)
+
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) \
+ | pgprot_val(prot)))
+
+#define pte_none(pte) (!pte_val(pte))
+#define pte_clear(mm, addr, ptep) set_pte(ptep, __pte(0))
+#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
+#define pte_offset_kernel(dir, addr) (pmd_page_vaddr(*(dir)) \
+ + __pte_index(addr))
+
+#define pte_offset_map(dir, addr) (pmd_page_vaddr(*(dir)) \
+ + __pte_index(addr))
+#define pte_unmap(pte) do { } while (0)
+
+#define set_pte(ptep, pte) cpu_set_pte(ptep, pte)
+
+#define set_pte_at(mm, addr, ptep, pteval) \
+ do { \
+ set_pte(ptep, pteval); \
+ } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+#define pte_present(pte) (pte_val(pte) & PTE_PRESENT)
+#define pte_write(pte) (pte_val(pte) & PTE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
+#define pte_young(pte) (pte_val(pte) & PTE_YOUNG)
+#define pte_exec(pte) (pte_val(pte) & PTE_EXEC)
+#define pte_special(pte) (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~PTE_WRITE);
+PTE_BIT_FUNC(mkwrite, |= PTE_WRITE);
+PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
+PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
+PTE_BIT_FUNC(mkold, &= ~PTE_YOUNG);
+PTE_BIT_FUNC(mkyoung, |= PTE_YOUNG);
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+/*
+ * Mark the prot value as uncacheable.
+ */
+#define pgprot_noncached(prot) \
+ __pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
+#define pgprot_writecombine(prot) \
+ __pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
+#define pgprot_dmacoherent(prot) \
+ __pgprot(pgprot_val(prot) & ~PTE_CACHEABLE)
+
+#define pmd_none(pmd) (!pmd_val(pmd))
+#define pmd_present(pmd) (pmd_val(pmd) & PMD_PRESENT)
+#define pmd_bad(pmd) (((pmd_val(pmd) & \
+ (PMD_PRESENT | PMD_TYPE_MASK)) \
+ != (PMD_PRESENT | PMD_TYPE_TABLE)))
+
+#define set_pmd(pmdpd, pmdval) \
+ do { \
+ *(pmdpd) = pmdval; \
+ } while (0)
+
+#define pmd_clear(pmdp) \
+ do { \
+ set_pmd(pmdp, __pmd(0));\
+ clean_pmd_entry(pmdp); \
+ } while (0)
+
+#define pmd_page_vaddr(pmd) ((pte_t *)__va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+
+#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+/* Find an entry in the third-level page table.. */
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ const unsigned long mask = PTE_EXEC | PTE_WRITE | PTE_READ;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * Encode and decode a swap entry. Swap entries are stored in the Linux
+ * page tables as follows:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <--------------- offset --------------> <--- type --> 0 0 0 0 0
+ *
+ * This gives us up to 127 swap files and 4GB per swap file. Note that
+ * the offset field is always non-zero.
+ */
+#define __SWP_TYPE_SHIFT 5
+#define __SWP_TYPE_BITS 7
+#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) \
+ & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ ((type) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+
+/*
+ * It is an error for the kernel to have more swap files than we can
+ * encode in the PTEs. This ensures that we know when MAX_SWAPFILES
+ * is increased beyond what we presently support.
+ */
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+/*
+ * Encode and decode a file entry. File entries are stored in the Linux
+ * page tables as follows:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * <----------------------- offset ----------------------> 1 0 0 0
+ */
+#define pte_file(pte) (pte_val(pte) & PTE_FILE)
+#define pte_to_pgoff(x) (pte_val(x) >> 4)
+#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
+
+#define PTE_FILE_MAX_BITS 28
+
+/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
+/* FIXME: this is not correct */
+#define kern_addr_valid(addr) (1)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __UNICORE_PGTABLE_H__ */
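A worked example of the swap encoding defined above (type 3,
offset 0x100; values chosen for illustration):

    /*
     *  __swp_entry(3, 0x100).val = (3 << 5) | (0x100 << 12) = 0x100060
     *  __swp_type(entry)   = (0x100060 >> 5) & 0x7f = 3
     *  __swp_offset(entry) =  0x100060 >> 12        = 0x100
     */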
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h
new file mode 100644
index 000000000000..e11cb0786578
--- /dev/null
+++ b/arch/unicore32/include/asm/processor.h
@@ -0,0 +1,92 @@
+/*
+ * linux/arch/unicore32/include/asm/processor.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_PROCESSOR_H__
+#define __UNICORE_PROCESSOR_H__
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#ifdef __KERNEL__
+
+#include <asm/ptrace.h>
+#include <asm/types.h>
+
+#ifdef __KERNEL__
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+#endif
+
+struct debug_entry {
+ u32 address;
+ u32 insn;
+};
+
+struct debug_info {
+ int nsaved;
+ struct debug_entry bp[2];
+};
+
+struct thread_struct {
+ /* fault info */
+ unsigned long address;
+ unsigned long trap_no;
+ unsigned long error_code;
+ /* debugging */
+ struct debug_info debug;
+};
+
+#define INIT_THREAD { }
+
+#define start_thread(regs, pc, sp) \
+({ \
+ unsigned long *stack = (unsigned long *)sp; \
+ set_fs(USER_DS); \
+ memset(regs->uregs, 0, sizeof(regs->uregs)); \
+ regs->UCreg_asr = USER_MODE; \
+ regs->UCreg_pc = pc & ~1; /* pc */ \
+ regs->UCreg_sp = sp; /* sp */ \
+ regs->UCreg_02 = stack[2]; /* r2 (envp) */ \
+ regs->UCreg_01 = stack[1]; /* r1 (argv) */ \
+ regs->UCreg_00 = stack[0]; /* r0 (argc) */ \
+})
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+unsigned long get_wchan(struct task_struct *p);
+
+#define cpu_relax() barrier()
+
+/*
+ * Create a new kernel thread
+ */
+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->UCreg_pc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->UCreg_sp)
+
+#endif
+
+#endif /* __UNICORE_PROCESSOR_H__ */
diff --git a/arch/unicore32/include/asm/ptrace.h b/arch/unicore32/include/asm/ptrace.h
new file mode 100644
index 000000000000..b9caf9b0997b
--- /dev/null
+++ b/arch/unicore32/include/asm/ptrace.h
@@ -0,0 +1,133 @@
+/*
+ * linux/arch/unicore32/include/asm/ptrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_PTRACE_H__
+#define __UNICORE_PTRACE_H__
+
+#define PTRACE_GET_THREAD_AREA 22
+
+/*
+ * PSR bits
+ */
+#define USER_MODE 0x00000010
+#define REAL_MODE 0x00000011
+#define INTR_MODE 0x00000012
+#define PRIV_MODE 0x00000013
+#define ABRT_MODE 0x00000017
+#define EXTN_MODE 0x0000001b
+#define SUSR_MODE 0x0000001f
+#define MODE_MASK 0x0000001f
+#define PSR_R_BIT 0x00000040
+#define PSR_I_BIT 0x00000080
+#define PSR_V_BIT 0x10000000
+#define PSR_C_BIT 0x20000000
+#define PSR_Z_BIT 0x40000000
+#define PSR_S_BIT 0x80000000
+
+/*
+ * Groups of PSR bits
+ */
+#define PSR_f 0xff000000 /* Flags */
+#define PSR_c 0x000000ff /* Control */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This struct defines the way the registers are stored on the
+ * stack during a system call. Note that sizeof(struct pt_regs)
+ * has to be a multiple of 8.
+ */
+struct pt_regs {
+ unsigned long uregs[34];
+};
+
+#define UCreg_asr uregs[32]
+#define UCreg_pc uregs[31]
+#define UCreg_lr uregs[30]
+#define UCreg_sp uregs[29]
+#define UCreg_ip uregs[28]
+#define UCreg_fp uregs[27]
+#define UCreg_26 uregs[26]
+#define UCreg_25 uregs[25]
+#define UCreg_24 uregs[24]
+#define UCreg_23 uregs[23]
+#define UCreg_22 uregs[22]
+#define UCreg_21 uregs[21]
+#define UCreg_20 uregs[20]
+#define UCreg_19 uregs[19]
+#define UCreg_18 uregs[18]
+#define UCreg_17 uregs[17]
+#define UCreg_16 uregs[16]
+#define UCreg_15 uregs[15]
+#define UCreg_14 uregs[14]
+#define UCreg_13 uregs[13]
+#define UCreg_12 uregs[12]
+#define UCreg_11 uregs[11]
+#define UCreg_10 uregs[10]
+#define UCreg_09 uregs[9]
+#define UCreg_08 uregs[8]
+#define UCreg_07 uregs[7]
+#define UCreg_06 uregs[6]
+#define UCreg_05 uregs[5]
+#define UCreg_04 uregs[4]
+#define UCreg_03 uregs[3]
+#define UCreg_02 uregs[2]
+#define UCreg_01 uregs[1]
+#define UCreg_00 uregs[0]
+#define UCreg_ORIG_00 uregs[33]
+
+#ifdef __KERNEL__
+
+#define user_mode(regs) \
+ (processor_mode(regs) == USER_MODE)
+
+#define processor_mode(regs) \
+ ((regs)->UCreg_asr & MODE_MASK)
+
+#define interrupts_enabled(regs) \
+ (!((regs)->UCreg_asr & PSR_I_BIT))
+
+#define fast_interrupts_enabled(regs) \
+ (!((regs)->UCreg_asr & PSR_R_BIT))
+
+/* Are the current registers suitable for user mode?
+ * (used to maintain security in signal handlers)
+ */
+static inline int valid_user_regs(struct pt_regs *regs)
+{
+ unsigned long mode = regs->UCreg_asr & MODE_MASK;
+
+ /*
+ * Always clear the R (REAL) bits
+ */
+ regs->UCreg_asr &= ~(PSR_R_BIT);
+
+ if ((regs->UCreg_asr & PSR_I_BIT) == 0) {
+ if (mode == USER_MODE)
+ return 1;
+ }
+
+ /*
+ * Force ASR to something logical...
+ */
+ regs->UCreg_asr &= PSR_f | USER_MODE;
+
+ return 0;
+}
+
+#define instruction_pointer(regs) ((regs)->UCreg_pc)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+#endif
+
diff --git a/arch/unicore32/include/asm/sigcontext.h b/arch/unicore32/include/asm/sigcontext.h
new file mode 100644
index 000000000000..6a2d7671c052
--- /dev/null
+++ b/arch/unicore32/include/asm/sigcontext.h
@@ -0,0 +1,29 @@
+/*
+ * linux/arch/unicore32/include/asm/sigcontext.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_SIGCONTEXT_H__
+#define __UNICORE_SIGCONTEXT_H__
+
+#include <asm/ptrace.h>
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked. Note: only add new entries
+ * to the end of the structure.
+ */
+struct sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long fault_address;
+ struct pt_regs regs;
+};
+
+#endif
diff --git a/arch/unicore32/include/asm/stacktrace.h b/arch/unicore32/include/asm/stacktrace.h
new file mode 100644
index 000000000000..76edc65a5871
--- /dev/null
+++ b/arch/unicore32/include/asm/stacktrace.h
@@ -0,0 +1,31 @@
+/*
+ * linux/arch/unicore32/include/asm/stacktrace.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_STACKTRACE_H__
+#define __UNICORE_STACKTRACE_H__
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long sp;
+ unsigned long lr;
+ unsigned long pc;
+};
+
+#ifdef CONFIG_FRAME_POINTER
+extern int unwind_frame(struct stackframe *frame);
+#else
+#define unwind_frame(f) (-EINVAL)
+#endif
+extern void walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data);
+
+#endif /* __UNICORE_STACKTRACE_H__ */
diff --git a/arch/unicore32/include/asm/string.h b/arch/unicore32/include/asm/string.h
new file mode 100644
index 000000000000..55264c84369a
--- /dev/null
+++ b/arch/unicore32/include/asm/string.h
@@ -0,0 +1,38 @@
+/*
+ * linux/arch/unicore32/include/asm/string.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_STRING_H__
+#define __UNICORE_STRING_H__
+
+/*
+ * We don't do inline string functions, since the
+ * optimised inline asm versions are not small.
+ */
+
+#define __HAVE_ARCH_STRRCHR
+extern char *strrchr(const char *s, int c);
+
+#define __HAVE_ARCH_STRCHR
+extern char *strchr(const char *s, int c);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMMOVE
+extern void *memmove(void *, const void *, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMCHR
+extern void *memchr(const void *, int, __kernel_size_t);
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, __kernel_size_t);
+
+#endif
diff --git a/arch/unicore32/include/asm/suspend.h b/arch/unicore32/include/asm/suspend.h
new file mode 100644
index 000000000000..88a9c0f32b21
--- /dev/null
+++ b/arch/unicore32/include/asm/suspend.h
@@ -0,0 +1,30 @@
+/*
+ * linux/arch/unicore32/include/asm/suspend.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_SUSPEND_H__
+#define __UNICORE_SUSPEND_H__
+
+#ifndef __ASSEMBLY__
+static inline int arch_prepare_suspend(void) { return 0; }
+
+#include <asm/ptrace.h>
+
+struct swsusp_arch_regs {
+ struct cpu_context_save cpu_context; /* cpu context */
+#ifdef CONFIG_UNICORE_FPU_F64
+ struct fp_state fpstate __attribute__((aligned(8)));
+#endif
+};
+#endif
+
+#endif /* __UNICORE_SUSPEND_H__ */
+
diff --git a/arch/unicore32/include/asm/system.h b/arch/unicore32/include/asm/system.h
new file mode 100644
index 000000000000..246b71c17fda
--- /dev/null
+++ b/arch/unicore32/include/asm/system.h
@@ -0,0 +1,161 @@
+/*
+ * linux/arch/unicore32/include/asm/system.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_SYSTEM_H__
+#define __UNICORE_SYSTEM_H__
+
+#ifdef __KERNEL__
+
+/*
+ * CR1 bits (CP#0 CR1)
+ */
+#define CR_M (1 << 0) /* MMU enable */
+#define CR_A (1 << 1) /* Alignment abort enable */
+#define CR_D (1 << 2) /* Dcache enable */
+#define CR_I (1 << 3) /* Icache enable */
+#define CR_B (1 << 4) /* Dcache write mechanism: write back */
+#define CR_T (1 << 5) /* Burst enable */
+#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/irqflags.h>
+
+struct thread_info;
+struct task_struct;
+
+struct pt_regs;
+
+void die(const char *msg, struct pt_regs *regs, int err);
+
+struct siginfo;
+void uc32_notify_die(const char *str, struct pt_regs *regs,
+ struct siginfo *info, unsigned long err, unsigned long trap);
+
+void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+ struct pt_regs *),
+ int sig, int code, const char *name);
+
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+extern asmlinkage void __backtrace(void);
+extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
+
+struct mm_struct;
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+extern void __show_regs(struct pt_regs *);
+
+extern int cpu_architecture(void);
+extern void cpu_init(void);
+
+#define vectors_high() (cr_alignment & CR_V)
+
+#define isb() __asm__ __volatile__ ("" : : : "memory")
+#define dsb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb() __asm__ __volatile__ ("" : : : "memory")
+
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define read_barrier_depends() do { } while (0)
+#define smp_read_barrier_depends() do { } while (0)
+
+#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
+
+extern unsigned long cr_no_alignment; /* defined in entry-unicore.S */
+extern unsigned long cr_alignment; /* defined in entry-unicore.S */
+
+static inline unsigned int get_cr(void)
+{
+ unsigned int val;
+ asm("movc %0, p0.c1, #0" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+ asm volatile("movc p0.c1, %0, #0 @set CR"
+ : : "r" (val) : "cc");
+ isb();
+}
+
+extern void adjust_cr(unsigned long mask, unsigned long set);
+
+/*
+ * switch_to(prev, next) should switch from task `prev' to `next'
+ * `prev' will never be the same as `next'. schedule() itself
+ * contains the memory barrier to tell GCC not to cache `current'.
+ */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct thread_info *, struct thread_info *);
+extern void panic(const char *fmt, ...);
+
+#define switch_to(prev, next, last) \
+do { \
+ last = __switch_to(prev, \
+ task_thread_info(prev), task_thread_info(next)); \
+} while (0)
+
+static inline unsigned long
+__xchg(unsigned long x, volatile void *ptr, int size)
+{
+ unsigned long ret;
+
+ switch (size) {
+ case 1:
+ asm volatile("@ __xchg1\n"
+ " swapb %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ case 4:
+ asm volatile("@ __xchg4\n"
+ " swapw %0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (x), "r" (ptr)
+ : "memory", "cc");
+ break;
+ default:
+ panic("xchg: bad data size: ptr 0x%p, size %d\n",
+ ptr, size);
+ }
+
+ return ret;
+}
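+
+/*
+ * Illustrative sketch (not part of this patch): typical use of the
+ * xchg() wrapper defined above, e.g. a trivial test-and-set lock.
+ * Only byte and word sizes are supported by __xchg(); anything else
+ * panics at runtime.
+ *
+ *	static unsigned int lock;
+ *
+ *	while (xchg(&lock, 1) != 0)	// swapw: atomically store 1,
+ *		cpu_relax();		// returning the old value
+ *	...critical section...
+ *	xchg(&lock, 0);			// release
+ */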
+
+#include <asm-generic/cmpxchg-local.h>
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
+ (unsigned long)(o), (unsigned long)(n), sizeof(*(ptr))))
+#define cmpxchg64_local(ptr, o, n) \
+ __cmpxchg64_local_generic((ptr), (o), (n))
+
+#include <asm-generic/cmpxchg.h>
+
+#endif /* __ASSEMBLY__ */
+
+#define arch_align_stack(x) (x)
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h
new file mode 100644
index 000000000000..c270e9e04861
--- /dev/null
+++ b/arch/unicore32/include/asm/thread_info.h
@@ -0,0 +1,154 @@
+/*
+ * linux/arch/unicore32/include/asm/thread_info.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_THREAD_INFO_H__
+#define __UNICORE_THREAD_INFO_H__
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <asm/fpstate.h>
+
+#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE 8192
+#define THREAD_START_SP (THREAD_SIZE - 8)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct exec_domain;
+
+#include <asm/types.h>
+
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
+struct cpu_context_save {
+ __u32 r4;
+ __u32 r5;
+ __u32 r6;
+ __u32 r7;
+ __u32 r8;
+ __u32 r9;
+ __u32 r10;
+ __u32 r11;
+ __u32 r12;
+ __u32 r13;
+ __u32 r14;
+ __u32 r15;
+ __u32 r16;
+ __u32 r17;
+ __u32 r18;
+ __u32 r19;
+ __u32 r20;
+ __u32 r21;
+ __u32 r22;
+ __u32 r23;
+ __u32 r24;
+ __u32 r25;
+ __u32 r26;
+ __u32 fp;
+ __u32 sp;
+ __u32 pc;
+};
+
+/*
+ * low level task data that entry.S needs immediate access to.
+ * __switch_to() assumes cpu_context follows immediately after the cpu field.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable */
+ /* <0 => bug */
+ mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
+ struct exec_domain *exec_domain; /* execution domain */
+ __u32 cpu; /* cpu */
+ struct cpu_context_save cpu_context; /* cpu context */
+ __u32 syscall; /* syscall number */
+ __u8 used_cp[16]; /* thread used copro */
+#ifdef CONFIG_UNICORE_FPU_F64
+ struct fp_state fpstate __attribute__((aligned(8)));
+#endif
+ struct restart_block restart_block;
+};
+
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+/*
+ * how to get the thread information struct from C
+ */
+static inline struct thread_info *current_thread_info(void) __attribute_const__;
+
+static inline struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm ("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
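+
+/*
+ * Worked example (illustrative): with THREAD_SIZE == 8192, masking the
+ * current stack pointer with ~(THREAD_SIZE - 1) rounds it down to the
+ * base of the two-page kernel stack, where struct thread_info lives:
+ *
+ *	sp   = 0xc0005f40
+ *	mask = ~0x1fff
+ *	ti   = (struct thread_info *)0xc0004000
+ */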
+
+#define thread_saved_pc(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
+#define thread_saved_sp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+#define thread_saved_fp(tsk) \
+ ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+
+#endif
+
+/*
+ * We use bit 30 of the preempt_count to indicate that kernel
+ * preemption is occurring. See <asm/hardirq.h>.
+ */
+#define PREEMPT_ACTIVE 0x40000000
+
+/*
+ * thread information flags:
+ * TIF_SYSCALL_TRACE - syscall trace active
+ * TIF_SIGPENDING - signal pending
+ * TIF_NEED_RESCHED - rescheduling necessary
+ * TIF_NOTIFY_RESUME - callback before returning to user
+ */
+#define TIF_SIGPENDING 0
+#define TIF_NEED_RESCHED 1
+#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_SYSCALL_TRACE 8
+#define TIF_MEMDIE 18
+#define TIF_FREEZE 19
+#define TIF_RESTORE_SIGMASK 20
+
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_FREEZE (1 << TIF_FREEZE)
+#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
+
+/*
+ * Change these and you break ASM code in entry-common.S
+ */
+#define _TIF_WORK_MASK 0x000000ff
+
+#endif /* __KERNEL__ */
+#endif /* __UNICORE_THREAD_INFO_H__ */
diff --git a/arch/unicore32/include/asm/timex.h b/arch/unicore32/include/asm/timex.h
new file mode 100644
index 000000000000..faf16ba46544
--- /dev/null
+++ b/arch/unicore32/include/asm/timex.h
@@ -0,0 +1,34 @@
+/*
+ * linux/arch/unicore32/include/asm/timex.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __UNICORE_TIMEX_H__
+#define __UNICORE_TIMEX_H__
+
+#ifdef CONFIG_ARCH_FPGA
+
+/* In the FPGA, the APB clock is 33MHz and the OST clock is 32KHz, */
+/* so the 32KHz OST clock is used as the timer tick rate. */
+#define CLOCK_TICK_RATE (32*1024)
+
+#endif
+
+#if defined(CONFIG_PUV3_DB0913) \
+ || defined(CONFIG_PUV3_NB0916) \
+ || defined(CONFIG_PUV3_SMW0919)
+
+#define CLOCK_TICK_RATE (14318000)
+
+#endif
+
+#include <asm-generic/timex.h>
+
+#endif
diff --git a/arch/unicore32/include/asm/tlb.h b/arch/unicore32/include/asm/tlb.h
new file mode 100644
index 000000000000..9cca15cdae94
--- /dev/null
+++ b/arch/unicore32/include/asm/tlb.h
@@ -0,0 +1,28 @@
+/*
+ * linux/arch/unicore32/include/asm/tlb.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_TLB_H__
+#define __UNICORE_TLB_H__
+
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define __pte_free_tlb(tlb, pte, addr) \
+ do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), (pte)); \
+ } while (0)
+
+#include <asm-generic/tlb.h>
+
+#endif
diff --git a/arch/unicore32/include/asm/tlbflush.h b/arch/unicore32/include/asm/tlbflush.h
new file mode 100644
index 000000000000..e446ac8bb9e5
--- /dev/null
+++ b/arch/unicore32/include/asm/tlbflush.h
@@ -0,0 +1,195 @@
+/*
+ * linux/arch/unicore32/include/asm/tlbflush.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_TLBFLUSH_H__
+#define __UNICORE_TLBFLUSH_H__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long,
+ struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
+
+/*
+ * TLB Management
+ * ==============
+ *
+ * The arch/unicore32/mm/tlb-*.S files implement these methods.
+ *
+ * The TLB specific code is expected to perform whatever tests it
+ * needs to determine if it should invalidate the TLB for each
+ * call. Start addresses are inclusive and end addresses are
+ * exclusive; it is safe to round these addresses down.
+ *
+ * flush_tlb_all()
+ *
+ * Invalidate the entire TLB.
+ *
+ * flush_tlb_mm(mm)
+ *
+ * Invalidate all TLB entries in a particular address
+ * space.
+ * - mm - mm_struct describing address space
+ *
+ * flush_tlb_range(mm,start,end)
+ *
+ * Invalidate a range of TLB entries in the specified
+ * address space.
+ * - mm - mm_struct describing address space
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ *
+ * flush_tlb_page(vaddr,vma)
+ *
+ * Invalidate the specified page in the specified address range.
+ * - vaddr - virtual address (may not be aligned)
+ * - vma - vma_struct describing address range
+ *
+ * flush_kern_tlb_page(kaddr)
+ *
+ * Invalidate the TLB entry for the specified page. The address
+ * will be in the kernel's virtual memory space. Current uses
+ * only require the D-TLB to be invalidated.
+ * - kaddr - Kernel virtual memory address
+ */
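+
+/*
+ * Illustrative usage (not part of this patch): tearing down a user
+ * mapping typically clears the ptes and then invalidates the range,
+ * e.g.
+ *
+ *	flush_tlb_range(vma, vma->vm_start, vma->vm_end);
+ *
+ * which resolves to local_flush_tlb_range() via the defines below.
+ */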
+
+static inline void local_flush_tlb_all(void)
+{
+ const int zero = 0;
+
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (zero) : "cc");
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int zero = 0;
+
+ if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (zero) : "cc");
+ }
+ put_cpu();
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ /* iTLB invalidate page */
+ asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (uaddr & PAGE_MASK) : "cc");
+ /* dTLB invalidate page */
+ asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (uaddr & PAGE_MASK) : "cc");
+#else
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (uaddr & PAGE_MASK) : "cc");
+#endif
+ }
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ /* iTLB invalidate page */
+ asm("movc p0.c6, %0, #5; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (kaddr & PAGE_MASK) : "cc");
+ /* dTLB invalidate page */
+ asm("movc p0.c6, %0, #3; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (kaddr & PAGE_MASK) : "cc");
+#else
+ /* TLB invalidate all */
+ asm("movc p0.c6, %0, #6; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (kaddr & PAGE_MASK) : "cc");
+#endif
+}
+
+/*
+ * flush_pmd_entry
+ *
+ * Flush a PMD entry (word aligned, or double-word aligned) to
+ * RAM if the TLB for the CPU we are running on requires this.
+ * This is typically used when we are creating PMD entries.
+ *
+ * clean_pmd_entry
+ *
+ * Clean (but don't drain the write buffer) if the CPU requires
+ * these operations. This is typically used when we are removing
+ * PMD entries.
+ */
+static inline void flush_pmd_entry(pmd_t *pmd)
+{
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ /* flush dcache line, see dcacheline_flush in proc-macros.S */
+ asm("mov r1, %0 << #20\n"
+ "ldw r2, =_stext\n"
+ "add r2, r2, r1 >> #20\n"
+ "ldw r1, [r2+], #0x0000\n"
+ "ldw r1, [r2+], #0x1000\n"
+ "ldw r1, [r2+], #0x2000\n"
+ "ldw r1, [r2+], #0x3000\n"
+ : : "r" (pmd) : "r1", "r2");
+#else
+ /* flush dcache all */
+ asm("movc p0.c5, %0, #14; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (pmd) : "cc");
+#endif
+}
+
+static inline void clean_pmd_entry(pmd_t *pmd)
+{
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ /* clean dcache line */
+ asm("movc p0.c5, %0, #11; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (__pa(pmd) & ~(L1_CACHE_BYTES - 1)) : "cc");
+#else
+ /* clean dcache all */
+ asm("movc p0.c5, %0, #10; nop; nop; nop; nop; nop; nop; nop; nop"
+ : : "r" (pmd) : "cc");
+#endif
+}
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define local_flush_tlb_range(vma, start, end) \
+ __cpu_flush_user_tlb_range(start, end, vma)
+#define local_flush_tlb_kernel_range(s, e) \
+ __cpu_flush_kern_tlb_range(s, e)
+
+#define flush_tlb_all local_flush_tlb_all
+#define flush_tlb_mm local_flush_tlb_mm
+#define flush_tlb_page local_flush_tlb_page
+#define flush_tlb_kernel_page local_flush_tlb_kernel_page
+#define flush_tlb_range local_flush_tlb_range
+#define flush_tlb_kernel_range local_flush_tlb_kernel_range
+
+/*
+ * if PG_dcache_clean is not set for the page, we need to ensure that any
+ * cache entries for the kernel's virtual memory range are written
+ * back to the page.
+ */
+extern void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+extern void do_bad_area(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs);
+
+#endif
+
+#endif
diff --git a/arch/unicore32/include/asm/traps.h b/arch/unicore32/include/asm/traps.h
new file mode 100644
index 000000000000..66e17a724bfe
--- /dev/null
+++ b/arch/unicore32/include/asm/traps.h
@@ -0,0 +1,21 @@
+/*
+ * linux/arch/unicore32/include/asm/traps.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_TRAP_H__
+#define __UNICORE_TRAP_H__
+
+extern void __init early_trap_init(void);
+extern void dump_backtrace_entry(unsigned long where,
+ unsigned long from, unsigned long frame);
+
+extern void do_DataAbort(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs);
+#endif
diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h
new file mode 100644
index 000000000000..2acda503a6d9
--- /dev/null
+++ b/arch/unicore32/include/asm/uaccess.h
@@ -0,0 +1,47 @@
+/*
+ * linux/arch/unicore32/include/asm/uaccess.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_UACCESS_H__
+#define __UNICORE_UACCESS_H__
+
+#include <linux/thread_info.h>
+#include <linux/errno.h>
+
+#include <asm/memory.h>
+#include <asm/system.h>
+
+#define __copy_from_user __copy_from_user
+#define __copy_to_user __copy_to_user
+#define __strncpy_from_user __strncpy_from_user
+#define __strnlen_user __strnlen_user
+#define __clear_user __clear_user
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+#define __user_ok(addr, size) (((size) <= TASK_SIZE) \
+ && ((addr) <= TASK_SIZE - (size)))
+#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
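+
+/*
+ * Worked example (illustrative, assuming TASK_SIZE == 0x80000000):
+ * __user_ok() checks the size first, so the address comparison cannot
+ * wrap:
+ *
+ *	__user_ok(0x7ffffff0, 0x10)  ->  1  (ends exactly at TASK_SIZE)
+ *	__user_ok(0x7ffffff0, 0x20)  ->  0  (would run past TASK_SIZE)
+ *	__user_ok(0x10,       -1UL)  ->  0  (size > TASK_SIZE, no wrap)
+ */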
+
+extern unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__strncpy_from_user(char *to, const char __user *from, unsigned long count);
+extern unsigned long
+__strnlen_user(const char __user *s, long n);
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif /* __UNICORE_UACCESS_H__ */
diff --git a/arch/unicore32/include/asm/unistd.h b/arch/unicore32/include/asm/unistd.h
new file mode 100644
index 000000000000..9b2428019961
--- /dev/null
+++ b/arch/unicore32/include/asm/unistd.h
@@ -0,0 +1,18 @@
+/*
+ * linux/arch/unicore32/include/asm/unistd.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#if !defined(__UNICORE_UNISTD_H__) || defined(__SYSCALL)
+#define __UNICORE_UNISTD_H__
+
+/* Use the standard ABI for syscalls. */
+#include <asm-generic/unistd.h>
+
+#endif /* __UNICORE_UNISTD_H__ */
diff --git a/arch/unicore32/include/mach/PKUnity.h b/arch/unicore32/include/mach/PKUnity.h
new file mode 100644
index 000000000000..a18bdc3810e6
--- /dev/null
+++ b/arch/unicore32/include/mach/PKUnity.h
@@ -0,0 +1,108 @@
+/*
+ * linux/arch/unicore32/include/mach/PKUnity.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Be sure that the virtual mapping is defined correctly */
+#ifndef __MACH_PUV3_HARDWARE_H__
+#error You must include hardware.h not PKUnity.h
+#endif
+
+#include "bitfield.h"
+
+/*
+ * Memory Definitions
+ */
+#define PKUNITY_SDRAM_BASE 0x00000000 /* 0x00000000 - 0x7FFFFFFF 2GB */
+#define PKUNITY_MMIO_BASE 0x80000000 /* 0x80000000 - 0xFFFFFFFF 2GB */
+
+/*
+ * PKUNITY Memory Map Addresses: 0x0D000000 - 0x0EFFFFFF (32MB)
+ * 0x0D000000 - 0x0DFFFFFF 16MB: for UVC
+ * 0x0E000000 - 0x0EFFFFFF 16MB: for UNIGFX
+ */
+#define PKUNITY_UVC_MMAP_BASE 0x0D000000
+#define PKUNITY_UVC_MMAP_SIZE 0x01000000 /* 16MB */
+#define PKUNITY_UNIGFX_MMAP_BASE 0x0E000000
+#define PKUNITY_UNIGFX_MMAP_SIZE 0x01000000 /* 16MB */
+
+/*
+ * PKUNITY System Bus Addresses (PCI): 0x80000000 - 0xBFFFFFFF (1GB)
+ * 0x80000000 - 0x8000000B 12B PCI Configuration regs
+ * 0x80010000 - 0x80010250 592B PCI Bridge Base
+ * 0x80030000 - 0x8003FFFF 64KB PCI Legacy IO
+ * 0x90000000 - 0x97FFFFFF 128MB PCI AHB-PCI MEM-mapping
+ * 0x98000000 - 0x9FFFFFFF 128MB PCI PCI-AHB MEM-mapping
+ */
+#define PKUNITY_PCI_BASE io_p2v(0x80000000) /* 0x80000000 - 0xBFFFFFFF 1GB */
+#include "regs-pci.h"
+
+#define PKUNITY_PCICFG_BASE (PKUNITY_PCI_BASE + 0x0)
+#define PKUNITY_PCIBRI_BASE (PKUNITY_PCI_BASE + 0x00010000)
+#define PKUNITY_PCILIO_BASE (PKUNITY_PCI_BASE + 0x00030000)
+#define PKUNITY_PCIMEM_BASE (PKUNITY_PCI_BASE + 0x10000000)
+#define PKUNITY_PCIAHB_BASE (PKUNITY_PCI_BASE + 0x18000000)
+
+/*
+ * PKUNITY System Bus Addresses (AHB): 0xC0000000 - 0xEDFFFFFF (640MB)
+ */
+#define PKUNITY_AHB_BASE io_p2v(0xC0000000)
+
+/* AHB-0 is DDR2 SDRAM */
+/* AHB-1 is PCI Space */
+#define PKUNITY_ARBITER_BASE (PKUNITY_AHB_BASE + 0x000000) /* AHB-2 */
+#define PKUNITY_DDR2CTRL_BASE (PKUNITY_AHB_BASE + 0x100000) /* AHB-3 */
+#define PKUNITY_DMAC_BASE (PKUNITY_AHB_BASE + 0x200000) /* AHB-4 */
+#include "regs-dmac.h"
+#define PKUNITY_UMAL_BASE (PKUNITY_AHB_BASE + 0x300000) /* AHB-5 */
+#include "regs-umal.h"
+#define PKUNITY_USB_BASE (PKUNITY_AHB_BASE + 0x400000) /* AHB-6 */
+#define PKUNITY_SATA_BASE (PKUNITY_AHB_BASE + 0x500000) /* AHB-7 */
+#define PKUNITY_SMC_BASE (PKUNITY_AHB_BASE + 0x600000) /* AHB-8 */
+/* AHB-9 is for APB bridge */
+#define PKUNITY_MME_BASE (PKUNITY_AHB_BASE + 0x700000) /* AHB-10 */
+#define PKUNITY_UNIGFX_BASE (PKUNITY_AHB_BASE + 0x800000) /* AHB-11 */
+#include "regs-unigfx.h"
+#define PKUNITY_NAND_BASE (PKUNITY_AHB_BASE + 0x900000) /* AHB-12 */
+#include "regs-nand.h"
+#define PKUNITY_H264D_BASE (PKUNITY_AHB_BASE + 0xA00000) /* AHB-13 */
+#define PKUNITY_H264E_BASE (PKUNITY_AHB_BASE + 0xB00000) /* AHB-14 */
+
+/*
+ * PKUNITY Peripheral Bus Addresses (APB): 0xEE000000 - 0xEFFFFFFF (128MB)
+ */
+#define PKUNITY_APB_BASE io_p2v(0xEE000000)
+
+#define PKUNITY_UART0_BASE (PKUNITY_APB_BASE + 0x000000) /* APB-0 */
+#define PKUNITY_UART1_BASE (PKUNITY_APB_BASE + 0x100000) /* APB-1 */
+#include "regs-uart.h"
+#define PKUNITY_I2C_BASE (PKUNITY_APB_BASE + 0x200000) /* APB-2 */
+#include "regs-i2c.h"
+#define PKUNITY_SPI_BASE (PKUNITY_APB_BASE + 0x300000) /* APB-3 */
+#include "regs-spi.h"
+#define PKUNITY_AC97_BASE (PKUNITY_APB_BASE + 0x400000) /* APB-4 */
+#include "regs-ac97.h"
+#define PKUNITY_GPIO_BASE (PKUNITY_APB_BASE + 0x500000) /* APB-5 */
+#include "regs-gpio.h"
+#define PKUNITY_INTC_BASE (PKUNITY_APB_BASE + 0x600000) /* APB-6 */
+#include "regs-intc.h"
+#define PKUNITY_RTC_BASE (PKUNITY_APB_BASE + 0x700000) /* APB-7 */
+#include "regs-rtc.h"
+#define PKUNITY_OST_BASE (PKUNITY_APB_BASE + 0x800000) /* APB-8 */
+#include "regs-ost.h"
+#define PKUNITY_RESETC_BASE (PKUNITY_APB_BASE + 0x900000) /* APB-9 */
+#include "regs-resetc.h"
+#define PKUNITY_PM_BASE (PKUNITY_APB_BASE + 0xA00000) /* APB-10 */
+#include "regs-pm.h"
+#define PKUNITY_PS2_BASE (PKUNITY_APB_BASE + 0xB00000) /* APB-11 */
+#include "regs-ps2.h"
+#define PKUNITY_SDC_BASE (PKUNITY_APB_BASE + 0xC00000) /* APB-12 */
+#include "regs-sdc.h"
+
diff --git a/arch/unicore32/include/mach/bitfield.h b/arch/unicore32/include/mach/bitfield.h
new file mode 100644
index 000000000000..128a70281efc
--- /dev/null
+++ b/arch/unicore32/include/mach/bitfield.h
@@ -0,0 +1,24 @@
+/*
+ * linux/arch/unicore32/include/mach/bitfield.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_PUV3_BITFIELD_H__
+#define __MACH_PUV3_BITFIELD_H__
+
+#ifndef __ASSEMBLY__
+#define UData(Data) ((unsigned long) (Data))
+#else
+#define UData(Data) (Data)
+#endif
+
+#define FIELD(val, vmask, vshift) (((val) & ((UData(1) << (vmask)) - 1)) << (vshift))
+#define FMASK(vmask, vshift) (((UData(1) << (vmask)) - 1) << (vshift))
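+
+/*
+ * Worked example (illustrative): FIELD(val, vmask, vshift) truncates
+ * `val' to `vmask' bits and moves it up to bit `vshift'; FMASK builds
+ * just the mask. For a 2-bit field at bit 1:
+ *
+ *	FIELD(2, 2, 1) == ((2 & 0x3) << 1) == 0x4
+ *	FMASK(2, 1)    == (0x3 << 1)       == 0x6
+ */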
+
+#endif /* __MACH_PUV3_BITFIELD_H__ */
diff --git a/arch/unicore32/include/mach/dma.h b/arch/unicore32/include/mach/dma.h
new file mode 100644
index 000000000000..d655c1b6e083
--- /dev/null
+++ b/arch/unicore32/include/mach/dma.h
@@ -0,0 +1,48 @@
+/*
+ * linux/arch/unicore32/include/mach/dma.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_PUV3_DMA_H__
+#define __MACH_PUV3_DMA_H__
+
+/*
+ * The PKUnity has six internal DMA channels.
+ */
+#define MAX_DMA_CHANNELS 6
+
+typedef enum {
+ DMA_PRIO_HIGH = 0,
+ DMA_PRIO_MEDIUM = 1,
+ DMA_PRIO_LOW = 2
+} puv3_dma_prio;
+
+/*
+ * DMA registration
+ */
+
+extern int puv3_request_dma(char *name,
+ puv3_dma_prio prio,
+ void (*irq_handler)(int, void *),
+ void (*err_handler)(int, void *),
+ void *data);
+
+extern void puv3_free_dma(int dma_ch);
+
+static inline void puv3_stop_dma(int ch)
+{
+ writel(readl(DMAC_CONFIG(ch)) & ~DMAC_CONFIG_EN, DMAC_CONFIG(ch));
+}
+
+static inline void puv3_resume_dma(int ch)
+{
+ writel(readl(DMAC_CONFIG(ch)) | DMAC_CONFIG_EN, DMAC_CONFIG(ch));
+}
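+
+/*
+ * Illustrative usage sketch (not part of this patch; the handler names
+ * are hypothetical, and puv3_request_dma() is assumed to return a
+ * channel number or a negative errno):
+ *
+ *	static void my_dma_irq(int ch, void *data) { ... }
+ *	static void my_dma_err(int ch, void *data) { ... }
+ *
+ *	int ch = puv3_request_dma("mydev", DMA_PRIO_LOW,
+ *				  my_dma_irq, my_dma_err, dev);
+ *	if (ch < 0)
+ *		return ch;
+ *	...program DMAC_SRCADDR(ch)/DMAC_DESTADDR(ch), then enable...
+ *	puv3_stop_dma(ch);
+ *	puv3_free_dma(ch);
+ */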
+
+#endif /* __MACH_PUV3_DMA_H__ */
diff --git a/arch/unicore32/include/mach/hardware.h b/arch/unicore32/include/mach/hardware.h
new file mode 100644
index 000000000000..930bea6e129a
--- /dev/null
+++ b/arch/unicore32/include/mach/hardware.h
@@ -0,0 +1,38 @@
+/*
+ * linux/arch/unicore32/include/mach/hardware.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the hardware definitions for PKUnity architecture
+ */
+
+#ifndef __MACH_PUV3_HARDWARE_H__
+#define __MACH_PUV3_HARDWARE_H__
+
+#include "PKUnity.h"
+
+#ifndef __ASSEMBLY__
+#define io_p2v(x) (void __iomem *)((x) - PKUNITY_MMIO_BASE)
+#define io_v2p(x) (phys_addr_t)((x) + PKUNITY_MMIO_BASE)
+#else
+#define io_p2v(x) ((x) - PKUNITY_MMIO_BASE)
+#define io_v2p(x) ((x) + PKUNITY_MMIO_BASE)
+#endif
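+
+/*
+ * Worked example (illustrative): the MMIO window is mapped by
+ * subtracting PKUNITY_MMIO_BASE, so physical 0xEE000000 (the APB bus)
+ * corresponds to virtual 0x6E000000:
+ *
+ *	io_p2v(0xEE000000) == (void __iomem *)0x6E000000
+ *	io_v2p(0x6E000000) == (phys_addr_t)0xEE000000
+ */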
+
+#define PCIBIOS_MIN_IO 0x4000 /* should be lower than 64KB */
+#define PCIBIOS_MIN_MEM io_v2p(PKUNITY_PCIMEM_BASE)
+
+/*
+ * We override the standard dma-mask routines for bouncing.
+ */
+#define HAVE_ARCH_PCI_SET_DMA_MASK
+
+#define pcibios_assign_all_busses() 1
+
+#endif /* __MACH_PUV3_HARDWARE_H__ */
diff --git a/arch/unicore32/include/mach/map.h b/arch/unicore32/include/mach/map.h
new file mode 100644
index 000000000000..55c936573741
--- /dev/null
+++ b/arch/unicore32/include/mach/map.h
@@ -0,0 +1,20 @@
+/*
+ * linux/arch/unicore32/include/mach/map.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Page table mapping constructs and function prototypes
+ */
+#define MT_DEVICE 0
+#define MT_DEVICE_CACHED 2
+#define MT_KUSER 7
+#define MT_HIGH_VECTORS 8
+#define MT_MEMORY 9
+#define MT_ROM 10
+
diff --git a/arch/unicore32/include/mach/memory.h b/arch/unicore32/include/mach/memory.h
new file mode 100644
index 000000000000..0bf21c944710
--- /dev/null
+++ b/arch/unicore32/include/mach/memory.h
@@ -0,0 +1,58 @@
+/*
+ * linux/arch/unicore32/include/mach/memory.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __MACH_PUV3_MEMORY_H__
+#define __MACH_PUV3_MEMORY_H__
+
+#include <mach/hardware.h>
+
+/* Physical DRAM offset. */
+#define PHYS_OFFSET UL(0x00000000)
+/* The base address of exception vectors. */
+#define VECTORS_BASE UL(0xffff0000)
+/* The base address of kuser area. */
+#define KUSER_BASE UL(0x80000000)
+
+#ifdef __ASSEMBLY__
+/* The byte offset of the kernel image in RAM from the start of RAM. */
+#define KERNEL_IMAGE_START 0x00408000
+#endif
+
+#if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
+
+void puv3_pci_adjust_zones(unsigned long *size, unsigned long *holes);
+
+#define arch_adjust_zones(size, holes) \
+ puv3_pci_adjust_zones(size, holes)
+
+#endif
+
+/*
+ * The PCI controller in PKUnity-3 masks the highest 5 bits on the upstream
+ * channel, so DMA allocations must stay within the first 128MB of physical
+ * memory to support PCI devices.
+ */
+#define PCI_DMA_THRESHOLD (PHYS_OFFSET + SZ_128M - 1)
+
+#define is_pcibus_device(dev) (dev && \
+ (strncmp(dev->bus->name, "pci", 3) == 0))
+
+#define __virt_to_pcibus(x) (__virt_to_phys((x) + PKUNITY_PCIAHB_BASE))
+#define __pcibus_to_virt(x) (__phys_to_virt(x) - PKUNITY_PCIAHB_BASE)
+
+/* kuser area */
+#define KUSER_VECPAGE_BASE (KUSER_BASE + UL(0x3fff0000))
+#define KUSER_UNIGFX_BASE (PAGE_OFFSET + PKUNITY_UNIGFX_MMAP_BASE)
+/* kuser_vecpage (0xbfff0000) is ro, and vectors page (0xffff0000) is rw */
+#define kuser_vecpage_to_vectors(x) ((x) - (KUSER_VECPAGE_BASE) \
+ + (VECTORS_BASE))
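+
+/*
+ * Worked example (illustrative): KUSER_VECPAGE_BASE is
+ * 0x80000000 + 0x3fff0000 == 0xbfff0000, so
+ *
+ *	kuser_vecpage_to_vectors(0xbfff0000) == 0xffff0000
+ *
+ * i.e. an address in the read-only kuser vecpage maps to the same
+ * offset within the read-write vectors page.
+ */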
+
+#endif
diff --git a/arch/unicore32/include/mach/ocd.h b/arch/unicore32/include/mach/ocd.h
new file mode 100644
index 000000000000..189fd71bfa34
--- /dev/null
+++ b/arch/unicore32/include/mach/ocd.h
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/unicore32/include/mach/ocd.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_PUV3_OCD_H__
+#define __MACH_PUV3_OCD_H__
+
+#if defined(CONFIG_DEBUG_OCD)
+static inline void ocd_putc(unsigned int c)
+{
+ int status, i = 0x2000000;
+
+ do {
+ if (--i < 0)
+ return;
+
+ asm volatile ("movc %0, p1.c0, #0" : "=r" (status));
+ } while (status & 2);
+
+ asm("movc p1.c1, %0, #1" : : "r" (c));
+}
+
+#define putc(ch) ocd_putc(ch)
+#else
+#define putc(ch)
+#endif
+
+#endif
diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
new file mode 100644
index 000000000000..4dcd34ae194c
--- /dev/null
+++ b/arch/unicore32/include/mach/pm.h
@@ -0,0 +1,43 @@
+/*
+ * linux/arch/unicore32/include/mach/pm.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __PUV3_PM_H__
+#define __PUV3_PM_H__
+
+#include <linux/suspend.h>
+
+struct puv3_cpu_pm_fns {
+ int save_count;
+ void (*save)(unsigned long *);
+ void (*restore)(unsigned long *);
+ int (*valid)(suspend_state_t state);
+ void (*enter)(suspend_state_t state);
+ int (*prepare)(void);
+ void (*finish)(void);
+};
+
+extern struct puv3_cpu_pm_fns *puv3_cpu_pm_fns;
+
+/* sleep.S */
+extern void puv3_cpu_suspend(unsigned int);
+
+extern void puv3_cpu_resume(void);
+
+extern int puv3_pm_enter(suspend_state_t state);
+
+/* Defined in hibernate_asm.S */
+extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
+
+/* References to section boundaries */
+extern const void __nosave_begin, __nosave_end;
+
+extern struct pbe *restore_pblist;
+#endif
diff --git a/arch/unicore32/include/mach/regs-ac97.h b/arch/unicore32/include/mach/regs-ac97.h
new file mode 100644
index 000000000000..b7563e9d6503
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-ac97.h
@@ -0,0 +1,32 @@
+/*
+ * PKUnity AC97 Registers
+ */
+
+#define PKUNITY_AC97_CONR (PKUNITY_AC97_BASE + 0x0000)
+#define PKUNITY_AC97_OCR (PKUNITY_AC97_BASE + 0x0004)
+#define PKUNITY_AC97_ICR (PKUNITY_AC97_BASE + 0x0008)
+#define PKUNITY_AC97_CRAC (PKUNITY_AC97_BASE + 0x000C)
+#define PKUNITY_AC97_INTR (PKUNITY_AC97_BASE + 0x0010)
+#define PKUNITY_AC97_INTRSTAT (PKUNITY_AC97_BASE + 0x0014)
+#define PKUNITY_AC97_INTRCLEAR (PKUNITY_AC97_BASE + 0x0018)
+#define PKUNITY_AC97_ENABLE (PKUNITY_AC97_BASE + 0x001C)
+#define PKUNITY_AC97_OUT_FIFO (PKUNITY_AC97_BASE + 0x0020)
+#define PKUNITY_AC97_IN_FIFO (PKUNITY_AC97_BASE + 0x0030)
+
+#define AC97_CODEC_REG(v) FIELD((v), 7, 16)
+#define AC97_CODEC_VAL(v) FIELD((v), 16, 0)
+#define AC97_CODEC_WRITECOMPLETE FIELD(1, 1, 2)
+
+/*
+ * VAR PLAY SAMPLE RATE
+ */
+#define AC97_CMD_VPSAMPLE (FIELD(3, 2, 16) | FIELD(3, 2, 0))
+
+/*
+ * FIX CAPTURE SAMPLE RATE
+ */
+#define AC97_CMD_FCSAMPLE FIELD(7, 3, 0)
+
+#define AC97_CMD_RESET FIELD(1, 1, 0)
+#define AC97_CMD_ENABLE FIELD(1, 1, 0)
+#define AC97_CMD_DISABLE FIELD(0, 1, 0)
diff --git a/arch/unicore32/include/mach/regs-dmac.h b/arch/unicore32/include/mach/regs-dmac.h
new file mode 100644
index 000000000000..66de9e7d1c8f
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-dmac.h
@@ -0,0 +1,81 @@
+/*
+ * PKUnity Direct Memory Access Controller (DMAC)
+ */
+
+/*
+ * Interrupt Status Reg DMAC_ISR.
+ */
+#define DMAC_ISR (PKUNITY_DMAC_BASE + 0x0020)
+/*
+ * Interrupt Transfer Complete Status Reg DMAC_ITCSR.
+ */
+#define DMAC_ITCSR (PKUNITY_DMAC_BASE + 0x0050)
+/*
+ * Interrupt Transfer Complete Clear Reg DMAC_ITCCR.
+ */
+#define DMAC_ITCCR (PKUNITY_DMAC_BASE + 0x0060)
+/*
+ * Interrupt Error Status Reg DMAC_IESR.
+ */
+#define DMAC_IESR (PKUNITY_DMAC_BASE + 0x0080)
+/*
+ * Interrupt Error Clear Reg DMAC_IECR.
+ */
+#define DMAC_IECR (PKUNITY_DMAC_BASE + 0x0090)
+/*
+ * Enable Channels Reg DMAC_ENCH.
+ */
+#define DMAC_ENCH (PKUNITY_DMAC_BASE + 0x00B0)
+
+/*
+ * DMA control reg. Space [byte]
+ */
+#define DMASp 0x00000100
+
+/*
+ * Source Addr DMAC_SRCADDR(ch).
+ */
+#define DMAC_SRCADDR(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x00)
+/*
+ * Destination Addr DMAC_DESTADDR(ch).
+ */
+#define DMAC_DESTADDR(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x04)
+/*
+ * Control Reg DMAC_CONTROL(ch).
+ */
+#define DMAC_CONTROL(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x0C)
+/*
+ * Configuration Reg DMAC_CONFIG(ch).
+ */
+#define DMAC_CONFIG(ch) (PKUNITY_DMAC_BASE + (ch)*DMASp + 0x10)
+
+#define DMAC_IR_MASK FMASK(6, 0)
+/*
+ * select channel (ch)
+ */
+#define DMAC_CHANNEL(ch) FIELD(1, 1, (ch))
+
+#define DMAC_CONTROL_SIZE_BYTE(v) (FIELD((v), 12, 14) | \
+ FIELD(0, 3, 9) | FIELD(0, 3, 6))
+#define DMAC_CONTROL_SIZE_HWORD(v) (FIELD((v) >> 1, 12, 14) | \
+ FIELD(1, 3, 9) | FIELD(1, 3, 6))
+#define DMAC_CONTROL_SIZE_WORD(v) (FIELD((v) >> 2, 12, 14) | \
+ FIELD(2, 3, 9) | FIELD(2, 3, 6))
+#define DMAC_CONTROL_DI FIELD(1, 1, 13)
+#define DMAC_CONTROL_SI FIELD(1, 1, 12)
+#define DMAC_CONTROL_BURST_1BYTE (FIELD(0, 3, 3) | FIELD(0, 3, 0))
+#define DMAC_CONTROL_BURST_4BYTE (FIELD(3, 3, 3) | FIELD(3, 3, 0))
+#define DMAC_CONTROL_BURST_8BYTE (FIELD(5, 3, 3) | FIELD(5, 3, 0))
+#define DMAC_CONTROL_BURST_16BYTE (FIELD(7, 3, 3) | FIELD(7, 3, 0))
+
+#define DMAC_CONFIG_UART0_WR (FIELD(2, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_UART0_RD (FIELD(2, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_UART1_WR (FIELD(3, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_UART1RD (FIELD(3, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_AC97WR (FIELD(4, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_AC97RD (FIELD(4, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_MMCWR (FIELD(7, 4, 11) | FIELD(1, 2, 1))
+#define DMAC_CONFIG_MMCRD (FIELD(7, 4, 7) | FIELD(2, 2, 1))
+#define DMAC_CONFIG_MASKITC FIELD(1, 1, 4)
+#define DMAC_CONFIG_MASKIE FIELD(1, 1, 3)
+#define DMAC_CONFIG_EN FIELD(1, 1, 0)
diff --git a/arch/unicore32/include/mach/regs-gpio.h b/arch/unicore32/include/mach/regs-gpio.h
new file mode 100644
index 000000000000..0273b861ef96
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-gpio.h
@@ -0,0 +1,70 @@
+/*
+ * PKUnity General-Purpose Input/Output (GPIO) Registers
+ */
+
+/*
+ * Voltage Status Reg GPIO_GPLR.
+ */
+#define GPIO_GPLR (PKUNITY_GPIO_BASE + 0x0000)
+/*
+ * Pin Direction Reg GPIO_GPDR.
+ */
+#define GPIO_GPDR (PKUNITY_GPIO_BASE + 0x0004)
+/*
+ * Output Pin Set Reg GPIO_GPSR.
+ */
+#define GPIO_GPSR (PKUNITY_GPIO_BASE + 0x0008)
+/*
+ * Output Pin Clear Reg GPIO_GPCR.
+ */
+#define GPIO_GPCR (PKUNITY_GPIO_BASE + 0x000C)
+/*
+ * Raise Edge Detect Reg GPIO_GRER.
+ */
+#define GPIO_GRER (PKUNITY_GPIO_BASE + 0x0010)
+/*
+ * Fall Edge Detect Reg GPIO_GFER.
+ */
+#define GPIO_GFER (PKUNITY_GPIO_BASE + 0x0014)
+/*
+ * Edge Status Reg GPIO_GEDR.
+ */
+#define GPIO_GEDR (PKUNITY_GPIO_BASE + 0x0018)
+/*
+ * Special Voltage Detect Reg GPIO_GPIR.
+ */
+#define GPIO_GPIR (PKUNITY_GPIO_BASE + 0x0020)
+
+#define GPIO_MIN (0)
+#define GPIO_MAX (27)
+
+#define GPIO_GPIO(Nb) (0x00000001 << (Nb)) /* GPIO [0..27] */
+#define GPIO_GPIO0 GPIO_GPIO(0) /* GPIO [0] */
+#define GPIO_GPIO1 GPIO_GPIO(1) /* GPIO [1] */
+#define GPIO_GPIO2 GPIO_GPIO(2) /* GPIO [2] */
+#define GPIO_GPIO3 GPIO_GPIO(3) /* GPIO [3] */
+#define GPIO_GPIO4 GPIO_GPIO(4) /* GPIO [4] */
+#define GPIO_GPIO5 GPIO_GPIO(5) /* GPIO [5] */
+#define GPIO_GPIO6 GPIO_GPIO(6) /* GPIO [6] */
+#define GPIO_GPIO7 GPIO_GPIO(7) /* GPIO [7] */
+#define GPIO_GPIO8 GPIO_GPIO(8) /* GPIO [8] */
+#define GPIO_GPIO9 GPIO_GPIO(9) /* GPIO [9] */
+#define GPIO_GPIO10 GPIO_GPIO(10) /* GPIO [10] */
+#define GPIO_GPIO11 GPIO_GPIO(11) /* GPIO [11] */
+#define GPIO_GPIO12 GPIO_GPIO(12) /* GPIO [12] */
+#define GPIO_GPIO13 GPIO_GPIO(13) /* GPIO [13] */
+#define GPIO_GPIO14 GPIO_GPIO(14) /* GPIO [14] */
+#define GPIO_GPIO15 GPIO_GPIO(15) /* GPIO [15] */
+#define GPIO_GPIO16 GPIO_GPIO(16) /* GPIO [16] */
+#define GPIO_GPIO17 GPIO_GPIO(17) /* GPIO [17] */
+#define GPIO_GPIO18 GPIO_GPIO(18) /* GPIO [18] */
+#define GPIO_GPIO19 GPIO_GPIO(19) /* GPIO [19] */
+#define GPIO_GPIO20 GPIO_GPIO(20) /* GPIO [20] */
+#define GPIO_GPIO21 GPIO_GPIO(21) /* GPIO [21] */
+#define GPIO_GPIO22 GPIO_GPIO(22) /* GPIO [22] */
+#define GPIO_GPIO23 GPIO_GPIO(23) /* GPIO [23] */
+#define GPIO_GPIO24 GPIO_GPIO(24) /* GPIO [24] */
+#define GPIO_GPIO25 GPIO_GPIO(25) /* GPIO [25] */
+#define GPIO_GPIO26 GPIO_GPIO(26) /* GPIO [26] */
+#define GPIO_GPIO27 GPIO_GPIO(27) /* GPIO [27] */
+
diff --git a/arch/unicore32/include/mach/regs-i2c.h b/arch/unicore32/include/mach/regs-i2c.h
new file mode 100644
index 000000000000..463d108f8bfb
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-i2c.h
@@ -0,0 +1,63 @@
+/*
+ * PKUnity Inter-integrated Circuit (I2C) Registers
+ */
+
+/*
+ * Control Reg I2C_CON.
+ */
+#define I2C_CON (PKUNITY_I2C_BASE + 0x0000)
+/*
+ * Target Address Reg I2C_TAR.
+ */
+#define I2C_TAR (PKUNITY_I2C_BASE + 0x0004)
+/*
+ * Data buffer and command Reg I2C_DATACMD.
+ */
+#define I2C_DATACMD (PKUNITY_I2C_BASE + 0x0010)
+/*
+ * Enable Reg I2C_ENABLE.
+ */
+#define I2C_ENABLE (PKUNITY_I2C_BASE + 0x006C)
+/*
+ * Status Reg I2C_STATUS.
+ */
+#define I2C_STATUS (PKUNITY_I2C_BASE + 0x0070)
+/*
+ * Tx FIFO Length Reg I2C_TXFLR.
+ */
+#define I2C_TXFLR (PKUNITY_I2C_BASE + 0x0074)
+/*
+ * Rx FIFO Length Reg I2C_RXFLR.
+ */
+#define I2C_RXFLR (PKUNITY_I2C_BASE + 0x0078)
+/*
+ * Enable Status Reg I2C_ENSTATUS.
+ */
+#define I2C_ENSTATUS (PKUNITY_I2C_BASE + 0x009C)
+
+#define I2C_CON_MASTER FIELD(1, 1, 0)
+#define I2C_CON_SPEED_STD FIELD(1, 2, 1)
+#define I2C_CON_SPEED_FAST FIELD(2, 2, 1)
+#define I2C_CON_RESTART FIELD(1, 1, 5)
+#define I2C_CON_SLAVEDISABLE FIELD(1, 1, 6)
+
+#define I2C_DATACMD_READ FIELD(1, 1, 8)
+#define I2C_DATACMD_WRITE FIELD(0, 1, 8)
+#define I2C_DATACMD_DAT_MASK FMASK(8, 0)
+#define I2C_DATACMD_DAT(v) FIELD((v), 8, 0)
+
+#define I2C_ENABLE_ENABLE FIELD(1, 1, 0)
+#define I2C_ENABLE_DISABLE FIELD(0, 1, 0)
+
+#define I2C_STATUS_RFF FIELD(1, 1, 4)
+#define I2C_STATUS_RFNE FIELD(1, 1, 3)
+#define I2C_STATUS_TFE FIELD(1, 1, 2)
+#define I2C_STATUS_TFNF FIELD(1, 1, 1)
+#define I2C_STATUS_ACTIVITY FIELD(1, 1, 0)
+
+#define I2C_ENSTATUS_ENABLE FIELD(1, 1, 0)
+
+#define I2C_TAR_THERMAL 0x4f
+#define I2C_TAR_SPD 0x50
+#define I2C_TAR_PWIC 0x55
+#define I2C_TAR_EEPROM 0x57
diff --git a/arch/unicore32/include/mach/regs-intc.h b/arch/unicore32/include/mach/regs-intc.h
new file mode 100644
index 000000000000..25648f89cbd3
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-intc.h
@@ -0,0 +1,28 @@
+/*
+ * PKUNITY Interrupt Controller (INTC) Registers
+ */
+/*
+ * INTC Level Reg INTC_ICLR.
+ */
+#define INTC_ICLR (PKUNITY_INTC_BASE + 0x0000)
+/*
+ * INTC Mask Reg INTC_ICMR.
+ */
+#define INTC_ICMR (PKUNITY_INTC_BASE + 0x0004)
+/*
+ * INTC Pending Reg INTC_ICPR.
+ */
+#define INTC_ICPR (PKUNITY_INTC_BASE + 0x0008)
+/*
+ * INTC IRQ Pending Reg INTC_ICIP.
+ */
+#define INTC_ICIP (PKUNITY_INTC_BASE + 0x000C)
+/*
+ * INTC REAL Pending Reg INTC_ICFP.
+ */
+#define INTC_ICFP (PKUNITY_INTC_BASE + 0x0010)
+/*
+ * INTC Control Reg INTC_ICCR.
+ */
+#define INTC_ICCR (PKUNITY_INTC_BASE + 0x0014)
+
diff --git a/arch/unicore32/include/mach/regs-nand.h b/arch/unicore32/include/mach/regs-nand.h
new file mode 100644
index 000000000000..a7c5563bb550
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-nand.h
@@ -0,0 +1,79 @@
+/*
+ * PKUnity NAND Controller Registers
+ */
+/*
+ * ID Reg. 0 NAND_IDR0
+ */
+#define NAND_IDR0 (PKUNITY_NAND_BASE + 0x0000)
+/*
+ * ID Reg. 1 NAND_IDR1
+ */
+#define NAND_IDR1 (PKUNITY_NAND_BASE + 0x0004)
+/*
+ * ID Reg. 2 NAND_IDR2
+ */
+#define NAND_IDR2 (PKUNITY_NAND_BASE + 0x0008)
+/*
+ * ID Reg. 3 NAND_IDR3
+ */
+#define NAND_IDR3 (PKUNITY_NAND_BASE + 0x000C)
+/*
+ * Page Address Reg 0 NAND_PAR0
+ */
+#define NAND_PAR0 (PKUNITY_NAND_BASE + 0x0010)
+/*
+ * Page Address Reg 1 NAND_PAR1
+ */
+#define NAND_PAR1 (PKUNITY_NAND_BASE + 0x0014)
+/*
+ * Page Address Reg 2 NAND_PAR2
+ */
+#define NAND_PAR2 (PKUNITY_NAND_BASE + 0x0018)
+/*
+ * ECC Enable Reg NAND_ECCEN
+ */
+#define NAND_ECCEN (PKUNITY_NAND_BASE + 0x001C)
+/*
+ * Buffer Reg NAND_BUF
+ */
+#define NAND_BUF (PKUNITY_NAND_BASE + 0x0020)
+/*
+ * ECC Status Reg NAND_ECCSR
+ */
+#define NAND_ECCSR (PKUNITY_NAND_BASE + 0x0024)
+/*
+ * Command Reg NAND_CMD
+ */
+#define NAND_CMD (PKUNITY_NAND_BASE + 0x0028)
+/*
+ * DMA Configure Reg NAND_DMACR
+ */
+#define NAND_DMACR (PKUNITY_NAND_BASE + 0x002C)
+/*
+ * Interrupt Reg NAND_IR
+ */
+#define NAND_IR (PKUNITY_NAND_BASE + 0x0030)
+/*
+ * Interrupt Mask Reg NAND_IMR
+ */
+#define NAND_IMR (PKUNITY_NAND_BASE + 0x0034)
+/*
+ * Chip Enable Reg NAND_CHIPEN
+ */
+#define NAND_CHIPEN (PKUNITY_NAND_BASE + 0x0038)
+/*
+ * Address Reg NAND_ADDR
+ */
+#define NAND_ADDR (PKUNITY_NAND_BASE + 0x003C)
+
+/*
+ * Command bits NAND_CMD_CMD_MASK
+ */
+#define NAND_CMD_CMD_MASK FMASK(4, 4)
+#define NAND_CMD_CMD_READPAGE FIELD(0x0, 4, 4)
+#define NAND_CMD_CMD_ERASEBLOCK FIELD(0x6, 4, 4)
+#define NAND_CMD_CMD_READSTATUS FIELD(0x7, 4, 4)
+#define NAND_CMD_CMD_WRITEPAGE FIELD(0x8, 4, 4)
+#define NAND_CMD_CMD_READID FIELD(0x9, 4, 4)
+#define NAND_CMD_CMD_RESET FIELD(0xf, 4, 4)
+
diff --git a/arch/unicore32/include/mach/regs-ost.h b/arch/unicore32/include/mach/regs-ost.h
new file mode 100644
index 000000000000..7b91fe698eed
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-ost.h
@@ -0,0 +1,92 @@
+/*
+ * PKUnity Operating System Timer (OST) Registers
+ */
+/*
+ * Match Reg 0 OST_OSMR0
+ */
+#define OST_OSMR0 (PKUNITY_OST_BASE + 0x0000)
+/*
+ * Match Reg 1 OST_OSMR1
+ */
+#define OST_OSMR1 (PKUNITY_OST_BASE + 0x0004)
+/*
+ * Match Reg 2 OST_OSMR2
+ */
+#define OST_OSMR2 (PKUNITY_OST_BASE + 0x0008)
+/*
+ * Match Reg 3 OST_OSMR3
+ */
+#define OST_OSMR3 (PKUNITY_OST_BASE + 0x000C)
+/*
+ * Counter Reg OST_OSCR
+ */
+#define OST_OSCR (PKUNITY_OST_BASE + 0x0010)
+/*
+ * Status Reg OST_OSSR
+ */
+#define OST_OSSR (PKUNITY_OST_BASE + 0x0014)
+/*
+ * Watchdog Enable Reg OST_OWER
+ */
+#define OST_OWER (PKUNITY_OST_BASE + 0x0018)
+/*
+ * Interrupt Enable Reg OST_OIER
+ */
+#define OST_OIER (PKUNITY_OST_BASE + 0x001C)
+/*
+ * PWM Pulse Width Control Reg OST_PWMPWCR
+ */
+#define OST_PWMPWCR (PKUNITY_OST_BASE + 0x0080)
+/*
+ * PWM Duty Cycle Control Reg OST_PWMDCCR
+ */
+#define OST_PWMDCCR (PKUNITY_OST_BASE + 0x0084)
+/*
+ * PWM Period Control Reg OST_PWMPCR
+ */
+#define OST_PWMPCR (PKUNITY_OST_BASE + 0x0088)
+
+/*
+ * Match detected 0 OST_OSSR_M0
+ */
+#define OST_OSSR_M0 FIELD(1, 1, 0)
+/*
+ * Match detected 1 OST_OSSR_M1
+ */
+#define OST_OSSR_M1 FIELD(1, 1, 1)
+/*
+ * Match detected 2 OST_OSSR_M2
+ */
+#define OST_OSSR_M2 FIELD(1, 1, 2)
+/*
+ * Match detected 3 OST_OSSR_M3
+ */
+#define OST_OSSR_M3 FIELD(1, 1, 3)
+
+/*
+ * Interrupt enable 0 OST_OIER_E0
+ */
+#define OST_OIER_E0 FIELD(1, 1, 0)
+/*
+ * Interrupt enable 1 OST_OIER_E1
+ */
+#define OST_OIER_E1 FIELD(1, 1, 1)
+/*
+ * Interrupt enable 2 OST_OIER_E2
+ */
+#define OST_OIER_E2 FIELD(1, 1, 2)
+/*
+ * Interrupt enable 3 OST_OIER_E3
+ */
+#define OST_OIER_E3 FIELD(1, 1, 3)
+
+/*
+ * Watchdog Match Enable OST_OWER_WME
+ */
+#define OST_OWER_WME FIELD(1, 1, 0)
+
+/*
+ * PWM Full Duty Cycle OST_PWMDCCR_FDCYCLE
+ */
+#define OST_PWMDCCR_FDCYCLE FIELD(1, 1, 10)
+
diff --git a/arch/unicore32/include/mach/regs-pci.h b/arch/unicore32/include/mach/regs-pci.h
new file mode 100644
index 000000000000..6a9341686bf8
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-pci.h
@@ -0,0 +1,94 @@
+/*
+ * PKUnity AHB-PCI Bridge Registers
+ */
+
+/*
+ * AHB/PCI fixed physical address for PCI address configuration
+ */
+/*
+ * PCICFG Bridge Base Reg.
+ */
+#define PCICFG_BRIBASE (PKUNITY_PCICFG_BASE + 0x0000)
+/*
+ * PCICFG Address Reg.
+ */
+#define PCICFG_ADDR (PKUNITY_PCICFG_BASE + 0x0004)
+/*
+ * PCICFG Data Reg.
+ */
+#define PCICFG_DATA (PKUNITY_PCICFG_BASE + 0x0008)
+
+/*
+ * PCI Bridge configuration space
+ */
+#define PCIBRI_ID (PKUNITY_PCIBRI_BASE + 0x0000)
+#define PCIBRI_CMD (PKUNITY_PCIBRI_BASE + 0x0004)
+#define PCIBRI_CLASS (PKUNITY_PCIBRI_BASE + 0x0008)
+#define PCIBRI_LTR (PKUNITY_PCIBRI_BASE + 0x000C)
+#define PCIBRI_BAR0 (PKUNITY_PCIBRI_BASE + 0x0010)
+#define PCIBRI_BAR1 (PKUNITY_PCIBRI_BASE + 0x0014)
+#define PCIBRI_BAR2 (PKUNITY_PCIBRI_BASE + 0x0018)
+#define PCIBRI_BAR3 (PKUNITY_PCIBRI_BASE + 0x001C)
+#define PCIBRI_BAR4 (PKUNITY_PCIBRI_BASE + 0x0020)
+#define PCIBRI_BAR5 (PKUNITY_PCIBRI_BASE + 0x0024)
+
+#define PCIBRI_PCICTL0 (PKUNITY_PCIBRI_BASE + 0x0100)
+#define PCIBRI_PCIBAR0 (PKUNITY_PCIBRI_BASE + 0x0104)
+#define PCIBRI_PCIAMR0 (PKUNITY_PCIBRI_BASE + 0x0108)
+#define PCIBRI_PCITAR0 (PKUNITY_PCIBRI_BASE + 0x010C)
+#define PCIBRI_PCICTL1 (PKUNITY_PCIBRI_BASE + 0x0110)
+#define PCIBRI_PCIBAR1 (PKUNITY_PCIBRI_BASE + 0x0114)
+#define PCIBRI_PCIAMR1 (PKUNITY_PCIBRI_BASE + 0x0118)
+#define PCIBRI_PCITAR1 (PKUNITY_PCIBRI_BASE + 0x011C)
+#define PCIBRI_PCICTL2 (PKUNITY_PCIBRI_BASE + 0x0120)
+#define PCIBRI_PCIBAR2 (PKUNITY_PCIBRI_BASE + 0x0124)
+#define PCIBRI_PCIAMR2 (PKUNITY_PCIBRI_BASE + 0x0128)
+#define PCIBRI_PCITAR2 (PKUNITY_PCIBRI_BASE + 0x012C)
+#define PCIBRI_PCICTL3 (PKUNITY_PCIBRI_BASE + 0x0130)
+#define PCIBRI_PCIBAR3 (PKUNITY_PCIBRI_BASE + 0x0134)
+#define PCIBRI_PCIAMR3 (PKUNITY_PCIBRI_BASE + 0x0138)
+#define PCIBRI_PCITAR3 (PKUNITY_PCIBRI_BASE + 0x013C)
+#define PCIBRI_PCICTL4 (PKUNITY_PCIBRI_BASE + 0x0140)
+#define PCIBRI_PCIBAR4 (PKUNITY_PCIBRI_BASE + 0x0144)
+#define PCIBRI_PCIAMR4 (PKUNITY_PCIBRI_BASE + 0x0148)
+#define PCIBRI_PCITAR4 (PKUNITY_PCIBRI_BASE + 0x014C)
+#define PCIBRI_PCICTL5 (PKUNITY_PCIBRI_BASE + 0x0150)
+#define PCIBRI_PCIBAR5 (PKUNITY_PCIBRI_BASE + 0x0154)
+#define PCIBRI_PCIAMR5 (PKUNITY_PCIBRI_BASE + 0x0158)
+#define PCIBRI_PCITAR5 (PKUNITY_PCIBRI_BASE + 0x015C)
+
+#define PCIBRI_AHBCTL0 (PKUNITY_PCIBRI_BASE + 0x0180)
+#define PCIBRI_AHBBAR0 (PKUNITY_PCIBRI_BASE + 0x0184)
+#define PCIBRI_AHBAMR0 (PKUNITY_PCIBRI_BASE + 0x0188)
+#define PCIBRI_AHBTAR0 (PKUNITY_PCIBRI_BASE + 0x018C)
+#define PCIBRI_AHBCTL1 (PKUNITY_PCIBRI_BASE + 0x0190)
+#define PCIBRI_AHBBAR1 (PKUNITY_PCIBRI_BASE + 0x0194)
+#define PCIBRI_AHBAMR1 (PKUNITY_PCIBRI_BASE + 0x0198)
+#define PCIBRI_AHBTAR1 (PKUNITY_PCIBRI_BASE + 0x019C)
+#define PCIBRI_AHBCTL2 (PKUNITY_PCIBRI_BASE + 0x01A0)
+#define PCIBRI_AHBBAR2 (PKUNITY_PCIBRI_BASE + 0x01A4)
+#define PCIBRI_AHBAMR2 (PKUNITY_PCIBRI_BASE + 0x01A8)
+#define PCIBRI_AHBTAR2 (PKUNITY_PCIBRI_BASE + 0x01AC)
+#define PCIBRI_AHBCTL3 (PKUNITY_PCIBRI_BASE + 0x01B0)
+#define PCIBRI_AHBBAR3 (PKUNITY_PCIBRI_BASE + 0x01B4)
+#define PCIBRI_AHBAMR3 (PKUNITY_PCIBRI_BASE + 0x01B8)
+#define PCIBRI_AHBTAR3 (PKUNITY_PCIBRI_BASE + 0x01BC)
+#define PCIBRI_AHBCTL4 (PKUNITY_PCIBRI_BASE + 0x01C0)
+#define PCIBRI_AHBBAR4 (PKUNITY_PCIBRI_BASE + 0x01C4)
+#define PCIBRI_AHBAMR4 (PKUNITY_PCIBRI_BASE + 0x01C8)
+#define PCIBRI_AHBTAR4 (PKUNITY_PCIBRI_BASE + 0x01CC)
+#define PCIBRI_AHBCTL5 (PKUNITY_PCIBRI_BASE + 0x01D0)
+#define PCIBRI_AHBBAR5 (PKUNITY_PCIBRI_BASE + 0x01D4)
+#define PCIBRI_AHBAMR5 (PKUNITY_PCIBRI_BASE + 0x01D8)
+#define PCIBRI_AHBTAR5 (PKUNITY_PCIBRI_BASE + 0x01DC)
+
+#define PCIBRI_CTLx_AT FIELD(1, 1, 2)
+#define PCIBRI_CTLx_PREF FIELD(1, 1, 1)
+#define PCIBRI_CTLx_MRL FIELD(1, 1, 0)
+
+#define PCIBRI_BARx_ADDR FIELD(0xFFFFFFFC, 30, 2)
+#define PCIBRI_BARx_IO FIELD(1, 1, 0)
+#define PCIBRI_BARx_MEM FIELD(0, 1, 0)
+
+#define PCIBRI_CMD_IO FIELD(1, 1, 0)
+#define PCIBRI_CMD_MEM FIELD(1, 1, 1)
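+
+/*
+ * Illustrative example: the FIELD()/FMASK() helpers used throughout these
+ * headers are assumed to expand to a value shifted into bit position and to
+ * the matching bit mask, respectively.  A bridge driver would then enable
+ * I/O and memory decoding with an ordinary read-modify-write:
+ *
+ *	writel(readl(PCIBRI_CMD) | PCIBRI_CMD_IO | PCIBRI_CMD_MEM,
+ *			PCIBRI_CMD);
+ */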
diff --git a/arch/unicore32/include/mach/regs-pm.h b/arch/unicore32/include/mach/regs-pm.h
new file mode 100644
index 000000000000..854844aa8f4b
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-pm.h
@@ -0,0 +1,126 @@
+/*
+ * PKUNITY Power Manager (PM) Registers
+ */
+/*
+ * PM Control Reg PM_PMCR
+ */
+#define PM_PMCR (PKUNITY_PM_BASE + 0x0000)
+/*
+ * PM General Conf. Reg PM_PGCR
+ */
+#define PM_PGCR (PKUNITY_PM_BASE + 0x0004)
+/*
+ * PM PLL Conf. Reg PM_PPCR
+ */
+#define PM_PPCR (PKUNITY_PM_BASE + 0x0008)
+/*
+ * PM Wakeup Enable Reg PM_PWER
+ */
+#define PM_PWER (PKUNITY_PM_BASE + 0x000C)
+/*
+ * PM GPIO Sleep Status Reg PM_PGSR
+ */
+#define PM_PGSR (PKUNITY_PM_BASE + 0x0010)
+/*
+ * PM Clock Gate Reg PM_PCGR
+ */
+#define PM_PCGR (PKUNITY_PM_BASE + 0x0014)
+/*
+ * PM SYS PLL Conf. Reg PM_PLLSYSCFG
+ */
+#define PM_PLLSYSCFG (PKUNITY_PM_BASE + 0x0018)
+/*
+ * PM DDR PLL Conf. Reg PM_PLLDDRCFG
+ */
+#define PM_PLLDDRCFG (PKUNITY_PM_BASE + 0x001C)
+/*
+ * PM VGA PLL Conf. Reg PM_PLLVGACFG
+ */
+#define PM_PLLVGACFG (PKUNITY_PM_BASE + 0x0020)
+/*
+ * PM Div Conf. Reg PM_DIVCFG
+ */
+#define PM_DIVCFG (PKUNITY_PM_BASE + 0x0024)
+/*
+ * PM SYS PLL Status Reg PM_PLLSYSSTATUS
+ */
+#define PM_PLLSYSSTATUS (PKUNITY_PM_BASE + 0x0028)
+/*
+ * PM DDR PLL Status Reg PM_PLLDDRSTATUS
+ */
+#define PM_PLLDDRSTATUS (PKUNITY_PM_BASE + 0x002C)
+/*
+ * PM VGA PLL Status Reg PM_PLLVGASTATUS
+ */
+#define PM_PLLVGASTATUS (PKUNITY_PM_BASE + 0x0030)
+/*
+ * PM Div Status Reg PM_DIVSTATUS
+ */
+#define PM_DIVSTATUS (PKUNITY_PM_BASE + 0x0034)
+/*
+ * PM Software Reset Reg PM_SWRESET
+ */
+#define PM_SWRESET (PKUNITY_PM_BASE + 0x0038)
+/*
+ * PM DDR2 PAD Start Reg PM_DDR2START
+ */
+#define PM_DDR2START (PKUNITY_PM_BASE + 0x003C)
+/*
+ * PM DDR2 PAD Status Reg PM_DDR2CAL0
+ */
+#define PM_DDR2CAL0 (PKUNITY_PM_BASE + 0x0040)
+/*
+ * PM PLL DFC Done Reg PM_PLLDFCDONE
+ */
+#define PM_PLLDFCDONE (PKUNITY_PM_BASE + 0x0044)
+
+#define PM_PMCR_SFB FIELD(1, 1, 0)
+#define PM_PMCR_IFB FIELD(1, 1, 1)
+#define PM_PMCR_CFBSYS FIELD(1, 1, 2)
+#define PM_PMCR_CFBDDR FIELD(1, 1, 3)
+#define PM_PMCR_CFBVGA FIELD(1, 1, 4)
+#define PM_PMCR_CFBDIVBCLK FIELD(1, 1, 5)
+
+/*
+ * GPIO 8~27 wake-up enable PM_PWER_GPIOHIGH
+ */
+#define PM_PWER_GPIOHIGH FIELD(1, 1, 8)
+/*
+ * RTC alarm wake-up enable PM_PWER_RTC
+ */
+#define PM_PWER_RTC FIELD(1, 1, 31)
+
+#define PM_PCGR_BCLK64DDR FIELD(1, 1, 0)
+#define PM_PCGR_BCLK64VGA FIELD(1, 1, 1)
+#define PM_PCGR_BCLKDDR FIELD(1, 1, 2)
+#define PM_PCGR_BCLKPCI FIELD(1, 1, 4)
+#define PM_PCGR_BCLKDMAC FIELD(1, 1, 5)
+#define PM_PCGR_BCLKUMAL FIELD(1, 1, 6)
+#define PM_PCGR_BCLKUSB FIELD(1, 1, 7)
+#define PM_PCGR_BCLKMME FIELD(1, 1, 10)
+#define PM_PCGR_BCLKNAND FIELD(1, 1, 11)
+#define PM_PCGR_BCLKH264E FIELD(1, 1, 12)
+#define PM_PCGR_BCLKVGA FIELD(1, 1, 13)
+#define PM_PCGR_BCLKH264D FIELD(1, 1, 14)
+#define PM_PCGR_VECLK FIELD(1, 1, 15)
+#define PM_PCGR_HECLK FIELD(1, 1, 16)
+#define PM_PCGR_HDCLK FIELD(1, 1, 17)
+#define PM_PCGR_NANDCLK FIELD(1, 1, 18)
+#define PM_PCGR_GECLK FIELD(1, 1, 19)
+#define PM_PCGR_VGACLK FIELD(1, 1, 20)
+#define PM_PCGR_PCICLK FIELD(1, 1, 21)
+#define PM_PCGR_SATACLK FIELD(1, 1, 25)
+
+/*
+ * [23:20] PM_DIVCFG_VGACLK(v)
+ */
+#define PM_DIVCFG_VGACLK_MASK FMASK(4, 20)
+#define PM_DIVCFG_VGACLK(v) FIELD((v), 4, 20)
+
+#define PM_SWRESET_USB FIELD(1, 1, 6)
+#define PM_SWRESET_VGADIV FIELD(1, 1, 26)
+#define PM_SWRESET_GEDIV FIELD(1, 1, 27)
+
+#define PM_PLLDFCDONE_SYSDFC FIELD(1, 1, 0)
+#define PM_PLLDFCDONE_DDRDFC FIELD(1, 1, 1)
+#define PM_PLLDFCDONE_VGADFC FIELD(1, 1, 2)
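+
+/*
+ * Illustrative example, mirroring how kernel/clock.c in this series drives
+ * these registers with readl()/writel().  Selecting a VGA clock divider of
+ * 9 while the VGA clock is gated on:
+ *
+ *	writel(readl(PM_PCGR) | PM_PCGR_VGACLK, PM_PCGR);
+ *	writel((readl(PM_DIVCFG) & ~PM_DIVCFG_VGACLK_MASK)
+ *			| PM_DIVCFG_VGACLK(9), PM_DIVCFG);
+ */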
diff --git a/arch/unicore32/include/mach/regs-ps2.h b/arch/unicore32/include/mach/regs-ps2.h
new file mode 100644
index 000000000000..17d4e6dc0069
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-ps2.h
@@ -0,0 +1,20 @@
+/*
+ * PKUnity PS2 Controller Registers
+ */
+/*
+ * the same as I8042_DATA_REG PS2_DATA
+ */
+#define PS2_DATA (PKUNITY_PS2_BASE + 0x0060)
+/*
+ * the same as I8042_COMMAND_REG PS2_COMMAND
+ */
+#define PS2_COMMAND (PKUNITY_PS2_BASE + 0x0064)
+/*
+ * the same as I8042_STATUS_REG PS2_STATUS
+ */
+#define PS2_STATUS (PKUNITY_PS2_BASE + 0x0064)
+/*
+ * counter reg PS2_CNT
+ */
+#define PS2_CNT (PKUNITY_PS2_BASE + 0x0068)
+
diff --git a/arch/unicore32/include/mach/regs-resetc.h b/arch/unicore32/include/mach/regs-resetc.h
new file mode 100644
index 000000000000..39900cf4c936
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-resetc.h
@@ -0,0 +1,34 @@
+/*
+ * PKUnity Reset Controller (RC) Registers
+ */
+/*
+ * Software Reset Register
+ */
+#define RESETC_SWRR (PKUNITY_RESETC_BASE + 0x0000)
+/*
+ * Reset Status Register
+ */
+#define RESETC_RSSR (PKUNITY_RESETC_BASE + 0x0004)
+
+/*
+ * Software Reset Bit
+ */
+#define RESETC_SWRR_SRB FIELD(1, 1, 0)
+
+/*
+ * Hardware Reset
+ */
+#define RESETC_RSSR_HWR FIELD(1, 1, 0)
+/*
+ * Software Reset
+ */
+#define RESETC_RSSR_SWR FIELD(1, 1, 1)
+/*
+ * Watchdog Reset
+ */
+#define RESETC_RSSR_WDR FIELD(1, 1, 2)
+/*
+ * Sleep Mode Reset
+ */
+#define RESETC_RSSR_SMR FIELD(1, 1, 3)
+
diff --git a/arch/unicore32/include/mach/regs-rtc.h b/arch/unicore32/include/mach/regs-rtc.h
new file mode 100644
index 000000000000..e94ca193271d
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-rtc.h
@@ -0,0 +1,37 @@
+/*
+ * PKUnity Real-Time Clock (RTC) control registers
+ */
+/*
+ * RTC Alarm Reg RTC_RTAR
+ */
+#define RTC_RTAR (PKUNITY_RTC_BASE + 0x0000)
+/*
+ * RTC Count Reg RTC_RCNR
+ */
+#define RTC_RCNR (PKUNITY_RTC_BASE + 0x0004)
+/*
+ * RTC Trim Reg RTC_RTTR
+ */
+#define RTC_RTTR (PKUNITY_RTC_BASE + 0x0008)
+/*
+ * RTC Status Reg RTC_RTSR
+ */
+#define RTC_RTSR (PKUNITY_RTC_BASE + 0x0010)
+
+/*
+ * ALarm detected RTC_RTSR_AL
+ */
+#define RTC_RTSR_AL FIELD(1, 1, 0)
+/*
+ * 1 Hz clock detected RTC_RTSR_HZ
+ */
+#define RTC_RTSR_HZ FIELD(1, 1, 1)
+/*
+ * ALarm interrupt Enable RTC_RTSR_ALE
+ */
+#define RTC_RTSR_ALE FIELD(1, 1, 2)
+/*
+ * 1 Hz clock interrupt Enable RTC_RTSR_HZE
+ */
+#define RTC_RTSR_HZE FIELD(1, 1, 3)
+
diff --git a/arch/unicore32/include/mach/regs-sdc.h b/arch/unicore32/include/mach/regs-sdc.h
new file mode 100644
index 000000000000..1303ecf660ba
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-sdc.h
@@ -0,0 +1,156 @@
+/*
+ * PKUnity Multi-Media Card and Security Digital Card (MMC/SD) Registers
+ */
+/*
+ * Clock Control Reg SDC_CCR
+ */
+#define SDC_CCR (PKUNITY_SDC_BASE + 0x0000)
+/*
+ * Software Reset Reg SDC_SRR
+ */
+#define SDC_SRR (PKUNITY_SDC_BASE + 0x0004)
+/*
+ * Argument Reg SDC_ARGUMENT
+ */
+#define SDC_ARGUMENT (PKUNITY_SDC_BASE + 0x0008)
+/*
+ * Command Reg SDC_COMMAND
+ */
+#define SDC_COMMAND (PKUNITY_SDC_BASE + 0x000C)
+/*
+ * Block Size Reg SDC_BLOCKSIZE
+ */
+#define SDC_BLOCKSIZE (PKUNITY_SDC_BASE + 0x0010)
+/*
+ * Block Count Reg SDC_BLOCKCOUNT
+ */
+#define SDC_BLOCKCOUNT (PKUNITY_SDC_BASE + 0x0014)
+/*
+ * Transfer Mode Reg SDC_TMR
+ */
+#define SDC_TMR (PKUNITY_SDC_BASE + 0x0018)
+/*
+ * Response Reg. 0 SDC_RES0
+ */
+#define SDC_RES0 (PKUNITY_SDC_BASE + 0x001C)
+/*
+ * Response Reg. 1 SDC_RES1
+ */
+#define SDC_RES1 (PKUNITY_SDC_BASE + 0x0020)
+/*
+ * Response Reg. 2 SDC_RES2
+ */
+#define SDC_RES2 (PKUNITY_SDC_BASE + 0x0024)
+/*
+ * Response Reg. 3 SDC_RES3
+ */
+#define SDC_RES3 (PKUNITY_SDC_BASE + 0x0028)
+/*
+ * Read Timeout Control Reg SDC_RTCR
+ */
+#define SDC_RTCR (PKUNITY_SDC_BASE + 0x002C)
+/*
+ * Interrupt Status Reg SDC_ISR
+ */
+#define SDC_ISR (PKUNITY_SDC_BASE + 0x0030)
+/*
+ * Interrupt Status Mask Reg SDC_ISMR
+ */
+#define SDC_ISMR (PKUNITY_SDC_BASE + 0x0034)
+/*
+ * RX FIFO SDC_RXFIFO
+ */
+#define SDC_RXFIFO (PKUNITY_SDC_BASE + 0x0038)
+/*
+ * TX FIFO SDC_TXFIFO
+ */
+#define SDC_TXFIFO (PKUNITY_SDC_BASE + 0x003C)
+
+/*
+ * SD Clock Enable SDC_CCR_CLKEN
+ */
+#define SDC_CCR_CLKEN FIELD(1, 1, 2)
+/*
+ * [15:8] SDC_CCR_PDIV(v)
+ */
+#define SDC_CCR_PDIV(v) FIELD((v), 8, 8)
+
+/*
+ * Software reset enable SDC_SRR_ENABLE
+ */
+#define SDC_SRR_ENABLE FIELD(0, 1, 0)
+/*
+ * Software reset disable SDC_SRR_DISABLE
+ */
+#define SDC_SRR_DISABLE FIELD(1, 1, 0)
+
+/*
+ * Response type SDC_COMMAND_RESTYPE_MASK
+ */
+#define SDC_COMMAND_RESTYPE_MASK FMASK(2, 0)
+/*
+ * No response SDC_COMMAND_RESTYPE_NONE
+ */
+#define SDC_COMMAND_RESTYPE_NONE FIELD(0, 2, 0)
+/*
+ * 136-bit long response SDC_COMMAND_RESTYPE_LONG
+ */
+#define SDC_COMMAND_RESTYPE_LONG FIELD(1, 2, 0)
+/*
+ * 48-bit short response SDC_COMMAND_RESTYPE_SHORT
+ */
+#define SDC_COMMAND_RESTYPE_SHORT FIELD(2, 2, 0)
+/*
+ * 48-bit short and test if busy response SDC_COMMAND_RESTYPE_SHORTBUSY
+ */
+#define SDC_COMMAND_RESTYPE_SHORTBUSY FIELD(3, 2, 0)
+/*
+ * data ready SDC_COMMAND_DATAREADY
+ */
+#define SDC_COMMAND_DATAREADY FIELD(1, 1, 2)
+#define SDC_COMMAND_CMDEN FIELD(1, 1, 3)
+/*
+ * [10:5] SDC_COMMAND_CMDINDEX(v)
+ */
+#define SDC_COMMAND_CMDINDEX(v) FIELD((v), 6, 5)
+
+/*
+ * [10:0] SDC_BLOCKSIZE_BSMASK(v)
+ */
+#define SDC_BLOCKSIZE_BSMASK(v) FIELD((v), 11, 0)
+/*
+ * [11:0] SDC_BLOCKCOUNT_BCMASK(v)
+ */
+#define SDC_BLOCKCOUNT_BCMASK(v) FIELD((v), 12, 0)
+
+/*
+ * Data Width 1bit SDC_TMR_WTH_1BIT
+ */
+#define SDC_TMR_WTH_1BIT FIELD(0, 1, 0)
+/*
+ * Data Width 4bit SDC_TMR_WTH_4BIT
+ */
+#define SDC_TMR_WTH_4BIT FIELD(1, 1, 0)
+/*
+ * Read SDC_TMR_DIR_READ
+ */
+#define SDC_TMR_DIR_READ FIELD(0, 1, 1)
+/*
+ * Write SDC_TMR_DIR_WRITE
+ */
+#define SDC_TMR_DIR_WRITE FIELD(1, 1, 1)
+
+#define SDC_IR_MASK FMASK(13, 0)
+#define SDC_IR_RESTIMEOUT FIELD(1, 1, 0)
+#define SDC_IR_WRITECRC FIELD(1, 1, 1)
+#define SDC_IR_READCRC FIELD(1, 1, 2)
+#define SDC_IR_TXFIFOREAD FIELD(1, 1, 3)
+#define SDC_IR_RXFIFOWRITE FIELD(1, 1, 4)
+#define SDC_IR_READTIMEOUT FIELD(1, 1, 5)
+#define SDC_IR_DATACOMPLETE FIELD(1, 1, 6)
+#define SDC_IR_CMDCOMPLETE FIELD(1, 1, 7)
+#define SDC_IR_RXFIFOFULL FIELD(1, 1, 8)
+#define SDC_IR_RXFIFOEMPTY FIELD(1, 1, 9)
+#define SDC_IR_TXFIFOFULL FIELD(1, 1, 10)
+#define SDC_IR_TXFIFOEMPTY FIELD(1, 1, 11)
+#define SDC_IR_ENDCMDWITHRES FIELD(1, 1, 12)
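+
+/*
+ * Illustrative example (assumed usage, derived only from the fields above):
+ * a host driver would compose a command word for, say, CMD17 (single-block
+ * read, 48-bit R1 response) as:
+ *
+ *	writel(SDC_COMMAND_CMDINDEX(17) | SDC_COMMAND_DATAREADY
+ *			| SDC_COMMAND_RESTYPE_SHORT | SDC_COMMAND_CMDEN,
+ *			SDC_COMMAND);
+ */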
diff --git a/arch/unicore32/include/mach/regs-spi.h b/arch/unicore32/include/mach/regs-spi.h
new file mode 100644
index 000000000000..de16895e2dc0
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-spi.h
@@ -0,0 +1,98 @@
+/*
+ * PKUnity Serial Peripheral Interface (SPI) Registers
+ */
+/*
+ * Control reg. 0 SPI_CR0
+ */
+#define SPI_CR0 (PKUNITY_SPI_BASE + 0x0000)
+/*
+ * Control reg. 1 SPI_CR1
+ */
+#define SPI_CR1 (PKUNITY_SPI_BASE + 0x0004)
+/*
+ * Enable reg SPI_SSIENR
+ */
+#define SPI_SSIENR (PKUNITY_SPI_BASE + 0x0008)
+/*
+ * Status reg SPI_SR
+ */
+#define SPI_SR (PKUNITY_SPI_BASE + 0x0028)
+/*
+ * Interrupt Mask reg SPI_IMR
+ */
+#define SPI_IMR (PKUNITY_SPI_BASE + 0x002C)
+/*
+ * Interrupt Status reg SPI_ISR
+ */
+#define SPI_ISR (PKUNITY_SPI_BASE + 0x0030)
+
+/*
+ * Enable SPI Controller SPI_SSIENR_EN
+ */
+#define SPI_SSIENR_EN FIELD(1, 1, 0)
+
+/*
+ * SPI Busy SPI_SR_BUSY
+ */
+#define SPI_SR_BUSY FIELD(1, 1, 0)
+/*
+ * Transmit FIFO Not Full SPI_SR_TFNF
+ */
+#define SPI_SR_TFNF FIELD(1, 1, 1)
+/*
+ * Transmit FIFO Empty SPI_SR_TFE
+ */
+#define SPI_SR_TFE FIELD(1, 1, 2)
+/*
+ * Receive FIFO Not Empty SPI_SR_RFNE
+ */
+#define SPI_SR_RFNE FIELD(1, 1, 3)
+/*
+ * Receive FIFO Full SPI_SR_RFF
+ */
+#define SPI_SR_RFF FIELD(1, 1, 4)
+
+/*
+ * Trans. FIFO Empty Interrupt Status SPI_ISR_TXEIS
+ */
+#define SPI_ISR_TXEIS FIELD(1, 1, 0)
+/*
+ * Trans. FIFO Overflow Interrupt Status SPI_ISR_TXOIS
+ */
+#define SPI_ISR_TXOIS FIELD(1, 1, 1)
+/*
+ * Receiv. FIFO Underflow Interrupt Status SPI_ISR_RXUIS
+ */
+#define SPI_ISR_RXUIS FIELD(1, 1, 2)
+/*
+ * Receiv. FIFO Overflow Interrupt Status SPI_ISR_RXOIS
+ */
+#define SPI_ISR_RXOIS FIELD(1, 1, 3)
+/*
+ * Receiv. FIFO Full Interrupt Status SPI_ISR_RXFIS
+ */
+#define SPI_ISR_RXFIS FIELD(1, 1, 4)
+#define SPI_ISR_MSTIS FIELD(1, 1, 5)
+
+/*
+ * Trans. FIFO Empty Interrupt Mask SPI_IMR_TXEIM
+ */
+#define SPI_IMR_TXEIM FIELD(1, 1, 0)
+/*
+ * Trans. FIFO Overflow Interrupt Mask SPI_IMR_TXOIM
+ */
+#define SPI_IMR_TXOIM FIELD(1, 1, 1)
+/*
+ * Receiv. FIFO Underflow Interrupt Mask SPI_IMR_RXUIM
+ */
+#define SPI_IMR_RXUIM FIELD(1, 1, 2)
+/*
+ * Receiv. FIFO Overflow Interrupt Mask SPI_IMR_RXOIM
+ */
+#define SPI_IMR_RXOIM FIELD(1, 1, 3)
+/*
+ * Receiv. FIFO Full Interrupt Mask SPI_IMR_RXFIM
+ */
+#define SPI_IMR_RXFIM FIELD(1, 1, 4)
+#define SPI_IMR_MSTIM FIELD(1, 1, 5)
+
diff --git a/arch/unicore32/include/mach/regs-uart.h b/arch/unicore32/include/mach/regs-uart.h
new file mode 100644
index 000000000000..9fa6b1938b77
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-uart.h
@@ -0,0 +1,3 @@
+/*
+ * PKUnity Universal Asynchronous Receiver/Transmitter (UART) Registers
+ */
diff --git a/arch/unicore32/include/mach/regs-umal.h b/arch/unicore32/include/mach/regs-umal.h
new file mode 100644
index 000000000000..885bb62fee71
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-umal.h
@@ -0,0 +1,229 @@
+/*
+ * PKUnity Ultra Media Access Layer (UMAL) Ethernet MAC Registers
+ */
+
+/* MAC module of UMAL */
+/* UMAL's MAC module includes a G/MII interface, several additional PHY
+ * interfaces, and a MAC control sub-layer, which provides support for
+ * control frames (e.g. PAUSE frames).
+ */
+/*
+ * TX/RX reset and control UMAL_CFG1
+ */
+#define UMAL_CFG1 (PKUNITY_UMAL_BASE + 0x0000)
+/*
+ * MAC interface mode control UMAL_CFG2
+ */
+#define UMAL_CFG2 (PKUNITY_UMAL_BASE + 0x0004)
+/*
+ * Inter Packet/Frame Gap UMAL_IPGIFG
+ */
+#define UMAL_IPGIFG (PKUNITY_UMAL_BASE + 0x0008)
+/*
+ * Collision retry or backoff UMAL_HALFDUPLEX
+ */
+#define UMAL_HALFDUPLEX (PKUNITY_UMAL_BASE + 0x000c)
+/*
+ * Maximum Frame Length UMAL_MAXFRAME
+ */
+#define UMAL_MAXFRAME (PKUNITY_UMAL_BASE + 0x0010)
+/*
+ * Test Register UMAL_TESTREG
+ */
+#define UMAL_TESTREG (PKUNITY_UMAL_BASE + 0x001c)
+/*
+ * MII Management Configure UMAL_MIICFG
+ */
+#define UMAL_MIICFG (PKUNITY_UMAL_BASE + 0x0020)
+/*
+ * MII Management Command UMAL_MIICMD
+ */
+#define UMAL_MIICMD (PKUNITY_UMAL_BASE + 0x0024)
+/*
+ * MII Management Address UMAL_MIIADDR
+ */
+#define UMAL_MIIADDR (PKUNITY_UMAL_BASE + 0x0028)
+/*
+ * MII Management Control UMAL_MIICTRL
+ */
+#define UMAL_MIICTRL (PKUNITY_UMAL_BASE + 0x002c)
+/*
+ * MII Management Status UMAL_MIISTATUS
+ */
+#define UMAL_MIISTATUS (PKUNITY_UMAL_BASE + 0x0030)
+/*
+ * MII Management Indicator UMAL_MIIIDCT
+ */
+#define UMAL_MIIIDCT (PKUNITY_UMAL_BASE + 0x0034)
+/*
+ * Interface Control UMAL_IFCTRL
+ */
+#define UMAL_IFCTRL (PKUNITY_UMAL_BASE + 0x0038)
+/*
+ * Interface Status UMAL_IFSTATUS
+ */
+#define UMAL_IFSTATUS (PKUNITY_UMAL_BASE + 0x003c)
+/*
+ * MAC address (high 4 bytes) UMAL_STADDR1
+ */
+#define UMAL_STADDR1 (PKUNITY_UMAL_BASE + 0x0040)
+/*
+ * MAC address (low 2 bytes) UMAL_STADDR2
+ */
+#define UMAL_STADDR2 (PKUNITY_UMAL_BASE + 0x0044)
+
+/* FIFO MODULE OF UMAL */
+/* UMAL's FIFO module provides data queuing for increased system-level
+ * throughput.
+ */
+#define UMAL_FIFOCFG0 (PKUNITY_UMAL_BASE + 0x0048)
+#define UMAL_FIFOCFG1 (PKUNITY_UMAL_BASE + 0x004c)
+#define UMAL_FIFOCFG2 (PKUNITY_UMAL_BASE + 0x0050)
+#define UMAL_FIFOCFG3 (PKUNITY_UMAL_BASE + 0x0054)
+#define UMAL_FIFOCFG4 (PKUNITY_UMAL_BASE + 0x0058)
+#define UMAL_FIFOCFG5 (PKUNITY_UMAL_BASE + 0x005c)
+#define UMAL_FIFORAM0 (PKUNITY_UMAL_BASE + 0x0060)
+#define UMAL_FIFORAM1 (PKUNITY_UMAL_BASE + 0x0064)
+#define UMAL_FIFORAM2 (PKUNITY_UMAL_BASE + 0x0068)
+#define UMAL_FIFORAM3 (PKUNITY_UMAL_BASE + 0x006c)
+#define UMAL_FIFORAM4 (PKUNITY_UMAL_BASE + 0x0070)
+#define UMAL_FIFORAM5 (PKUNITY_UMAL_BASE + 0x0074)
+#define UMAL_FIFORAM6 (PKUNITY_UMAL_BASE + 0x0078)
+#define UMAL_FIFORAM7 (PKUNITY_UMAL_BASE + 0x007c)
+
+/* MAHBE MODULE OF UMAL */
+/* UMAL's MAHBE module interfaces to the host system through 32-bit AHB
+ * Master and Slave ports.  Registers within the M-AHBE provide control and
+ * status information concerning these transfers.
+ */
+/*
+ * Transmit Control UMAL_DMATxCtrl
+ */
+#define UMAL_DMATxCtrl (PKUNITY_UMAL_BASE + 0x0180)
+/*
+ * Pointer to TX Descriptor UMAL_DMATxDescriptor
+ */
+#define UMAL_DMATxDescriptor (PKUNITY_UMAL_BASE + 0x0184)
+/*
+ * Status of Tx Packet Transfers UMAL_DMATxStatus
+ */
+#define UMAL_DMATxStatus (PKUNITY_UMAL_BASE + 0x0188)
+/*
+ * Receive Control UMAL_DMARxCtrl
+ */
+#define UMAL_DMARxCtrl (PKUNITY_UMAL_BASE + 0x018c)
+/*
+ * Pointer to Rx Descriptor UMAL_DMARxDescriptor
+ */
+#define UMAL_DMARxDescriptor (PKUNITY_UMAL_BASE + 0x0190)
+/*
+ * Status of Rx Packet Transfers UMAL_DMARxStatus
+ */
+#define UMAL_DMARxStatus (PKUNITY_UMAL_BASE + 0x0194)
+/*
+ * Interrupt Mask UMAL_DMAIntrMask
+ */
+#define UMAL_DMAIntrMask (PKUNITY_UMAL_BASE + 0x0198)
+/*
+ * Interrupts, read only UMAL_DMAInterrupt
+ */
+#define UMAL_DMAInterrupt (PKUNITY_UMAL_BASE + 0x019c)
+
+/*
+ * Commands for UMAL_CFG1 register
+ */
+#define UMAL_CFG1_TXENABLE FIELD(1, 1, 0)
+#define UMAL_CFG1_RXENABLE FIELD(1, 1, 2)
+#define UMAL_CFG1_TXFLOWCTL FIELD(1, 1, 4)
+#define UMAL_CFG1_RXFLOWCTL FIELD(1, 1, 5)
+#define UMAL_CFG1_CONFLPBK FIELD(1, 1, 8)
+#define UMAL_CFG1_RESET FIELD(1, 1, 31)
+#define UMAL_CFG1_CONFFLCTL (MAC_TX_FLOW_CTL | MAC_RX_FLOW_CTL)
+
+/*
+ * Commands for UMAL_CFG2 register
+ */
+#define UMAL_CFG2_FULLDUPLEX FIELD(1, 1, 0)
+#define UMAL_CFG2_CRCENABLE FIELD(1, 1, 1)
+#define UMAL_CFG2_PADCRC FIELD(1, 1, 2)
+#define UMAL_CFG2_LENGTHCHECK FIELD(1, 1, 4)
+#define UMAL_CFG2_MODEMASK FMASK(2, 8)
+#define UMAL_CFG2_NIBBLEMODE FIELD(1, 2, 8)
+#define UMAL_CFG2_BYTEMODE FIELD(2, 2, 8)
+#define UMAL_CFG2_PREAMBLENMASK FMASK(4, 12)
+#define UMAL_CFG2_DEFPREAMBLEN FIELD(7, 4, 12)
+#define UMAL_CFG2_FD100 (UMAL_CFG2_DEFPREAMBLEN | UMAL_CFG2_NIBBLEMODE \
+ | UMAL_CFG2_LENGTHCHECK | UMAL_CFG2_PADCRC \
+ | UMAL_CFG2_CRCENABLE | UMAL_CFG2_FULLDUPLEX)
+#define UMAL_CFG2_FD1000 (UMAL_CFG2_DEFPREAMBLEN | UMAL_CFG2_BYTEMODE \
+ | UMAL_CFG2_LENGTHCHECK | UMAL_CFG2_PADCRC \
+ | UMAL_CFG2_CRCENABLE | UMAL_CFG2_FULLDUPLEX)
+#define UMAL_CFG2_HD100 (UMAL_CFG2_DEFPREAMBLEN | UMAL_CFG2_NIBBLEMODE \
+ | UMAL_CFG2_LENGTHCHECK | UMAL_CFG2_PADCRC \
+ | UMAL_CFG2_CRCENABLE)
+
+/*
+ * Command for UMAL_IFCTRL register
+ */
+#define UMAL_IFCTRL_RESET FIELD(1, 1, 31)
+
+/*
+ * Command for UMAL_MIICFG register
+ */
+#define UMAL_MIICFG_RESET FIELD(1, 1, 31)
+
+/*
+ * Command for UMAL_MIICMD register
+ */
+#define UMAL_MIICMD_READ FIELD(1, 1, 0)
+
+/*
+ * Command for UMAL_MIIIDCT register
+ */
+#define UMAL_MIIIDCT_BUSY FIELD(1, 1, 0)
+#define UMAL_MIIIDCT_NOTVALID FIELD(1, 1, 2)
+
+/*
+ * Commands for DMATxCtrl registers
+ */
+#define UMAL_DMA_Enable FIELD(1, 1, 0)
+
+/*
+ * Commands for DMARxCtrl registers
+ */
+#define UMAL_DMAIntrMask_ENABLEHALFWORD FIELD(1, 1, 16)
+
+/*
+ * Command for DMARxStatus
+ */
+#define CLR_RX_BUS_ERR FIELD(1, 1, 3)
+#define CLR_RX_OVERFLOW FIELD(1, 1, 2)
+#define CLR_RX_PKT FIELD(1, 1, 0)
+
+/*
+ * Command for DMATxStatus
+ */
+#define CLR_TX_BUS_ERR FIELD(1, 1, 3)
+#define CLR_TX_UNDERRUN FIELD(1, 1, 1)
+#define CLR_TX_PKT FIELD(1, 1, 0)
+
+/*
+ * Commands for DMAIntrMask and DMAInterrupt register
+ */
+#define INT_RX_MASK FIELD(0xd, 4, 4)
+#define INT_TX_MASK FIELD(0xb, 4, 0)
+
+#define INT_RX_BUS_ERR FIELD(1, 1, 7)
+#define INT_RX_OVERFLOW FIELD(1, 1, 6)
+#define INT_RX_PKT FIELD(1, 1, 4)
+#define INT_TX_BUS_ERR FIELD(1, 1, 3)
+#define INT_TX_UNDERRUN FIELD(1, 1, 1)
+#define INT_TX_PKT FIELD(1, 1, 0)
+
+/*
+ * Macros for UMAL's descriptors
+ */
+#define UMAL_DESC_PACKETSIZE_EMPTY FIELD(1, 1, 31)
+#define UMAL_DESC_PACKETSIZE_NONEMPTY FIELD(0, 1, 31)
+#define UMAL_DESC_PACKETSIZE_SIZEMASK FMASK(12, 0)
+
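+/*
+ * Illustrative example (assumed usage): a MAC driver would test and extract
+ * the packet-size word of a DMA descriptor with the macros above, where
+ * `pktsize' is the hypothetical 32-bit word read from the descriptor:
+ *
+ *	if (pktsize & UMAL_DESC_PACKETSIZE_EMPTY)
+ *		return;		// descriptor not yet filled by hardware
+ *	len = pktsize & UMAL_DESC_PACKETSIZE_SIZEMASK;
+ */
+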
diff --git a/arch/unicore32/include/mach/regs-unigfx.h b/arch/unicore32/include/mach/regs-unigfx.h
new file mode 100644
index 000000000000..faf8b287fccf
--- /dev/null
+++ b/arch/unicore32/include/mach/regs-unigfx.h
@@ -0,0 +1,200 @@
+/*
+ * PKUnity UNIGFX Registers
+ */
+
+#define UDE_BASE (PKUNITY_UNIGFX_BASE + 0x1400)
+#define UGE_BASE (PKUNITY_UNIGFX_BASE + 0x0000)
+
+/*
+ * command reg for UNIGFX DE
+ */
+/*
+ * control reg UDE_CFG
+ */
+#define UDE_CFG (UDE_BASE + 0x0000)
+/*
+ * framebuffer start address reg UDE_FSA
+ */
+#define UDE_FSA (UDE_BASE + 0x0004)
+/*
+ * line size reg UDE_LS
+ */
+#define UDE_LS (UDE_BASE + 0x0008)
+/*
+ * pitch size reg UDE_PS
+ */
+#define UDE_PS (UDE_BASE + 0x000C)
+/*
+ * horizontal active time reg UDE_HAT
+ */
+#define UDE_HAT (UDE_BASE + 0x0010)
+/*
+ * horizontal blank time reg UDE_HBT
+ */
+#define UDE_HBT (UDE_BASE + 0x0014)
+/*
+ * horizontal sync time reg UDE_HST
+ */
+#define UDE_HST (UDE_BASE + 0x0018)
+/*
+ * vertical active time reg UDE_VAT
+ */
+#define UDE_VAT (UDE_BASE + 0x001C)
+/*
+ * vertical blank time reg UDE_VBT
+ */
+#define UDE_VBT (UDE_BASE + 0x0020)
+/*
+ * vertical sync time reg UDE_VST
+ */
+#define UDE_VST (UDE_BASE + 0x0024)
+/*
+ * cursor position UDE_CXY
+ */
+#define UDE_CXY (UDE_BASE + 0x0028)
+/*
+ * cursor front color UDE_CC0
+ */
+#define UDE_CC0 (UDE_BASE + 0x002C)
+/*
+ * cursor background color UDE_CC1
+ */
+#define UDE_CC1 (UDE_BASE + 0x0030)
+/*
+ * video position UDE_VXY
+ */
+#define UDE_VXY (UDE_BASE + 0x0034)
+/*
+ * video start address reg UDE_VSA
+ */
+#define UDE_VSA (UDE_BASE + 0x0040)
+/*
+ * video size reg UDE_VS
+ */
+#define UDE_VS (UDE_BASE + 0x004C)
+
+/*
+ * command reg for UNIGFX GE
+ */
+/*
+ * src xy reg UGE_SRCXY
+ */
+#define UGE_SRCXY (UGE_BASE + 0x0000)
+/*
+ * dst xy reg UGE_DSTXY
+ */
+#define UGE_DSTXY (UGE_BASE + 0x0004)
+/*
+ * pitch reg UGE_PITCH
+ */
+#define UGE_PITCH (UGE_BASE + 0x0008)
+/*
+ * src start reg UGE_SRCSTART
+ */
+#define UGE_SRCSTART (UGE_BASE + 0x000C)
+/*
+ * dst start reg UGE_DSTSTART
+ */
+#define UGE_DSTSTART (UGE_BASE + 0x0010)
+/*
+ * width height reg UGE_WIDHEIGHT
+ */
+#define UGE_WIDHEIGHT (UGE_BASE + 0x0014)
+/*
+ * rop alpha reg UGE_ROPALPHA
+ */
+#define UGE_ROPALPHA (UGE_BASE + 0x0018)
+/*
+ * front color UGE_FCOLOR
+ */
+#define UGE_FCOLOR (UGE_BASE + 0x001C)
+/*
+ * background color UGE_BCOLOR
+ */
+#define UGE_BCOLOR (UGE_BASE + 0x0020)
+/*
+ * src color key for high value UGE_SCH
+ */
+#define UGE_SCH (UGE_BASE + 0x0024)
+/*
+ * dst color key for high value UGE_DCH
+ */
+#define UGE_DCH (UGE_BASE + 0x0028)
+/*
+ * src color key for low value UGE_SCL
+ */
+#define UGE_SCL (UGE_BASE + 0x002C)
+/*
+ * dst color key for low value UGE_DCL
+ */
+#define UGE_DCL (UGE_BASE + 0x0030)
+/*
+ * clip 0 reg UGE_CLIP0
+ */
+#define UGE_CLIP0 (UGE_BASE + 0x0034)
+/*
+ * clip 1 reg UGE_CLIP1
+ */
+#define UGE_CLIP1 (UGE_BASE + 0x0038)
+/*
+ * command reg UGE_COMMAND
+ */
+#define UGE_COMMAND (UGE_BASE + 0x003C)
+/*
+ * pattern 0 UGE_P0
+ */
+#define UGE_P0 (UGE_BASE + 0x0040)
+#define UGE_P1 (UGE_BASE + 0x0044)
+#define UGE_P2 (UGE_BASE + 0x0048)
+#define UGE_P3 (UGE_BASE + 0x004C)
+#define UGE_P4 (UGE_BASE + 0x0050)
+#define UGE_P5 (UGE_BASE + 0x0054)
+#define UGE_P6 (UGE_BASE + 0x0058)
+#define UGE_P7 (UGE_BASE + 0x005C)
+#define UGE_P8 (UGE_BASE + 0x0060)
+#define UGE_P9 (UGE_BASE + 0x0064)
+#define UGE_P10 (UGE_BASE + 0x0068)
+#define UGE_P11 (UGE_BASE + 0x006C)
+#define UGE_P12 (UGE_BASE + 0x0070)
+#define UGE_P13 (UGE_BASE + 0x0074)
+#define UGE_P14 (UGE_BASE + 0x0078)
+#define UGE_P15 (UGE_BASE + 0x007C)
+#define UGE_P16 (UGE_BASE + 0x0080)
+#define UGE_P17 (UGE_BASE + 0x0084)
+#define UGE_P18 (UGE_BASE + 0x0088)
+#define UGE_P19 (UGE_BASE + 0x008C)
+#define UGE_P20 (UGE_BASE + 0x0090)
+#define UGE_P21 (UGE_BASE + 0x0094)
+#define UGE_P22 (UGE_BASE + 0x0098)
+#define UGE_P23 (UGE_BASE + 0x009C)
+#define UGE_P24 (UGE_BASE + 0x00A0)
+#define UGE_P25 (UGE_BASE + 0x00A4)
+#define UGE_P26 (UGE_BASE + 0x00A8)
+#define UGE_P27 (UGE_BASE + 0x00AC)
+#define UGE_P28 (UGE_BASE + 0x00B0)
+#define UGE_P29 (UGE_BASE + 0x00B4)
+#define UGE_P30 (UGE_BASE + 0x00B8)
+#define UGE_P31 (UGE_BASE + 0x00BC)
+
+#define UDE_CFG_DST_MASK FMASK(2, 8)
+#define UDE_CFG_DST8 FIELD(0x0, 2, 8)
+#define UDE_CFG_DST16 FIELD(0x1, 2, 8)
+#define UDE_CFG_DST24 FIELD(0x2, 2, 8)
+#define UDE_CFG_DST32 FIELD(0x3, 2, 8)
+
+/*
+ * GDEN enable UDE_CFG_GDEN_ENABLE
+ */
+#define UDE_CFG_GDEN_ENABLE FIELD(1, 1, 3)
+/*
+ * VDEN enable UDE_CFG_VDEN_ENABLE
+ */
+#define UDE_CFG_VDEN_ENABLE FIELD(1, 1, 4)
+/*
+ * CDEN enable UDE_CFG_CDEN_ENABLE
+ */
+#define UDE_CFG_CDEN_ENABLE FIELD(1, 1, 5)
+/*
+ * TIMEUP enable UDE_CFG_TIMEUP_ENABLE
+ */
+#define UDE_CFG_TIMEUP_ENABLE FIELD(1, 1, 6)
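+
+/*
+ * Illustrative example (assumed usage): a framebuffer driver would select a
+ * 16bpp destination format and enable the graphics display engine with:
+ *
+ *	writel((readl(UDE_CFG) & ~UDE_CFG_DST_MASK)
+ *			| UDE_CFG_DST16 | UDE_CFG_GDEN_ENABLE, UDE_CFG);
+ */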
diff --git a/arch/unicore32/include/mach/uncompress.h b/arch/unicore32/include/mach/uncompress.h
new file mode 100644
index 000000000000..142d3e7958a9
--- /dev/null
+++ b/arch/unicore32/include/mach/uncompress.h
@@ -0,0 +1,34 @@
+/*
+ * linux/arch/unicore32/include/mach/uncompress.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MACH_PUV3_UNCOMPRESS_H__
+#define __MACH_PUV3_UNCOMPRESS_H__
+
+#include "hardware.h"
+#include "ocd.h"
+
+extern char input_data[];
+extern char input_data_end[];
+
+static void arch_decomp_puts(const char *ptr)
+{
+ char c;
+
+ while ((c = *ptr++) != '\0') {
+ if (c == '\n')
+ putc('\r');
+ putc(c);
+ }
+}
+#define ARCH_HAVE_DECOMP_PUTS
+
+#endif /* __MACH_PUV3_UNCOMPRESS_H__ */
diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile
new file mode 100644
index 000000000000..ec23a2fb2f50
--- /dev/null
+++ b/arch/unicore32/kernel/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for the linux kernel.
+#
+
+# Object file lists.
+obj-y := dma.o elf.o entry.o process.o ptrace.o
+obj-y += setup.o signal.o sys.o stacktrace.o traps.o
+
+obj-$(CONFIG_MODULES) += ksyms.o module.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_CPU_FREQ) += cpu-ucv2.o
+obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o
+
+# obj-y for architecture PKUnity v3
+obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o
+
+obj-$(CONFIG_PUV3_GPIO) += gpio.o
+obj-$(CONFIG_PUV3_RTC) += rtc.o
+obj-$(CONFIG_PUV3_PWM) += pwm.o
+obj-$(CONFIG_PUV3_PM) += pm.o sleep.o
+obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
+
+obj-$(CONFIG_PCI) += pci.o
+
+# obj-y for specific machines
+obj-$(CONFIG_ARCH_PUV3) += puv3-core.o
+obj-$(CONFIG_PUV3_NB0916) += puv3-nb0916.o
+
+head-y := head.o
+obj-$(CONFIG_DEBUG_LL) += debug.o
+
+extra-y := $(head-y) init_task.o vmlinux.lds
diff --git a/arch/unicore32/kernel/asm-offsets.c b/arch/unicore32/kernel/asm-offsets.c
new file mode 100644
index 000000000000..ffcbe7536ca7
--- /dev/null
+++ b/arch/unicore32/kernel/asm-offsets.c
@@ -0,0 +1,112 @@
+/*
+ * linux/arch/unicore32/kernel/asm-offsets.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <linux/thread_info.h>
+#include <asm/memory.h>
+#include <asm/suspend.h>
+
+/*
+ * GCC 3.0, 3.1: general bad code generation.
+ * GCC 3.2.0: incorrect function argument offset calculation.
+ * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
+ * (http://gcc.gnu.org/PR8896) and incorrect structure
+ * initialisation in fs/jffs2/erase.c
+ */
+#if (__GNUC__ < 4)
+#error Your compiler should be upgraded to uc4
+#error Known good compilers: 4.2.2
+#endif
+
+int main(void)
+{
+ DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ BLANK();
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
+ DEFINE(TI_USED_CP, offsetof(struct thread_info, used_cp));
+#ifdef CONFIG_UNICORE_FPU_F64
+ DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate));
+#endif
+ BLANK();
+ DEFINE(S_R0, offsetof(struct pt_regs, UCreg_00));
+ DEFINE(S_R1, offsetof(struct pt_regs, UCreg_01));
+ DEFINE(S_R2, offsetof(struct pt_regs, UCreg_02));
+ DEFINE(S_R3, offsetof(struct pt_regs, UCreg_03));
+ DEFINE(S_R4, offsetof(struct pt_regs, UCreg_04));
+ DEFINE(S_R5, offsetof(struct pt_regs, UCreg_05));
+ DEFINE(S_R6, offsetof(struct pt_regs, UCreg_06));
+ DEFINE(S_R7, offsetof(struct pt_regs, UCreg_07));
+ DEFINE(S_R8, offsetof(struct pt_regs, UCreg_08));
+ DEFINE(S_R9, offsetof(struct pt_regs, UCreg_09));
+ DEFINE(S_R10, offsetof(struct pt_regs, UCreg_10));
+ DEFINE(S_R11, offsetof(struct pt_regs, UCreg_11));
+ DEFINE(S_R12, offsetof(struct pt_regs, UCreg_12));
+ DEFINE(S_R13, offsetof(struct pt_regs, UCreg_13));
+ DEFINE(S_R14, offsetof(struct pt_regs, UCreg_14));
+ DEFINE(S_R15, offsetof(struct pt_regs, UCreg_15));
+ DEFINE(S_R16, offsetof(struct pt_regs, UCreg_16));
+ DEFINE(S_R17, offsetof(struct pt_regs, UCreg_17));
+ DEFINE(S_R18, offsetof(struct pt_regs, UCreg_18));
+ DEFINE(S_R19, offsetof(struct pt_regs, UCreg_19));
+ DEFINE(S_R20, offsetof(struct pt_regs, UCreg_20));
+ DEFINE(S_R21, offsetof(struct pt_regs, UCreg_21));
+ DEFINE(S_R22, offsetof(struct pt_regs, UCreg_22));
+ DEFINE(S_R23, offsetof(struct pt_regs, UCreg_23));
+ DEFINE(S_R24, offsetof(struct pt_regs, UCreg_24));
+ DEFINE(S_R25, offsetof(struct pt_regs, UCreg_25));
+ DEFINE(S_R26, offsetof(struct pt_regs, UCreg_26));
+ DEFINE(S_FP, offsetof(struct pt_regs, UCreg_fp));
+ DEFINE(S_IP, offsetof(struct pt_regs, UCreg_ip));
+ DEFINE(S_SP, offsetof(struct pt_regs, UCreg_sp));
+ DEFINE(S_LR, offsetof(struct pt_regs, UCreg_lr));
+ DEFINE(S_PC, offsetof(struct pt_regs, UCreg_pc));
+ DEFINE(S_PSR, offsetof(struct pt_regs, UCreg_asr));
+ DEFINE(S_OLD_R0, offsetof(struct pt_regs, UCreg_ORIG_00));
+ DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
+ BLANK();
+ DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
+ DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
+ BLANK();
+ DEFINE(VM_EXEC, VM_EXEC);
+ BLANK();
+ DEFINE(PAGE_SZ, PAGE_SIZE);
+ BLANK();
+ DEFINE(SYS_ERROR0, 0x9f0000);
+ BLANK();
+ DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
+ DEFINE(PBE_ORIN_ADDRESS, offsetof(struct pbe, orig_address));
+ DEFINE(PBE_NEXT, offsetof(struct pbe, next));
+ DEFINE(SWSUSP_CPU, offsetof(struct swsusp_arch_regs, \
+ cpu_context));
+#ifdef CONFIG_UNICORE_FPU_F64
+ DEFINE(SWSUSP_FPSTATE, offsetof(struct swsusp_arch_regs, \
+ fpstate));
+#endif
+ BLANK();
+ DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
+ DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
+ DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
+ return 0;
+}
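+
+/*
+ * The constants emitted here end up in <generated/asm-offsets.h> and are
+ * consumed from assembly; kernel/entry.S below, for instance, unwinds the
+ * saved register file with
+ *
+ *	add	sp, sp, #S_FRAME_SIZE - S_R16
+ */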
diff --git a/arch/unicore32/kernel/clock.c b/arch/unicore32/kernel/clock.c
new file mode 100644
index 000000000000..18d4563e6fa5
--- /dev/null
+++ b/arch/unicore32/kernel/clock.c
@@ -0,0 +1,390 @@
+/*
+ * linux/arch/unicore32/kernel/clock.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+/*
+ * Very simple clock implementation
+ */
+struct clk {
+ struct list_head node;
+ unsigned long rate;
+ const char *name;
+};
+
+static struct clk clk_ost_clk = {
+ .name = "OST_CLK",
+ .rate = CLOCK_TICK_RATE,
+};
+
+static struct clk clk_mclk_clk = {
+ .name = "MAIN_CLK",
+};
+
+static struct clk clk_bclk32_clk = {
+ .name = "BUS32_CLK",
+};
+
+static struct clk clk_ddr_clk = {
+ .name = "DDR_CLK",
+};
+
+static struct clk clk_vga_clk = {
+ .name = "VGA_CLK",
+};
+
+static LIST_HEAD(clocks);
+static DEFINE_MUTEX(clocks_mutex);
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ struct clk *p, *clk = ERR_PTR(-ENOENT);
+
+ mutex_lock(&clocks_mutex);
+ list_for_each_entry(p, &clocks, node) {
+ if (strcmp(id, p->name) == 0) {
+ clk = p;
+ break;
+ }
+ }
+ mutex_unlock(&clocks_mutex);
+
+ return clk;
+}
+EXPORT_SYMBOL(clk_get);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
+
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+struct {
+ unsigned long rate;
+ unsigned long cfg;
+ unsigned long div;
+} vga_clk_table[] = {
+ {.rate = 25175000, .cfg = 0x00002001, .div = 0x9},
+ {.rate = 31500000, .cfg = 0x00002001, .div = 0x7},
+ {.rate = 40000000, .cfg = 0x00003801, .div = 0x9},
+ {.rate = 49500000, .cfg = 0x00003801, .div = 0x7},
+ {.rate = 65000000, .cfg = 0x00002c01, .div = 0x4},
+ {.rate = 78750000, .cfg = 0x00002400, .div = 0x7},
+ {.rate = 108000000, .cfg = 0x00002c01, .div = 0x2},
+ {.rate = 106500000, .cfg = 0x00003c01, .div = 0x3},
+ {.rate = 50650000, .cfg = 0x00106400, .div = 0x9},
+ {.rate = 61500000, .cfg = 0x00106400, .div = 0xa},
+ {.rate = 85500000, .cfg = 0x00002800, .div = 0x6},
+};
+
+struct {
+ unsigned long mrate;
+ unsigned long prate;
+} mclk_clk_table[] = {
+ {.mrate = 500000000, .prate = 0x00109801},
+ {.mrate = 525000000, .prate = 0x00104C00},
+ {.mrate = 550000000, .prate = 0x00105000},
+ {.mrate = 575000000, .prate = 0x00105400},
+ {.mrate = 600000000, .prate = 0x00105800},
+ {.mrate = 625000000, .prate = 0x00105C00},
+ {.mrate = 650000000, .prate = 0x00106000},
+ {.mrate = 675000000, .prate = 0x00106400},
+ {.mrate = 700000000, .prate = 0x00106800},
+ {.mrate = 725000000, .prate = 0x00106C00},
+ {.mrate = 750000000, .prate = 0x00107000},
+ {.mrate = 775000000, .prate = 0x00107400},
+ {.mrate = 800000000, .prate = 0x00107800},
+};
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ if (clk == &clk_vga_clk) {
+ unsigned long pll_vgacfg, pll_vgadiv;
+ int ret, i;
+
+ /* lookup vga_clk_table */
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(vga_clk_table); i++) {
+ if (rate == vga_clk_table[i].rate) {
+ pll_vgacfg = vga_clk_table[i].cfg;
+ pll_vgadiv = vga_clk_table[i].div;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (readl(PM_PLLVGACFG) == pll_vgacfg)
+ return 0;
+
+ /* set pll vga cfg reg. */
+ writel(pll_vgacfg, PM_PLLVGACFG);
+
+ writel(PM_PMCR_CFBVGA, PM_PMCR);
+ while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_VGADFC)
+ != PM_PLLDFCDONE_VGADFC)
+ udelay(100); /* about 1ms */
+
+ /* set div cfg reg. */
+ writel(readl(PM_PCGR) | PM_PCGR_VGACLK, PM_PCGR);
+
+ writel((readl(PM_DIVCFG) & ~PM_DIVCFG_VGACLK_MASK)
+ | PM_DIVCFG_VGACLK(pll_vgadiv), PM_DIVCFG);
+
+ writel(readl(PM_SWRESET) | PM_SWRESET_VGADIV, PM_SWRESET);
+ while ((readl(PM_SWRESET) & PM_SWRESET_VGADIV)
+ == PM_SWRESET_VGADIV)
+ udelay(100); /* 65536 bclk32, about 320us */
+
+ writel(readl(PM_PCGR) & ~PM_PCGR_VGACLK, PM_PCGR);
+ }
+#ifdef CONFIG_CPU_FREQ
+ if (clk == &clk_mclk_clk) {
+		u32 pll_rate, divstatus = readl(PM_DIVSTATUS);
+ int ret, i;
+
+ /* lookup mclk_clk_table */
+ ret = -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(mclk_clk_table); i++) {
+ if (rate == mclk_clk_table[i].mrate) {
+ pll_rate = mclk_clk_table[i].prate;
+ clk_mclk_clk.rate = mclk_clk_table[i].mrate;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (clk_mclk_clk.rate)
+ clk_bclk32_clk.rate = clk_mclk_clk.rate
+ / (((divstatus & 0x0000f000) >> 12) + 1);
+
+		/* set pll sys cfg reg. */
+		writel(pll_rate, PM_PLLSYSCFG);
+
+		writel(PM_PMCR_CFBSYS, PM_PMCR);
+		while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_SYSDFC)
+				!= PM_PLLDFCDONE_SYSDFC)
+			udelay(100); /* about 1ms */
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+int clk_register(struct clk *clk)
+{
+ mutex_lock(&clocks_mutex);
+ list_add(&clk->node, &clocks);
+ mutex_unlock(&clocks_mutex);
+ printk(KERN_DEFAULT "PKUnity PM: %s %lu.%02luM\n", clk->name,
+ (clk->rate)/1000000, (clk->rate)/10000 % 100);
+ return 0;
+}
+EXPORT_SYMBOL(clk_register);
+
+void clk_unregister(struct clk *clk)
+{
+ mutex_lock(&clocks_mutex);
+ list_del(&clk->node);
+ mutex_unlock(&clocks_mutex);
+}
+EXPORT_SYMBOL(clk_unregister);
+
+struct {
+ unsigned long prate;
+ unsigned long rate;
+} pllrate_table[] = {
+ {.prate = 0x00002001, .rate = 250000000},
+ {.prate = 0x00104801, .rate = 250000000},
+ {.prate = 0x00104C01, .rate = 262500000},
+ {.prate = 0x00002401, .rate = 275000000},
+ {.prate = 0x00105001, .rate = 275000000},
+ {.prate = 0x00105401, .rate = 287500000},
+ {.prate = 0x00002801, .rate = 300000000},
+ {.prate = 0x00105801, .rate = 300000000},
+ {.prate = 0x00105C01, .rate = 312500000},
+ {.prate = 0x00002C01, .rate = 325000000},
+ {.prate = 0x00106001, .rate = 325000000},
+ {.prate = 0x00106401, .rate = 337500000},
+ {.prate = 0x00003001, .rate = 350000000},
+ {.prate = 0x00106801, .rate = 350000000},
+ {.prate = 0x00106C01, .rate = 362500000},
+ {.prate = 0x00003401, .rate = 375000000},
+ {.prate = 0x00107001, .rate = 375000000},
+ {.prate = 0x00107401, .rate = 387500000},
+ {.prate = 0x00003801, .rate = 400000000},
+ {.prate = 0x00107801, .rate = 400000000},
+ {.prate = 0x00107C01, .rate = 412500000},
+ {.prate = 0x00003C01, .rate = 425000000},
+ {.prate = 0x00108001, .rate = 425000000},
+ {.prate = 0x00108401, .rate = 437500000},
+ {.prate = 0x00004001, .rate = 450000000},
+ {.prate = 0x00108801, .rate = 450000000},
+ {.prate = 0x00108C01, .rate = 462500000},
+ {.prate = 0x00004401, .rate = 475000000},
+ {.prate = 0x00109001, .rate = 475000000},
+ {.prate = 0x00109401, .rate = 487500000},
+ {.prate = 0x00004801, .rate = 500000000},
+ {.prate = 0x00109801, .rate = 500000000},
+ {.prate = 0x00104C00, .rate = 525000000},
+ {.prate = 0x00002400, .rate = 550000000},
+ {.prate = 0x00105000, .rate = 550000000},
+ {.prate = 0x00105400, .rate = 575000000},
+ {.prate = 0x00002800, .rate = 600000000},
+ {.prate = 0x00105800, .rate = 600000000},
+ {.prate = 0x00105C00, .rate = 625000000},
+ {.prate = 0x00002C00, .rate = 650000000},
+ {.prate = 0x00106000, .rate = 650000000},
+ {.prate = 0x00106400, .rate = 675000000},
+ {.prate = 0x00003000, .rate = 700000000},
+ {.prate = 0x00106800, .rate = 700000000},
+ {.prate = 0x00106C00, .rate = 725000000},
+ {.prate = 0x00003400, .rate = 750000000},
+ {.prate = 0x00107000, .rate = 750000000},
+ {.prate = 0x00107400, .rate = 775000000},
+ {.prate = 0x00003800, .rate = 800000000},
+ {.prate = 0x00107800, .rate = 800000000},
+ {.prate = 0x00107C00, .rate = 825000000},
+ {.prate = 0x00003C00, .rate = 850000000},
+ {.prate = 0x00108000, .rate = 850000000},
+ {.prate = 0x00108400, .rate = 875000000},
+ {.prate = 0x00004000, .rate = 900000000},
+ {.prate = 0x00108800, .rate = 900000000},
+ {.prate = 0x00108C00, .rate = 925000000},
+ {.prate = 0x00004400, .rate = 950000000},
+ {.prate = 0x00109000, .rate = 950000000},
+ {.prate = 0x00109400, .rate = 975000000},
+ {.prate = 0x00004800, .rate = 1000000000},
+ {.prate = 0x00109800, .rate = 1000000000},
+};
+
+struct {
+ unsigned long prate;
+ unsigned long drate;
+} pddr_table[] = {
+ {.prate = 0x00100800, .drate = 44236800},
+ {.prate = 0x00100C00, .drate = 66355200},
+ {.prate = 0x00101000, .drate = 88473600},
+ {.prate = 0x00101400, .drate = 110592000},
+ {.prate = 0x00101800, .drate = 132710400},
+ {.prate = 0x00101C01, .drate = 154828800},
+ {.prate = 0x00102001, .drate = 176947200},
+ {.prate = 0x00102401, .drate = 199065600},
+ {.prate = 0x00102801, .drate = 221184000},
+ {.prate = 0x00102C01, .drate = 243302400},
+ {.prate = 0x00103001, .drate = 265420800},
+ {.prate = 0x00103401, .drate = 287539200},
+ {.prate = 0x00103801, .drate = 309657600},
+ {.prate = 0x00103C01, .drate = 331776000},
+ {.prate = 0x00104001, .drate = 353894400},
+};
+
+static int __init clk_init(void)
+{
+#ifdef CONFIG_PUV3_PM
+ u32 pllrate, divstatus = readl(PM_DIVSTATUS);
+ u32 pcgr_val = readl(PM_PCGR);
+ int i;
+
+ pcgr_val |= PM_PCGR_BCLKMME | PM_PCGR_BCLKH264E | PM_PCGR_BCLKH264D
+ | PM_PCGR_HECLK | PM_PCGR_HDCLK;
+ writel(pcgr_val, PM_PCGR);
+
+ pllrate = readl(PM_PLLSYSSTATUS);
+
+ /* lookup pmclk_table */
+ clk_mclk_clk.rate = 0;
+ for (i = 0; i < ARRAY_SIZE(pllrate_table); i++) {
+ if (pllrate == pllrate_table[i].prate) {
+ clk_mclk_clk.rate = pllrate_table[i].rate;
+ break;
+ }
+ }
+
+ if (clk_mclk_clk.rate)
+ clk_bclk32_clk.rate = clk_mclk_clk.rate /
+ (((divstatus & 0x0000f000) >> 12) + 1);
+
+ pllrate = readl(PM_PLLDDRSTATUS);
+
+ /* lookup pddr_table */
+ clk_ddr_clk.rate = 0;
+ for (i = 0; i < ARRAY_SIZE(pddr_table); i++) {
+ if (pllrate == pddr_table[i].prate) {
+ clk_ddr_clk.rate = pddr_table[i].drate;
+ break;
+ }
+ }
+
+ pllrate = readl(PM_PLLVGASTATUS);
+
+ /* lookup pvga_table */
+ clk_vga_clk.rate = 0;
+ for (i = 0; i < ARRAY_SIZE(pllrate_table); i++) {
+ if (pllrate == pllrate_table[i].prate) {
+ clk_vga_clk.rate = pllrate_table[i].rate;
+ break;
+ }
+ }
+
+ if (clk_vga_clk.rate)
+ clk_vga_clk.rate = clk_vga_clk.rate /
+ (((divstatus & 0x00f00000) >> 20) + 1);
+
+ clk_register(&clk_vga_clk);
+#endif
+#ifdef CONFIG_ARCH_FPGA
+ clk_ddr_clk.rate = 33000000;
+ clk_mclk_clk.rate = 33000000;
+ clk_bclk32_clk.rate = 33000000;
+#endif
+ clk_register(&clk_ddr_clk);
+ clk_register(&clk_mclk_clk);
+ clk_register(&clk_bclk32_clk);
+ clk_register(&clk_ost_clk);
+ return 0;
+}
+core_initcall(clk_init);
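+
+/*
+ * Illustrative example: consumers look clocks up by the names registered
+ * above, as kernel/cpu-ucv2.c below does for "MAIN_CLK":
+ *
+ *	struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+ *	if (!IS_ERR(mclk))
+ *		pr_info("main clock: %lu Hz\n", clk_get_rate(mclk));
+ */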
diff --git a/arch/unicore32/kernel/cpu-ucv2.c b/arch/unicore32/kernel/cpu-ucv2.c
new file mode 100644
index 000000000000..4a99f62584c7
--- /dev/null
+++ b/arch/unicore32/kernel/cpu-ucv2.c
@@ -0,0 +1,93 @@
+/*
+ * linux/arch/unicore32/kernel/cpu-ucv2.c: clock scaling for the UniCore-II
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <mach/hardware.h>
+
+static struct cpufreq_driver ucv2_driver;
+
+/* make sure that only the "userspace" governor is run
+ * -- anything else wouldn't make sense on this platform, anyway.
+ */
+int ucv2_verify_speed(struct cpufreq_policy *policy)
+{
+ if (policy->cpu)
+ return -EINVAL;
+
+ cpufreq_verify_within_limits(policy,
+ policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+
+ return 0;
+}
+
+static unsigned int ucv2_getspeed(unsigned int cpu)
+{
+ struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+
+ if (cpu)
+ return 0;
+ return clk_get_rate(mclk)/1000;
+}
+
+static int ucv2_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+	unsigned int cur = ucv2_getspeed(0);
+	struct cpufreq_freqs freqs;
+	struct clk *mclk = clk_get(NULL, "MAIN_CLK");
+
+	/* fill in freqs before notifying, so PRECHANGE sees valid data */
+	freqs.old = cur;
+	freqs.new = target_freq;
+	freqs.cpu = 0;
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	if (clk_set_rate(mclk, target_freq * 1000))
+		freqs.new = cur;	/* rate change failed */
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return 0;
+}
+
+static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
+{
+ if (policy->cpu != 0)
+ return -EINVAL;
+ policy->cur = ucv2_getspeed(0);
+ policy->min = policy->cpuinfo.min_freq = 250000;
+ policy->max = policy->cpuinfo.max_freq = 1000000;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ return 0;
+}
+
+static struct cpufreq_driver ucv2_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = ucv2_verify_speed,
+ .target = ucv2_target,
+ .get = ucv2_getspeed,
+ .init = ucv2_cpu_init,
+ .name = "UniCore-II",
+};
+
+static int __init ucv2_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&ucv2_driver);
+}
+
+arch_initcall(ucv2_cpufreq_init);
diff --git a/arch/unicore32/kernel/debug-macro.S b/arch/unicore32/kernel/debug-macro.S
new file mode 100644
index 000000000000..2711d6d87d8e
--- /dev/null
+++ b/arch/unicore32/kernel/debug-macro.S
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/unicore32/kernel/debug-macro.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Debugging macro include header
+ */
+#include <generated/asm-offsets.h>
+#include <mach/hardware.h>
+
+ .macro put_word_ocd, rd, rx=r16
+1001: movc \rx, p1.c0, #0
+ cand.a \rx, #2
+ bne 1001b
+ movc p1.c1, \rd, #1
+ .endm
+
+#ifdef CONFIG_DEBUG_OCD
+ /* debug using UniCore On-Chip-Debugger */
+ .macro addruart, rx
+ .endm
+
+ .macro senduart, rd, rx
+ put_word_ocd \rd, \rx
+ .endm
+
+ .macro busyuart, rd, rx
+ .endm
+
+ .macro waituart, rd, rx
+ .endm
+#else
+#define UART_CLK_DEFAULT	(3686400 * 20)
+	/* Uartclk = MCLK / 2; the MCLK on my board is 3686400 * 40 */
+#define BAUD_RATE_DEFAULT 115200
+ /* The baud rate of the serial port */
+
+#define UART_DIVISOR_DEFAULT (UART_CLK_DEFAULT \
+ / (16 * BAUD_RATE_DEFAULT) - 1)
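+/* e.g. 73728000 / (16 * 115200) - 1 = 40 - 1 = 39 with the defaults above */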
+
+ .macro addruart,rx
+ mrc p0, #0, \rx, c1, c0
+ tst \rx, #1 @ MMU enabled?
+ moveq \rx, #0xee000000 @ physical base address
+ movne \rx, #0x6e000000 @ virtual address
+
+ @ We probe for the active serial port here
+		@ However, for now we assume UART0 is active: epip4d
+ @ We assume r1 and r2 can be clobbered.
+
+ movl r2, #UART_DIVISOR_DEFAULT
+ mov r1, #0x80
+ str r1, [\rx, #UART_LCR_OFFSET]
+ and r1, r2, #0xff00
+ mov r1, r1, lsr #8
+ str r1, [\rx, #UART_DLH_OFFSET]
+ and r1, r2, #0xff
+ str r1, [\rx, #UART_DLL_OFFSET]
+ mov r1, #0x7
+ str r1, [\rx, #UART_FCR_OFFSET]
+ mov r1, #0x3
+ str r1, [\rx, #UART_LCR_OFFSET]
+ mov r1, #0x0
+ str r1, [\rx, #UART_IER_OFFSET]
+ .endm
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #UART_THR_OFFSET]
+ .endm
+
+ .macro waituart,rd,rx
+1001: ldr \rd, [\rx, #UART_LSR_OFFSET]
+ tst \rd, #UART_LSR_THRE
+ beq 1001b
+ .endm
+
+ .macro busyuart,rd,rx
+1001: ldr \rd, [\rx, #UART_LSR_OFFSET]
+ tst \rd, #UART_LSR_TEMT
+ bne 1001b
+ .endm
+#endif
+
diff --git a/arch/unicore32/kernel/debug.S b/arch/unicore32/kernel/debug.S
new file mode 100644
index 000000000000..029fd12f6ab0
--- /dev/null
+++ b/arch/unicore32/kernel/debug.S
@@ -0,0 +1,85 @@
+/*
+ * linux/arch/unicore32/kernel/debug.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 32-bit debugging code
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+/*
+ * Some debugging routines (useful if you've got MM problems and
+ * printk isn't working). For DEBUGGING ONLY!!! Do not leave
+ * references to these in a production kernel!
+ */
+#include "debug-macro.S"
+
+/*
+ * Useful debugging routines
+ */
+ENTRY(printhex8)
+ mov r1, #8
+ b printhex
+ENDPROC(printhex8)
+
+ENTRY(printhex4)
+ mov r1, #4
+ b printhex
+ENDPROC(printhex4)
+
+ENTRY(printhex2)
+ mov r1, #2
+printhex: adr r2, hexbuf
+ add r3, r2, r1
+ mov r1, #0
+ stb r1, [r3]
+1: and r1, r0, #15
+ mov r0, r0 >> #4
+ csub.a r1, #10
+ beg 2f
+ add r1, r1, #'0' - 'a' + 10
+2: add r1, r1, #'a' - 10
+ stb.w r1, [r3+], #-1
+ cxor.a r3, r2
+ bne 1b
+ mov r0, r2
+ b printascii
+ENDPROC(printhex2)
+
+ .ltorg
+
+ENTRY(printascii)
+ addruart r3
+ b 2f
+1: waituart r2, r3
+ senduart r1, r3
+ busyuart r2, r3
+ cxor.a r1, #'\n'
+ cmoveq r1, #'\r'
+ beq 1b
+2: cxor.a r0, #0
+ beq 3f
+ ldb.w r1, [r0]+, #1
+ cxor.a r1, #0
+ bne 1b
+3: mov pc, lr
+ENDPROC(printascii)
+
+ENTRY(printch)
+ addruart r3
+ mov r1, r0
+ mov r0, #0
+ b 1b
+ENDPROC(printch)
+
+hexbuf: .space 16
+
diff --git a/arch/unicore32/kernel/dma.c b/arch/unicore32/kernel/dma.c
new file mode 100644
index 000000000000..ae441bc3122c
--- /dev/null
+++ b/arch/unicore32/kernel/dma.c
@@ -0,0 +1,183 @@
+/*
+ * linux/arch/unicore32/kernel/dma.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <mach/dma.h>
+
+struct dma_channel {
+ char *name;
+ puv3_dma_prio prio;
+ void (*irq_handler)(int, void *);
+ void (*err_handler)(int, void *);
+ void *data;
+};
+
+static struct dma_channel dma_channels[MAX_DMA_CHANNELS];
+
+int puv3_request_dma(char *name, puv3_dma_prio prio,
+ void (*irq_handler)(int, void *),
+ void (*err_handler)(int, void *),
+ void *data)
+{
+ unsigned long flags;
+ int i, found = 0;
+
+ /* basic sanity checks */
+ if (!name)
+ return -EINVAL;
+
+ local_irq_save(flags);
+
+ do {
+ /* try grabbing a DMA channel with the requested priority */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ if ((dma_channels[i].prio == prio) &&
+ !dma_channels[i].name) {
+ found = 1;
+ break;
+ }
+ }
+		/* if requested prio group is full, try a higher priority */
+ } while (!found && prio--);
+
+ if (found) {
+ dma_channels[i].name = name;
+ dma_channels[i].irq_handler = irq_handler;
+ dma_channels[i].err_handler = err_handler;
+ dma_channels[i].data = data;
+ } else {
+ printk(KERN_WARNING "No more available DMA channels for %s\n",
+ name);
+ i = -ENODEV;
+ }
+
+ local_irq_restore(flags);
+ return i;
+}
+EXPORT_SYMBOL(puv3_request_dma);
+
+void puv3_free_dma(int dma_ch)
+{
+ unsigned long flags;
+
+ if (!dma_channels[dma_ch].name) {
+ printk(KERN_CRIT
+ "%s: trying to free channel %d which is already freed\n",
+ __func__, dma_ch);
+ return;
+ }
+
+ local_irq_save(flags);
+ dma_channels[dma_ch].name = NULL;
+ dma_channels[dma_ch].err_handler = NULL;
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(puv3_free_dma);
+
+static irqreturn_t dma_irq_handler(int irq, void *dev_id)
+{
+ int i, dint;
+
+ dint = readl(DMAC_ITCSR);
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ if (dint & DMAC_CHANNEL(i)) {
+ struct dma_channel *channel = &dma_channels[i];
+
+ /* Clear TC interrupt of channel i */
+ writel(DMAC_CHANNEL(i), DMAC_ITCCR);
+ writel(0, DMAC_ITCCR);
+
+ if (channel->name && channel->irq_handler) {
+ channel->irq_handler(i, channel->data);
+ } else {
+ /*
+ * IRQ for an unregistered DMA channel:
+ * let's clear the interrupts and disable it.
+ */
+ printk(KERN_WARNING "spurious IRQ for"
+ " DMA channel %d\n", i);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dma_err_handler(int irq, void *dev_id)
+{
+ int i, dint;
+
+ dint = readl(DMAC_IESR);
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ if (dint & DMAC_CHANNEL(i)) {
+ struct dma_channel *channel = &dma_channels[i];
+
+ /* Clear Err interrupt of channel i */
+ writel(DMAC_CHANNEL(i), DMAC_IECR);
+ writel(0, DMAC_IECR);
+
+ if (channel->name && channel->err_handler) {
+ channel->err_handler(i, channel->data);
+ } else {
+ /*
+ * IRQ for an unregistered DMA channel:
+ * let's clear the interrupts and disable it.
+ */
+ printk(KERN_WARNING "spurious IRQ for"
+ " DMA channel %d\n", i);
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+int __init puv3_init_dma(void)
+{
+ int i, ret;
+
+ /* dma channel priorities on v8 processors:
+ * ch 0 - 1 <--> (0) DMA_PRIO_HIGH
+ * ch 2 - 3 <--> (1) DMA_PRIO_MEDIUM
+ * ch 4 - 5 <--> (2) DMA_PRIO_LOW
+ */
+ for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ puv3_stop_dma(i);
+ dma_channels[i].name = NULL;
+ dma_channels[i].prio = min((i & 0x7) >> 1, DMA_PRIO_LOW);
+ }
+
+ ret = request_irq(IRQ_DMA, dma_irq_handler, 0, "DMA", NULL);
+ if (ret) {
+ printk(KERN_CRIT "Can't register IRQ for DMA\n");
+ return ret;
+ }
+
+ ret = request_irq(IRQ_DMAERR, dma_err_handler, 0, "DMAERR", NULL);
+ if (ret) {
+ printk(KERN_CRIT "Can't register IRQ for DMAERR\n");
+		free_irq(IRQ_DMA, NULL);	/* dev_id was NULL at request */
+ return ret;
+ }
+
+ return 0;
+}
+
+postcore_initcall(puv3_init_dma);
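+
+/*
+ * Illustrative example; the handler and device names are hypothetical.
+ * If the requested priority group is full, the request falls back to a
+ * higher-priority group before failing:
+ *
+ *	static void mydev_dma_irq(int ch, void *data);	// hypothetical
+ *
+ *	int ch = puv3_request_dma("mydev", DMA_PRIO_LOW,
+ *			mydev_dma_irq, NULL, NULL);
+ *	if (ch < 0)
+ *		return ch;	// -ENODEV: no channel available
+ */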
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
new file mode 100644
index 000000000000..3922255f1fa8
--- /dev/null
+++ b/arch/unicore32/kernel/early_printk.c
@@ -0,0 +1,59 @@
+/*
+ * linux/arch/unicore32/kernel/early_printk.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <mach/ocd.h>
+
+/* On-Chip-Debugger functions */
+
+static void early_ocd_write(struct console *con, const char *s, unsigned n)
+{
+ while (*s && n-- > 0) {
+ if (*s == '\n')
+ ocd_putc((int)'\r');
+ ocd_putc((int)*s);
+ s++;
+ }
+}
+
+static struct console early_ocd_console = {
+ .name = "earlyocd",
+ .write = early_ocd_write,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+/* Direct interface for emergencies */
+static struct console *early_console = &early_ocd_console;
+
+static int __initdata keep_early;
+
+static int __init setup_early_printk(char *buf)
+{
+ if (!buf)
+ return 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "ocd", 3))
+ early_console = &early_ocd_console;
+
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+ return 0;
+}
+early_param("earlyprintk", setup_early_printk);
diff --git a/arch/unicore32/kernel/elf.c b/arch/unicore32/kernel/elf.c
new file mode 100644
index 000000000000..0a176734fefa
--- /dev/null
+++ b/arch/unicore32/kernel/elf.c
@@ -0,0 +1,38 @@
+/*
+ * linux/arch/unicore32/kernel/elf.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/personality.h>
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+	/* Make sure it's a UniCore executable */
+ if (x->e_machine != EM_UNICORE)
+ return 0;
+
+ /* Make sure the entry address is reasonable */
+ if (x->e_entry & 3)
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
+
+void elf_set_personality(const struct elf32_hdr *x)
+{
+ unsigned int personality = PER_LINUX;
+
+ set_personality(personality);
+}
+EXPORT_SYMBOL(elf_set_personality);
diff --git a/arch/unicore32/kernel/entry.S b/arch/unicore32/kernel/entry.S
new file mode 100644
index 000000000000..00a259f9819e
--- /dev/null
+++ b/arch/unicore32/kernel/entry.S
@@ -0,0 +1,824 @@
+/*
+ * linux/arch/unicore32/kernel/entry.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/unistd.h>
+#include <generated/asm-offsets.h>
+#include "debug-macro.S"
+
+@
+@ Most of the stack format comes from struct pt_regs, but with
+@ the addition of 8 bytes for storing syscall args 5 and 6.
+@
+#define S_OFF 8
+
+/*
+ * The SWI code relies on the fact that R0 is at the bottom of the stack
+ * (due to slow/fast restore user regs).
+ */
+#if S_R0 != 0
+#error "Please fix"
+#endif
+
+ .macro zero_fp
+#ifdef CONFIG_FRAME_POINTER
+ mov fp, #0
+#endif
+ .endm
+
+ .macro alignment_trap, rtemp
+#ifdef CONFIG_ALIGNMENT_TRAP
+ ldw \rtemp, .LCcralign
+ ldw \rtemp, [\rtemp]
+ movc p0.c1, \rtemp, #0
+#endif
+ .endm
+
+ .macro load_user_sp_lr, rd, rtemp, offset = 0
+ mov \rtemp, asr
+ xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
+ mov.a asr, \rtemp @ switch to the SUSR mode
+
+ ldw sp, [\rd+], #\offset @ load sp_user
+ ldw lr, [\rd+], #\offset + 4 @ load lr_user
+
+ xor \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
+ mov.a asr, \rtemp @ switch back to the PRIV mode
+ .endm
+
+ .macro priv_exit, rpsr
+ mov.a bsr, \rpsr
+ ldm.w (r0 - r15), [sp]+
+ ldm.b (r16 - pc), [sp]+ @ load r0 - pc, asr
+ .endm
+
+ .macro restore_user_regs, fast = 0, offset = 0
+ ldw r1, [sp+], #\offset + S_PSR @ get calling asr
+ ldw lr, [sp+], #\offset + S_PC @ get pc
+ mov.a bsr, r1 @ save in bsr_priv
+ .if \fast
+ add sp, sp, #\offset + S_R1 @ r0 is syscall return value
+ ldm.w (r1 - r15), [sp]+ @ get calling r1 - r15
+ ldur (r16 - lr), [sp]+ @ get calling r16 - lr
+ .else
+ ldm.w (r0 - r15), [sp]+ @ get calling r0 - r15
+ ldur (r16 - lr), [sp]+ @ get calling r16 - lr
+ .endif
+ nop
+ add sp, sp, #S_FRAME_SIZE - S_R16
+ mov.a pc, lr @ return
+ @ and move bsr_priv into asr
+ .endm
+
+ .macro get_thread_info, rd
+ mov \rd, sp >> #13
+ mov \rd, \rd << #13
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+ ldw \base, =(PKUNITY_INTC_BASE)
+ ldw \irqstat, [\base+], #0xC @ INTC_ICIP
+ ldw \tmp, [\base+], #0x4 @ INTC_ICMR
+ and.a \irqstat, \irqstat, \tmp
+ beq 1001f
+ cntlz \irqnr, \irqstat
+ rsub \irqnr, \irqnr, #31
+1001: /* EQ will be set if no irqs pending */
+ .endm
+
+#ifdef CONFIG_DEBUG_LL
+ .macro printreg, reg, temp
+ adr \temp, 901f
+ stm (r0-r3), [\temp]+
+ stw lr, [\temp+], #0x10
+ mov r0, \reg
+ b.l printhex8
+ mov r0, #':'
+ b.l printch
+ mov r0, pc
+ b.l printhex8
+ adr r0, 902f
+ b.l printascii
+ adr \temp, 901f
+ ldm (r0-r3), [\temp]+
+ ldw lr, [\temp+], #0x10
+ b 903f
+901: .word 0, 0, 0, 0, 0 @ r0-r3, lr
+902: .asciz ": epip4d\n"
+ .align
+903:
+ .endm
+#endif
+
+/*
+ * These are the registers used in the syscall handler, and allow us to
+ * have in theory up to 7 arguments to a function - r0 to r6.
+ *
+ * Note that tbl == why is intentional.
+ *
+ * We must set at least "tsk" and "why" when calling ret_with_reschedule.
+ */
+scno .req r21 @ syscall number
+tbl .req r22 @ syscall table pointer
+why .req r22 @ Linux syscall (!= 0)
+tsk .req r23 @ current thread_info
+
+/*
+ * Interrupt handling. Preserves r17, r18, r19
+ */
+ .macro intr_handler
+1: get_irqnr_and_base r0, r6, r5, lr
+ beq 2f
+ mov r1, sp
+ @
+ @ routine called with r0 = irq number, r1 = struct pt_regs *
+ @
+ adr lr, 1b
+ b asm_do_IRQ
+2:
+ .endm
+
+/*
+ * PRIV mode handlers
+ */
+ .macro priv_entry
+ sub sp, sp, #(S_FRAME_SIZE - 4)
+ stm (r1 - r15), [sp]+
+ add r5, sp, #S_R15
+ stm (r16 - r28), [r5]+
+
+ ldm (r1 - r3), [r0]+
+ add r5, sp, #S_SP - 4 @ here for interlock avoidance
+ mov r4, #-1 @ "" "" "" ""
+ add r0, sp, #(S_FRAME_SIZE - 4)
+ stw.w r1, [sp+], #-4 @ save the "real" r0 copied
+ @ from the exception stack
+
+ mov r1, lr
+
+ @
+ @ We are now ready to fill in the remaining blanks on the stack:
+ @
+ @ r0 - sp_priv
+ @ r1 - lr_priv
+ @ r2 - lr_<exception>, already fixed up for correct return/restart
+ @ r3 - bsr_<exception>
+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @
+ stm (r0 - r4), [r5]+
+ .endm
+
+/*
+ * User mode handlers
+ *
+ */
+ .macro user_entry
+ sub sp, sp, #S_FRAME_SIZE
+ stm (r1 - r15), [sp+]
+ add r4, sp, #S_R16
+ stm (r16 - r28), [r4]+
+
+ ldm (r1 - r3), [r0]+
+ add r0, sp, #S_PC @ here for interlock avoidance
+ mov r4, #-1 @ "" "" "" ""
+
+ stw r1, [sp] @ save the "real" r0 copied
+ @ from the exception stack
+
+ @
+ @ We are now ready to fill in the remaining blanks on the stack:
+ @
+ @ r2 - lr_<exception>, already fixed up for correct return/restart
+ @ r3 - bsr_<exception>
+ @ r4 - orig_r0 (see pt_regs definition in ptrace.h)
+ @
+ @ Also, separately save sp_user and lr_user
+ @
+ stm (r2 - r4), [r0]+
+ stur (sp, lr), [r0-]
+
+ @
+ @ Enable the alignment trap while in kernel mode
+ @
+ alignment_trap r0
+
+ @
+ @ Clear FP to mark the first stack frame
+ @
+ zero_fp
+ .endm
+
+ .text
+
+@
+@ __invalid - generic code for failed exception
+@ (re-entrant version of handlers)
+@
+__invalid:
+ sub sp, sp, #S_FRAME_SIZE
+ stm (r1 - r15), [sp+]
+ add r1, sp, #S_R16
+ stm (r16 - r28, sp, lr), [r1]+
+
+ zero_fp
+
+ ldm (r4 - r6), [r0]+
+ add r0, sp, #S_PC @ here for interlock avoidance
+ mov r7, #-1 @ "" "" "" ""
+ stw r4, [sp] @ save preserved r0
+ stm (r5 - r7), [r0]+ @ lr_<exception>,
+ @ asr_<exception>, "old_r0"
+
+ mov r0, sp
+ mov r1, asr
+ b bad_mode
+ENDPROC(__invalid)
+
+ .align 5
+__dabt_priv:
+ priv_entry
+
+ @
+ @ get ready to re-enable interrupts if appropriate
+ @
+ mov r17, asr
+ cand.a r3, #PSR_I_BIT
+ bne 1f
+ andn r17, r17, #PSR_I_BIT
+1:
+
+ @
+ @ Call the processor-specific abort handler:
+ @
+ @ r2 - aborted context pc
+ @ r3 - aborted context asr
+ @
+ @ The abort handler must return the aborted address in r0, and
+ @ the fault status register in r1.
+ @
+ movc r1, p0.c3, #0 @ get FSR
+ movc r0, p0.c4, #0 @ get FAR
+
+ @
+ @ set desired INTR state, then call main handler
+ @
+ mov.a asr, r17
+ mov r2, sp
+ b.l do_DataAbort
+
+ @
+ @ INTRs off again before pulling preserved data off the stack
+ @
+ disable_irq r0
+
+ @
+ @ restore BSR and restart the instruction
+ @
+ ldw r2, [sp+], #S_PSR
+ priv_exit r2 @ return from exception
+ENDPROC(__dabt_priv)
+
+ .align 5
+__intr_priv:
+ priv_entry
+
+ intr_handler
+
+ mov r0, #0 @ epip4d
+ movc p0.c5, r0, #14
+ nop; nop; nop; nop; nop; nop; nop; nop
+
+ ldw r4, [sp+], #S_PSR @ irqs are already disabled
+
+ priv_exit r4 @ return from exception
+ENDPROC(__intr_priv)
+
+ .ltorg
+
+ .align 5
+__extn_priv:
+ priv_entry
+
+ mov r0, sp @ struct pt_regs *regs
+ mov r1, asr
+ b bad_mode @ not supported
+ENDPROC(__extn_priv)
+
+ .align 5
+__pabt_priv:
+ priv_entry
+
+ @
+ @ re-enable interrupts if appropriate
+ @
+ mov r17, asr
+ cand.a r3, #PSR_I_BIT
+ bne 1f
+ andn r17, r17, #PSR_I_BIT
+1:
+
+ @
+ @ set args, then call main handler
+ @
+ @ r0 - address of faulting instruction
+ @ r1 - pointer to registers on stack
+ @
+ mov r0, r2 @ pass address of aborted instruction
+ mov r1, #5
+ mov.a asr, r17
+ mov r2, sp @ regs
+ b.l do_PrefetchAbort @ call abort handler
+
+ @
+ @ INTRs off again before pulling preserved data off the stack
+ @
+ disable_irq r0
+
+ @
+ @ restore BSR and restart the instruction
+ @
+ ldw r2, [sp+], #S_PSR
+ priv_exit r2 @ return from exception
+ENDPROC(__pabt_priv)
+
+ .align 5
+.LCcralign:
+ .word cr_alignment
+
+ .align 5
+__dabt_user:
+ user_entry
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ cff ip, s31
+ cand.a ip, #0x08000000 @ FPU exception traps?
+ beq 209f
+
+ ldw ip, [sp+], #S_PC
+ add ip, ip, #4
+ stw ip, [sp+], #S_PC
+ @
+ @ fall through to the emulation code, which returns using r19 if
+ @ it has emulated the instruction, or the more conventional lr
+ @ if we are to treat this as a real extended instruction
+ @
+ @ r0 - instruction
+ @
+1: ldw.u r0, [r2]
+ adr r19, ret_from_exception
+ adr lr, 209f
+ @
+ @ fallthrough to call do_uc_f64
+ @
+/*
+ * Check whether the instruction is a co-processor instruction.
+ * If yes, we need to call the relevant co-processor handler.
+ *
+ * Note that we don't do a full check here for the co-processor
+ * instructions; all instructions with bit 27 set are well
+ * defined. The only instructions that should fault are the
+ * co-processor instructions.
+ *
+ * Emulators may wish to make use of the following registers:
+ * r0 = instruction opcode.
+ * r2 = PC
+ * r19 = normal "successful" return address
+ * r20 = this thread's thread_info structure.
+ * lr = unrecognised instruction return address
+ */
+ get_thread_info r20 @ get current thread
+ and r8, r0, #0x00003c00 @ mask out CP number
+ mov r7, #1
+ stb r7, [r20+], #TI_USED_CP + 2 @ set appropriate used_cp[]
+
+ @ F64 hardware support entry point.
+ @ r0 = faulted instruction
+ @ r19 = return address
+ @ r20 = fp_state
+ enable_irq r4
+ add r20, r20, #TI_FPSTATE @ r20 = workspace
+ cff r1, s31 @ get fpu FPSCR
+ andn r2, r1, #0x08000000
+ ctf r2, s31 @ clear 27 bit
+ mov r2, sp @ nothing stacked - regdump is at TOS
+ mov lr, r19 @ setup for a return to the user code
+
+ @ Now call the C code to package up the bounce to the support code
+ @ r0 holds the trigger instruction
+ @ r1 holds the FPSCR value
+ @ r2 pointer to register dump
+ b ucf64_exchandler
+209:
+#endif
+ @
+ @ Call the processor-specific abort handler:
+ @
+ @ r2 - aborted context pc
+ @ r3 - aborted context asr
+ @
+ @ The abort handler must return the aborted address in r0, and
+ @ the fault status register in r1.
+ @
+ movc r1, p0.c3, #0 @ get FSR
+ movc r0, p0.c4, #0 @ get FAR
+
+ @
+ @ INTRs on, then call the main handler
+ @
+ enable_irq r2
+ mov r2, sp
+ adr lr, ret_from_exception
+ b do_DataAbort
+ENDPROC(__dabt_user)
+
+ .align 5
+__intr_user:
+ user_entry
+
+ get_thread_info tsk
+
+ intr_handler
+
+ mov why, #0
+ b ret_to_user
+ENDPROC(__intr_user)
+
+ .ltorg
+
+ .align 5
+__extn_user:
+ user_entry
+
+ mov r0, sp
+ mov r1, asr
+ b bad_mode
+ENDPROC(__extn_user)
+
+ .align 5
+__pabt_user:
+ user_entry
+
+ mov r0, r2 @ pass address of aborted instruction.
+ mov r1, #5
+ enable_irq r1 @ Enable interrupts
+ mov r2, sp @ regs
+ b.l do_PrefetchAbort @ call abort handler
+ /* fall through */
+/*
+ * This is the return code to user mode for abort handlers
+ */
+ENTRY(ret_from_exception)
+ get_thread_info tsk
+ mov why, #0
+ b ret_to_user
+ENDPROC(__pabt_user)
+ENDPROC(ret_from_exception)
+
+/*
+ * Register switch for UniCore V2 processors
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+ add ip, r1, #TI_CPU_SAVE
+ stm.w (r4 - r15), [ip]+
+ stm.w (r16 - r27, sp, lr), [ip]+
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ add ip, r1, #TI_FPSTATE
+ sfm.w (f0 - f7 ), [ip]+
+ sfm.w (f8 - f15), [ip]+
+ sfm.w (f16 - f23), [ip]+
+ sfm.w (f24 - f31), [ip]+
+ cff r4, s31
+ stw r4, [ip]
+
+ add ip, r2, #TI_FPSTATE
+ lfm.w (f0 - f7 ), [ip]+
+ lfm.w (f8 - f15), [ip]+
+ lfm.w (f16 - f23), [ip]+
+ lfm.w (f24 - f31), [ip]+
+ ldw r4, [ip]
+ ctf r4, s31
+#endif
+ add ip, r2, #TI_CPU_SAVE
+ ldm.w (r4 - r15), [ip]+
+ ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously
+ENDPROC(__switch_to)
+
+ .align 5
+/*
+ * This is the fast syscall return path. We do as little as
+ * possible here, and this includes saving r0 back into the PRIV
+ * stack.
+ */
+ret_fast_syscall:
+ disable_irq r1 @ disable interrupts
+ ldw r1, [tsk+], #TI_FLAGS
+ cand.a r1, #_TIF_WORK_MASK
+ bne fast_work_pending
+
+ @ fast_restore_user_regs
+ restore_user_regs fast = 1, offset = S_OFF
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+fast_work_pending:
+ stw.w r0, [sp+], #S_R0+S_OFF @ returned r0
+work_pending:
+ cand.a r1, #_TIF_NEED_RESCHED
+ bne work_resched
+ cand.a r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME
+ beq no_work_pending
+ mov r0, sp @ 'regs'
+ mov r2, why @ 'syscall'
+ cand.a r1, #_TIF_SIGPENDING @ delivering a signal?
+ cmovne why, #0 @ prevent further restarts
+ b.l do_notify_resume
+ b ret_slow_syscall @ Check work again
+
+work_resched:
+ b.l schedule
+/*
+ * "slow" syscall return path. "why" tells us if this was a real syscall.
+ */
+ENTRY(ret_to_user)
+ret_slow_syscall:
+ disable_irq r1 @ disable interrupts
+ get_thread_info tsk @ epip4d, one path error?!
+ ldw r1, [tsk+], #TI_FLAGS
+ cand.a r1, #_TIF_WORK_MASK
+ bne work_pending
+no_work_pending:
+ @ slow_restore_user_regs
+ restore_user_regs fast = 0, offset = 0
+ENDPROC(ret_to_user)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+ b.l schedule_tail
+ get_thread_info tsk
+ ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing
+ mov why, #1
+ cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ beq ret_slow_syscall
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+ b.l syscall_trace
+ b ret_slow_syscall
+ENDPROC(ret_from_fork)
+
+/*=============================================================================
+ * SWI handler
+ *-----------------------------------------------------------------------------
+ */
+ .align 5
+ENTRY(vector_swi)
+ sub sp, sp, #S_FRAME_SIZE
+ stm (r0 - r15), [sp]+ @ Calling r0 - r15
+ add r8, sp, #S_R16
+ stm (r16 - r28), [r8]+ @ Calling r16 - r28
+ add r8, sp, #S_PC
+ stur (sp, lr), [r8-] @ Calling sp, lr
+ mov r8, bsr @ called from non-REAL mode
+ stw lr, [sp+], #S_PC @ Save calling PC
+ stw r8, [sp+], #S_PSR @ Save ASR
+ stw r0, [sp+], #S_OLD_R0 @ Save OLD_R0
+ zero_fp
+
+ /*
+ * Get the system call number.
+ */
+ sub ip, lr, #4
+ ldw.u scno, [ip] @ get SWI instruction
+
+#ifdef CONFIG_ALIGNMENT_TRAP
+ ldw ip, __cr_alignment
+ ldw ip, [ip]
+ movc p0.c1, ip, #0 @ update control register
+#endif
+ enable_irq ip
+
+ get_thread_info tsk
+ ldw tbl, =sys_call_table @ load syscall table pointer
+
+ andn scno, scno, #0xff000000 @ mask off SWI op-code
+ andn scno, scno, #0x00ff0000 @ mask off SWI op-code
+
+ stm.w (r4, r5), [sp-] @ push fifth and sixth args
+ ldw ip, [tsk+], #TI_FLAGS @ check for syscall tracing
+ cand.a ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ bne __sys_trace
+
+ csub.a scno, #__NR_syscalls @ check upper syscall limit
+ adr lr, ret_fast_syscall @ return address
+ bea 1f
+ ldw pc, [tbl+], scno << #2 @ call sys_* routine
+1:
+ add r1, sp, #S_OFF
+2: mov why, #0 @ no longer a real syscall
+ b sys_ni_syscall @ not private func
+
+ /*
+ * This is the really slow path. We're going to be doing
+ * context switches, and waiting for our parent to respond.
+ */
+__sys_trace:
+ mov r2, scno
+ add r1, sp, #S_OFF
+ mov r0, #0 @ trace entry [IP = 0]
+ b.l syscall_trace
+
+ adr lr, __sys_trace_return @ return address
+ mov scno, r0 @ syscall number (possibly new)
+ add r1, sp, #S_R0 + S_OFF @ pointer to regs
+ csub.a scno, #__NR_syscalls @ check upper syscall limit
+ bea 2b
+ ldm (r0 - r3), [r1]+ @ have to reload r0 - r3
+ ldw pc, [tbl+], scno << #2 @ call sys_* routine
+
+__sys_trace_return:
+ stw.w r0, [sp+], #S_R0 + S_OFF @ save returned r0
+ mov r2, scno
+ mov r1, sp
+ mov r0, #1 @ trace exit [IP = 1]
+ b.l syscall_trace
+ b ret_slow_syscall
+
+ .align 5
+#ifdef CONFIG_ALIGNMENT_TRAP
+ .type __cr_alignment, #object
+__cr_alignment:
+ .word cr_alignment
+#endif
+ .ltorg
+
+ENTRY(sys_execve)
+ add r3, sp, #S_OFF
+ b __sys_execve
+ENDPROC(sys_execve)
+
+ENTRY(sys_clone)
+ add ip, sp, #S_OFF
+ stw ip, [sp+], #4
+ b __sys_clone
+ENDPROC(sys_clone)
+
+ENTRY(sys_rt_sigreturn)
+ add r0, sp, #S_OFF
+ mov why, #0 @ prevent syscall restart handling
+ b __sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn)
+
+ENTRY(sys_sigaltstack)
+ ldw r2, [sp+], #S_OFF + S_SP
+ b do_sigaltstack
+ENDPROC(sys_sigaltstack)
+
+ __INIT
+
+/*
+ * Vector stubs.
+ *
+ * This code is copied to 0xffff0200 so we can use branches in the
+ * vectors, rather than ldw's. Note that this code must not
+ * exceed 0x300 bytes.
+ *
+ * Common stub entry macro:
+ * Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
+ *
+ * SP points to a minimal amount of processor-private memory, the address
+ * of which is copied into r0 for the mode specific abort handler.
+ */
+ .macro vector_stub, name, mode
+ .align 5
+
+vector_\name:
+ @
+ @ Save r0, lr_<exception> (parent PC) and bsr_<exception>
+ @ (parent ASR)
+ @
+ stw r0, [sp]
+ stw lr, [sp+], #4 @ save r0, lr
+ mov lr, bsr
+ stw lr, [sp+], #8 @ save bsr
+
+ @
+ @ Prepare for PRIV mode. INTRs remain disabled.
+ @
+ mov r0, asr
+ xor r0, r0, #(\mode ^ PRIV_MODE)
+ mov.a bsr, r0
+
+ @
+ @ the branch table must immediately follow this code
+ @
+ and lr, lr, #0x03
+ add lr, lr, #1
+ mov r0, sp
+ ldw lr, [pc+], lr << #2
+ mov.a pc, lr @ branch to handler in PRIV mode
+ENDPROC(vector_\name)
+ .align 2
+ @ handler addresses follow this label
+ .endm
+
+ .globl __stubs_start
+__stubs_start:
+/*
+ * Interrupt dispatcher
+ */
+ vector_stub intr, INTR_MODE
+
+ .long __intr_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2
+ .long __intr_priv @ 3 (PRIV)
+
+/*
+ * Data abort dispatcher
+ * Enter in ABT mode, bsr = USER ASR, lr = USER PC
+ */
+ vector_stub dabt, ABRT_MODE
+
+ .long __dabt_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2 (INTR)
+ .long __dabt_priv @ 3 (PRIV)
+
+/*
+ * Prefetch abort dispatcher
+ * Enter in ABT mode, bsr = USER ASR, lr = USER PC
+ */
+ vector_stub pabt, ABRT_MODE
+
+ .long __pabt_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2 (INTR)
+ .long __pabt_priv @ 3 (PRIV)
+
+/*
+ * Undef instr entry dispatcher
+ * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
+ */
+ vector_stub extn, EXTN_MODE
+
+ .long __extn_user @ 0 (USER)
+ .long __invalid @ 1
+ .long __invalid @ 2 (INTR)
+ .long __extn_priv @ 3 (PRIV)
+
+/*
+ * We group all the following data together to optimise
+ * for CPUs with separate I & D caches.
+ */
+ .align 5
+
+.LCvswi:
+ .word vector_swi
+
+ .globl __stubs_end
+__stubs_end:
+
+ .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+
+ .globl __vectors_start
+__vectors_start:
+ jepriv SYS_ERROR0
+ b vector_extn + stubs_offset
+ ldw pc, .LCvswi + stubs_offset
+ b vector_pabt + stubs_offset
+ b vector_dabt + stubs_offset
+ jepriv SYS_ERROR0
+ b vector_intr + stubs_offset
+ jepriv SYS_ERROR0
+
+ .globl __vectors_end
+__vectors_end:
+
+ .data
+
+ .globl cr_alignment
+ .globl cr_no_alignment
+cr_alignment:
+ .space 4
+cr_no_alignment:
+ .space 4
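To make the `stubs_offset` trick above concrete, here is a hedged arithmetic sketch with made-up link addresses: because branches are PC-relative, encoding a target of `stub + stubs_offset` keeps the right distance once the vectors are copied to the vectors page and the stubs to page + 0x200:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long vectors_start = 0x1000;	/* hypothetical link address */
    	unsigned long stubs_start   = 0x2000;	/* hypothetical link address */
    	unsigned long stub          = 0x2040;	/* one stub inside the section */

    	long stubs_offset = vectors_start + 0x200 - stubs_start;

    	/* offset of the branch target within the copied vectors page:
    	 * 0x200 (where the stubs land) + the stub's offset in its section */
    	printf("target at page + 0x%lx\n", stub + stubs_offset - vectors_start);
    	return 0;				/* prints: page + 0x240 */
    }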
diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c
new file mode 100644
index 000000000000..282a60ac82ba
--- /dev/null
+++ b/arch/unicore32/kernel/fpu-ucf64.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/unicore32/kernel/fpu-ucf64.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/fpu-ucf64.h>
+
+/*
+ * A special flag to tell the normalisation code not to normalise.
+ */
+#define F64_NAN_FLAG 0x100
+
+/*
+ * A bit pattern used to indicate the initial (unset) value of the
+ * exception mask, in case nothing handles an instruction. This
+ * doesn't include the NAN flag, which gets masked out before
+ * we check for an error.
+ */
+#define F64_EXCEPTION_ERROR ((u32)-1 & ~F64_NAN_FLAG)
+
+/*
+ * Since we aren't building with -mfpu=f64, we need to code
+ * these instructions using their MRC/MCR equivalents.
+ */
+#define f64reg(_f64_) #_f64_
+
+#define cff(_f64_) ({ \
+ u32 __v; \
+ asm("cff %0, " f64reg(_f64_) "@ fmrx %0, " #_f64_ \
+ : "=r" (__v) : : "cc"); \
+ __v; \
+ })
+
+#define ctf(_f64_, _var_) \
+ asm("ctf %0, " f64reg(_f64_) "@ fmxr " #_f64_ ", %0" \
+ : : "r" (_var_) : "cc")
+
+/*
+ * Raise a SIGFPE for the current process.
+ * sicode describes the signal being raised.
+ */
+void ucf64_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
+{
+ siginfo_t info;
+
+ memset(&info, 0, sizeof(info));
+
+ info.si_signo = SIGFPE;
+ info.si_code = sicode;
+ info.si_addr = (void __user *)(instruction_pointer(regs) - 4);
+
+ /*
+ * This is the same as NWFPE, because it's not clear what
+ * this is used for
+ */
+ current->thread.error_code = 0;
+ current->thread.trap_no = 6;
+
+ send_sig_info(SIGFPE, &info, current);
+}
+
+/*
+ * Handle exceptions of UniCore-F64.
+ */
+void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs)
+{
+ u32 tmp = fpexc;
+ u32 exc = F64_EXCEPTION_ERROR & fpexc;
+
+ pr_debug("UniCore-F64: instruction %08x fpscr %08x\n",
+ inst, fpexc);
+
+ if (exc & FPSCR_CMPINSTR_BIT) {
+ if (exc & FPSCR_CON)
+ tmp |= FPSCR_CON;
+ else
+ tmp &= ~(FPSCR_CON);
+ exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON);
+ } else {
+ pr_debug(KERN_ERR "UniCore-F64 Error: unhandled exceptions\n");
+ pr_debug(KERN_ERR "UniCore-F64 FPSCR 0x%08x INST 0x%08x\n",
+ cff(FPSCR), inst);
+
+ ucf64_raise_sigfpe(0, regs);
+ return;
+ }
+
+ /*
+ * Update the FPSCR with the additional exception flags.
+ * Comparison instructions always return at least one of
+ * these flags set.
+ */
+ tmp &= ~(FPSCR_TRAP | FPSCR_IOS | FPSCR_OFS | FPSCR_UFS |
+ FPSCR_IXS | FPSCR_HIS | FPSCR_IOC | FPSCR_OFC |
+ FPSCR_UFC | FPSCR_IXC | FPSCR_HIC);
+
+ tmp |= exc;
+ ctf(FPSCR, tmp);
+}
+
+/*
+ * F64 support code initialisation.
+ */
+static int __init ucf64_init(void)
+{
+ ctf(FPSCR, 0x0); /* FPSCR_UFE | FPSCR_NDE perhaps better */
+
+ printk(KERN_INFO "Enable UniCore-F64 support.\n");
+
+ return 0;
+}
+
+late_initcall(ucf64_init);
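A hedged sketch of the read-modify-write done in ucf64_exchandler(): stale trap and status bits are cleared before the newly raised flags are ORed back in. The bit values below are stand-ins, not the real FPSCR layout:

    #include <stdio.h>

    #define FPSCR_TRAP 0x01			/* stand-in bit values */
    #define FPSCR_IOC  0x02
    #define FPSCR_OFC  0x04

    int main(void)
    {
    	unsigned int fpscr = FPSCR_TRAP | FPSCR_OFC;	/* stale state */
    	unsigned int exc   = FPSCR_IOC;			/* newly raised */

    	fpscr &= ~(FPSCR_TRAP | FPSCR_IOC | FPSCR_OFC);	/* clear old flags */
    	fpscr |= exc;					/* record new ones */
    	printf("FPSCR now 0x%02x\n", fpscr);		/* prints 0x02 */
    	return 0;
    }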
diff --git a/arch/unicore32/kernel/gpio.c b/arch/unicore32/kernel/gpio.c
new file mode 100644
index 000000000000..cb12ec39552c
--- /dev/null
+++ b/arch/unicore32/kernel/gpio.c
@@ -0,0 +1,122 @@
+/*
+ * linux/arch/unicore32/kernel/gpio.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* in FPGA, no GPIO support */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <mach/hardware.h>
+
+#ifdef CONFIG_LEDS
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+
+static const struct gpio_led puv3_gpio_leds[] = {
+ { .name = "cpuhealth", .gpio = GPO_CPU_HEALTH, .active_low = 0,
+ .default_trigger = "heartbeat", },
+ { .name = "hdd_led", .gpio = GPO_HDD_LED, .active_low = 1,
+ .default_trigger = "ide-disk", },
+};
+
+static const struct gpio_led_platform_data puv3_gpio_led_data = {
+ .num_leds = ARRAY_SIZE(puv3_gpio_leds),
+ .leds = (void *) puv3_gpio_leds,
+};
+
+static struct platform_device puv3_gpio_gpio_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = (void *) &puv3_gpio_led_data,
+ }
+};
+
+static int __init puv3_gpio_leds_init(void)
+{
+ platform_device_register(&puv3_gpio_gpio_leds);
+ return 0;
+}
+
+device_initcall(puv3_gpio_leds_init);
+#endif
+
+static int puv3_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ return readl(GPIO_GPLR) & GPIO_GPIO(offset);
+}
+
+static void puv3_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ if (value)
+ writel(GPIO_GPIO(offset), GPIO_GPSR);
+ else
+ writel(GPIO_GPIO(offset), GPIO_GPCR);
+}
+
+static int puv3_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ writel(readl(GPIO_GPDR) & ~GPIO_GPIO(offset), GPIO_GPDR);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static int puv3_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ puv3_gpio_set(chip, offset, value);
+ writel(readl(GPIO_GPDR) | GPIO_GPIO(offset), GPIO_GPDR);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static struct gpio_chip puv3_gpio_chip = {
+ .label = "gpio",
+ .direction_input = puv3_direction_input,
+ .direction_output = puv3_direction_output,
+ .set = puv3_gpio_set,
+ .get = puv3_gpio_get,
+ .base = 0,
+ .ngpio = GPIO_MAX + 1,
+};
+
+void __init puv3_init_gpio(void)
+{
+ writel(GPIO_DIR, GPIO_GPDR);
+#if defined(CONFIG_PUV3_NB0916) || defined(CONFIG_PUV3_SMW0919) \
+ || defined(CONFIG_PUV3_DB0913)
+ gpio_set_value(GPO_WIFI_EN, 1);
+ gpio_set_value(GPO_HDD_LED, 1);
+ gpio_set_value(GPO_VGA_EN, 1);
+ gpio_set_value(GPO_LCD_EN, 1);
+ gpio_set_value(GPO_CAM_PWR_EN, 0);
+ gpio_set_value(GPO_LCD_VCC_EN, 1);
+ gpio_set_value(GPO_SOFT_OFF, 1);
+ gpio_set_value(GPO_BT_EN, 1);
+ gpio_set_value(GPO_FAN_ON, 0);
+ gpio_set_value(GPO_SPKR, 0);
+ gpio_set_value(GPO_CPU_HEALTH, 1);
+ gpio_set_value(GPO_LAN_SEL, 1);
+/*
+ * DO NOT modify GPO_SET_V1 and GPO_SET_V2 in the kernel
+ * gpio_set_value(GPO_SET_V1, 1);
+ * gpio_set_value(GPO_SET_V2, 1);
+ */
+#endif
+ gpiochip_add(&puv3_gpio_chip);
+}
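A hedged sketch of how a driver of this era would drive one of these lines through the integer gpiolib API; GPO_CPU_HEALTH is the platform-provided GPIO number, and error handling is trimmed for brevity:

    #include <linux/gpio.h>

    static int __init led_demo_init(void)
    {
    	int err = gpio_request(GPO_CPU_HEALTH, "demo-led");
    	if (err)
    		return err;

    	gpio_direction_output(GPO_CPU_HEALTH, 1);	/* reaches puv3_direction_output() */
    	gpio_set_value(GPO_CPU_HEALTH, 0);		/* reaches puv3_gpio_set() */
    	gpio_free(GPO_CPU_HEALTH);
    	return 0;
    }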
diff --git a/arch/unicore32/kernel/head.S b/arch/unicore32/kernel/head.S
new file mode 100644
index 000000000000..92255f3ab6a7
--- /dev/null
+++ b/arch/unicore32/kernel/head.S
@@ -0,0 +1,252 @@
+/*
+ * linux/arch/unicore32/kernel/head.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/ptrace.h>
+#include <generated/asm-offsets.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+#include <asm/system.h>
+#include <asm/pgtable-hwdef.h>
+
+#if (PHYS_OFFSET & 0x003fffff)
+#error "PHYS_OFFSET must be at an even 4MiB boundary!"
+#endif
+
+#define KERNEL_RAM_VADDR (PAGE_OFFSET + KERNEL_IMAGE_START)
+#define KERNEL_RAM_PADDR (PHYS_OFFSET + KERNEL_IMAGE_START)
+
+#define KERNEL_PGD_PADDR (KERNEL_RAM_PADDR - 0x1000)
+#define KERNEL_PGD_VADDR (KERNEL_RAM_VADDR - 0x1000)
+
+#define KERNEL_START KERNEL_RAM_VADDR
+#define KERNEL_END _end
+
+/*
+ * swapper_pg_dir is the virtual address of the initial page table.
+ * We place the page tables 4K below KERNEL_RAM_VADDR. Therefore, we must
+ * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect
+ * the least significant 16 bits to be 0x8000, but we could probably
+ * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x1000.
+ */
+#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
+#error KERNEL_RAM_VADDR must start at 0xXXXX8000
+#endif
+
+ .globl swapper_pg_dir
+ .equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x1000
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * This is normally called from the decompressor code. The requirements
+ * are: MMU = off, D-cache = off, I-cache = don't care
+ *
+ * This code is mostly position independent, so if you link the kernel at
+ * 0xc0008000, you call this at __pa(0xc0008000).
+ */
+ __HEAD
+ENTRY(stext)
+ @ set asr
+ mov r0, #PRIV_MODE @ ensure priv mode
+ or r0, #PSR_R_BIT | PSR_I_BIT @ disable irqs
+ mov.a asr, r0
+
+ @ process identify
+ movc r0, p0.c0, #0 @ cpuid
+ movl r1, 0xff00ffff @ mask
+ movl r2, 0x4d000863 @ value
+ and r0, r1, r0
+ cxor.a r0, r2
+ bne __error_p @ invalid processor id
+
+ /*
+ * Clear the 4K level 1 swapper page table
+ */
+ movl r0, #KERNEL_PGD_PADDR @ page table address
+ mov r1, #0
+ add r2, r0, #0x1000
+101: stw.w r1, [r0]+, #4
+ stw.w r1, [r0]+, #4
+ stw.w r1, [r0]+, #4
+ stw.w r1, [r0]+, #4
+ cxor.a r0, r2
+ bne 101b
+
+ movl r4, #KERNEL_PGD_PADDR @ page table address
+ mov r7, #PMD_TYPE_SECT | PMD_PRESENT @ page size: section
+ or r7, r7, #PMD_SECT_CACHEABLE @ cacheable
+ or r7, r7, #PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC
+
+ /*
+ * Create identity mapping for first 4MB of kernel to
+ * cater for the MMU enable. This identity mapping
+ * will be removed by paging_init(). We use our current program
+ * counter to determine corresponding section base address.
+ */
+ mov r6, pc
+ mov r6, r6 >> #22 @ start of kernel section
+ or r1, r7, r6 << #22 @ flags + kernel base
+ stw r1, [r4+], r6 << #2 @ identity mapping
+
+ /*
+ * Now setup the pagetables for our kernel direct
+ * mapped region.
+ */
+ add r0, r4, #(KERNEL_START & 0xff000000) >> 20
+ stw.w r1, [r0+], #(KERNEL_START & 0x00c00000) >> 20
+ movl r6, #(KERNEL_END - 1)
+ add r0, r0, #4
+ add r6, r4, r6 >> #20
+102: csub.a r0, r6
+ add r1, r1, #1 << 22
+ bua 103f
+ stw.w r1, [r0]+, #4
+ b 102b
+103:
+ /*
+ * Then map first 4MB of ram in case it contains our boot params.
+ */
+ add r0, r4, #PAGE_OFFSET >> 20
+ or r6, r7, #(PHYS_OFFSET & 0xffc00000)
+ stw r6, [r0]
+
+ ldw r15, __switch_data @ address to jump to after
+
+ /*
+ * Initialise TLB, Caches, and MMU state ready to switch the MMU
+ * on.
+ */
+ mov r0, #0
+ movc p0.c5, r0, #28 @ cache invalidate all
+ nop8
+ movc p0.c6, r0, #6 @ TLB invalidate all
+ nop8
+
+ /*
+ * ..V. .... ..TB IDAM
+ * ..1. .... ..01 1111
+ */
+ movl r0, #0x201f @ control register setting
+
+ /*
+ * Setup common bits before finally enabling the MMU. Essentially
+ * this is just loading the page table pointer and domain access
+ * registers.
+ */
+ #ifndef CONFIG_ALIGNMENT_TRAP
+ andn r0, r0, #CR_A
+ #endif
+ #ifdef CONFIG_CPU_DCACHE_DISABLE
+ andn r0, r0, #CR_D
+ #endif
+ #ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+ andn r0, r0, #CR_B
+ #endif
+ #ifdef CONFIG_CPU_ICACHE_DISABLE
+ andn r0, r0, #CR_I
+ #endif
+
+ movc p0.c2, r4, #0 @ set pgd
+ b __turn_mmu_on
+ENDPROC(stext)
+
+/*
+ * Enable the MMU. This completely changes the structure of the visible
+ * memory space. You will not be able to trace execution through this.
+ *
+ * r0 = cp#0 control register
+ * r15 = *virtual* address to jump to upon completion
+ */
+ .align 5
+__turn_mmu_on:
+ mov r0, r0
+ movc p0.c1, r0, #0 @ write control reg
+ nop @ fetch inst by phys addr
+ mov pc, r15
+ nop8 @ fetch inst by phys addr
+ENDPROC(__turn_mmu_on)
+
+/*
+ * Setup the initial page tables. We only setup the barest
+ * amount which are required to get the kernel running, which
+ * generally means mapping in the kernel code.
+ *
+ * r9 = cpuid
+ * r10 = procinfo
+ *
+ * Returns:
+ * r0, r3, r6, r7 corrupted
+ * r4 = physical page table address
+ */
+ .ltorg
+
+ .align 2
+ .type __switch_data, %object
+__switch_data:
+ .long __mmap_switched
+ .long __bss_start @ r6
+ .long _end @ r7
+ .long cr_alignment @ r8
+ .long init_thread_union + THREAD_START_SP @ sp
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ *
+ * r0 = cp#0 control register
+ */
+__mmap_switched:
+ adr r3, __switch_data + 4
+
+ ldm.w (r6, r7, r8), [r3]+
+ ldw sp, [r3]
+
+ mov fp, #0 @ Clear BSS (and zero fp)
+203: csub.a r6, r7
+ bea 204f
+ stw.w fp, [r6]+,#4
+ b 203b
+204:
+ andn r1, r0, #CR_A @ Clear 'A' bit
+ stm (r0, r1), [r8]+ @ Save control register values
+ b start_kernel
+ENDPROC(__mmap_switched)
+
+/*
+ * Exception handling. Something went wrong and we can't proceed. We
+ * ought to tell the user, but since we don't have any guarantee that
+ * we're even running on the right architecture, we do virtually nothing.
+ *
+ * If CONFIG_DEBUG_LL is set we try to print out something about the error
+ * and hope for the best (useful if bootloader fails to pass a proper
+ * machine ID for example).
+ */
+__error_p:
+#ifdef CONFIG_DEBUG_LL
+ adr r0, str_p1
+ b.l printascii
+ mov r0, r9
+ b.l printhex8
+ adr r0, str_p2
+ b.l printascii
+901: nop8
+ b 901b
+str_p1: .asciz "\nError: unrecognized processor variant (0x"
+str_p2: .asciz ").\n"
+ .align
+#endif
+ENDPROC(__error_p)
+
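A hedged host-side sketch of the section-mapping arithmetic in stext: one 32-bit pgd entry covers a 4 MiB section, so the section number is addr >> 22 and its slot sits at that number times 4 bytes. The address and flag bits below are illustrative only:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long pc      = 0x00408123;	/* somewhere in the kernel image */
    	unsigned long flags   = 0x0000000e;	/* stand-in for the PMD_SECT_* bits */
    	unsigned long section = pc >> 22;	/* 4 MiB section number */
    	unsigned long entry   = (section << 22) | flags;

    	printf("pgd[%lu] (byte offset 0x%lx) = 0x%08lx\n",
    	       section, section * 4, entry);	/* pgd[1] (0x4) = 0x0040000e */
    	return 0;
    }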
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
new file mode 100644
index 000000000000..7d0f0b7983a0
--- /dev/null
+++ b/arch/unicore32/kernel/hibernate.c
@@ -0,0 +1,160 @@
+/*
+ * linux/arch/unicore32/kernel/hibernate.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/suspend.h>
+#include <linux/bootmem.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/suspend.h>
+
+#include "mach/pm.h"
+
+/* Pointer to the temporary resume page tables */
+pgd_t *resume_pg_dir;
+
+struct swsusp_arch_regs swsusp_arch_regs_cpu0;
+
+/*
+ * Create a middle page table on a resume-safe page and put a pointer to it in
+ * the given page global directory entry. Since the middle layer is
+ * folded on this port, this simply returns the pmd within that pgd entry.
+ */
+static pmd_t *resume_one_md_table_init(pgd_t *pgd)
+{
+ pud_t *pud;
+ pmd_t *pmd_table;
+
+ pud = pud_offset(pgd, 0);
+ pmd_table = pmd_offset(pud, 0);
+
+ return pmd_table;
+}
+
+/*
+ * Create a page table on a resume-safe page and place a pointer to it in
+ * a middle page directory entry.
+ */
+static pte_t *resume_one_page_table_init(pmd_t *pmd)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
+ if (!page_table)
+ return NULL;
+
+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));
+
+ BUG_ON(page_table != pte_offset_kernel(pmd, 0));
+
+ return page_table;
+ }
+
+ return pte_offset_kernel(pmd, 0);
+}
+
+/*
+ * This maps the physical memory to kernel virtual address space, a total
+ * of max_low_pfn pages, by creating page tables starting from address
+ * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
+ */
+static int resume_physical_mapping_init(pgd_t *pgd_base)
+{
+ unsigned long pfn;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+ int pgd_idx, pmd_idx;
+
+ pgd_idx = pgd_index(PAGE_OFFSET);
+ pgd = pgd_base + pgd_idx;
+ pfn = 0;
+
+ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
+ pmd = resume_one_md_table_init(pgd);
+ if (!pmd)
+ return -ENOMEM;
+
+ if (pfn >= max_low_pfn)
+ continue;
+
+ for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
+ pte_t *max_pte;
+
+ if (pfn >= max_low_pfn)
+ break;
+
+ /* Map with normal page tables.
+ * NOTE: We can mark everything as executable here
+ */
+ pte = resume_one_page_table_init(pmd);
+ if (!pte)
+ return -ENOMEM;
+
+ max_pte = pte + PTRS_PER_PTE;
+ for (; pte < max_pte; pte++, pfn++) {
+ if (pfn >= max_low_pfn)
+ break;
+
+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+ }
+ }
+ }
+
+ return 0;
+}
+
+static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
+{
+}
+
+int swsusp_arch_resume(void)
+{
+ int error;
+
+ resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!resume_pg_dir)
+ return -ENOMEM;
+
+ resume_init_first_level_page_table(resume_pg_dir);
+ error = resume_physical_mapping_init(resume_pg_dir);
+ if (error)
+ return error;
+
+ /* We have got enough memory and from now on we cannot recover */
+ restore_image(resume_pg_dir, restore_pblist);
+ return 0;
+}
+
+/*
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ */
+
+int pfn_is_nosave(unsigned long pfn)
+{
+ unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
+ unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
+
+ return (pfn >= begin_pfn) && (pfn < end_pfn);
+}
+
+void save_processor_state(void)
+{
+}
+
+void restore_processor_state(void)
+{
+ local_flush_tlb_all();
+}
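A hedged sketch of the pfn_is_nosave() range test with made-up section addresses; PAGE_SHIFT is 12 on this port:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
    	unsigned long nosave_begin = 0x00510000;  /* made-up physical addresses */
    	unsigned long nosave_end   = 0x00513456;

    	unsigned long begin_pfn = nosave_begin >> PAGE_SHIFT;
    	unsigned long end_pfn   = PAGE_ALIGN(nosave_end) >> PAGE_SHIFT;

    	unsigned long pfn = 0x512;
    	printf("pfn 0x%lx nosave: %d\n", pfn,
    	       pfn >= begin_pfn && pfn < end_pfn);	/* prints 1 */
    	return 0;
    }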
diff --git a/arch/unicore32/kernel/hibernate_asm.S b/arch/unicore32/kernel/hibernate_asm.S
new file mode 100644
index 000000000000..cc3c65253c8c
--- /dev/null
+++ b/arch/unicore32/kernel/hibernate_asm.S
@@ -0,0 +1,117 @@
+/*
+ * linux/arch/unicore32/kernel/hibernate_asm.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sys.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <generated/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/assembler.h>
+
+@ restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist)
+@ r0: resume_pg_dir
+@ r1: restore_pblist
+@ copy restore_pblist pages
+@ restore registers from swsusp_arch_regs_cpu0
+@
+ENTRY(restore_image)
+ sub r0, r0, #PAGE_OFFSET
+ mov r5, #0
+ movc p0.c6, r5, #6 @invalidate ITLB & DTLB
+ movc p0.c2, r0, #0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+ .p2align 4,,7
+101:
+ csub.a r1, #0
+ beq 109f
+
+ ldw r6, [r1+], #PBE_ADDRESS
+ ldw r7, [r1+], #PBE_ORIN_ADDRESS
+
+ movl ip, #128
+102: ldm.w (r8 - r15), [r6]+
+ stm.w (r8 - r15), [r7]+
+ sub.a ip, ip, #1
+ bne 102b
+
+ ldw r1, [r1+], #PBE_NEXT
+ b 101b
+
+ .p2align 4,,7
+109:
+ /* go back to the original page tables */
+ ldw r0, =swapper_pg_dir
+ sub r0, r0, #PAGE_OFFSET
+ mov r5, #0
+ movc p0.c6, r5, #6
+ movc p0.c2, r0, #0
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_FPSTATE
+ lfm.w (f0 - f7 ), [ip]+
+ lfm.w (f8 - f15), [ip]+
+ lfm.w (f16 - f23), [ip]+
+ lfm.w (f24 - f31), [ip]+
+ ldw r4, [ip]
+ ctf r4, s31
+#endif
+ mov r0, #0x0
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_CPU
+ ldm.w (r4 - r15), [ip]+
+ ldm (r16 - r27, sp, pc), [ip]+ @ Load all regs saved previously
+
+ .align 2
+1: .long swsusp_arch_regs_cpu0
+
+
+@ swsusp_arch_suspend()
+@ - prepare pc for resume; on resume we return from this function
+@   without re-running swsusp_save
+@ - save registers in swsusp_arch_regs_cpu0
+@ - call swsusp_save to write the suspend image
+
+ENTRY(swsusp_arch_suspend)
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_CPU
+ stm.w (r4 - r15), [ip]+
+ stm.w (r16 - r27, sp, lr), [ip]+
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ ldw ip, 1f
+ add ip, ip, #SWSUSP_FPSTATE
+ sfm.w (f0 - f7 ), [ip]+
+ sfm.w (f8 - f15), [ip]+
+ sfm.w (f16 - f23), [ip]+
+ sfm.w (f24 - f31), [ip]+
+ cff r4, s31
+ stw r4, [ip]
+#endif
+ b swsusp_save @ no return
+
+1: .long swsusp_arch_regs_cpu0
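In C terms, the restore loop in restore_image walks the pbe list and copies each saved page back to its original frame: 4096 bytes per page, moved 32 bytes per ldm/stm pair, hence the count of 128. A hedged host-side sketch:

    #include <string.h>

    struct pbe {
    	void *address;		/* where the page was stashed */
    	void *orig_address;	/* where it must go back */
    	struct pbe *next;
    };

    static void restore_pages(struct pbe *p)
    {
    	for (; p; p = p->next)
    		memcpy(p->orig_address, p->address, 4096);	/* 128 * 32 bytes */
    }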
diff --git a/arch/unicore32/kernel/init_task.c b/arch/unicore32/kernel/init_task.c
new file mode 100644
index 000000000000..a35a1e50e4f4
--- /dev/null
+++ b/arch/unicore32/kernel/init_task.c
@@ -0,0 +1,44 @@
+/*
+ * linux/arch/unicore32/kernel/init_task.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+/*
+ * Initial thread structure.
+ *
+ * We need to make sure that this is 8192-byte aligned due to the
+ * way process stacks are handled. This is done by making sure
+ * the linker maps this in the .text segment right after head.S,
+ * and making head.S ensure the proper alignment.
+ *
+ * The things we do for performance..
+ */
+union thread_union init_thread_union __init_task_data = {
+ INIT_THREAD_INFO(init_task) };
+
+/*
+ * Initial task structure.
+ *
+ * All other task structs will be allocated on slabs in fork.c
+ */
+struct task_struct init_task = INIT_TASK(init_task);
+EXPORT_SYMBOL(init_task);
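The 8192-byte alignment matters because entry.S recovers thread_info by masking the low 13 bits of the stack pointer (the get_thread_info macro). A quick hedged illustration:

    #include <stdio.h>

    int main(void)
    {
    	unsigned long sp = 0xc0f13abc;		/* some in-kernel stack pointer */
    	unsigned long ti = (sp >> 13) << 13;	/* mov rd, sp >> #13; << #13 */

    	printf("thread_info at 0x%08lx\n", ti);	/* prints 0xc0f12000 */
    	return 0;
    }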
diff --git a/arch/unicore32/kernel/irq.c b/arch/unicore32/kernel/irq.c
new file mode 100644
index 000000000000..b23624cf3062
--- /dev/null
+++ b/arch/unicore32/kernel/irq.c
@@ -0,0 +1,426 @@
+/*
+ * linux/arch/unicore32/kernel/irq.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/kallsyms.h>
+#include <linux/proc_fs.h>
+#include <linux/sysdev.h>
+#include <linux/gpio.h>
+
+#include <asm/system.h>
+#include <mach/hardware.h>
+
+#include "setup.h"
+
+/*
+ * PKUnity GPIO edge detection for IRQs:
+ * IRQs are generated on Falling-Edge, Rising-Edge, or both.
+ * Use this instead of directly setting GRER/GFER.
+ */
+static int GPIO_IRQ_rising_edge;
+static int GPIO_IRQ_falling_edge;
+static int GPIO_IRQ_mask;
+
+#define GPIO_MASK(irq) (1 << (irq - IRQ_GPIO0))
+
+static int puv3_gpio_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int mask;
+
+ if (d->irq < IRQ_GPIOHIGH)
+ mask = 1 << d->irq;
+ else
+ mask = GPIO_MASK(d->irq);
+
+ if (type == IRQ_TYPE_PROBE) {
+ if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
+ return 0;
+ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
+ }
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ GPIO_IRQ_rising_edge |= mask;
+ else
+ GPIO_IRQ_rising_edge &= ~mask;
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ GPIO_IRQ_falling_edge |= mask;
+ else
+ GPIO_IRQ_falling_edge &= ~mask;
+
+ writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
+ writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);
+
+ return 0;
+}
+
+/*
+ * GPIO IRQs must be acknowledged. This is for IRQs from 0 to 7.
+ */
+static void puv3_low_gpio_ack(struct irq_data *d)
+{
+ writel((1 << d->irq), GPIO_GEDR);
+}
+
+static void puv3_low_gpio_mask(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) & ~(1 << d->irq), INTC_ICMR);
+}
+
+static void puv3_low_gpio_unmask(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) | (1 << d->irq), INTC_ICMR);
+}
+
+static int puv3_low_gpio_wake(struct irq_data *d, unsigned int on)
+{
+ if (on)
+ writel(readl(PM_PWER) | (1 << d->irq), PM_PWER);
+ else
+ writel(readl(PM_PWER) & ~(1 << d->irq), PM_PWER);
+ return 0;
+}
+
+static struct irq_chip puv3_low_gpio_chip = {
+ .name = "GPIO-low",
+ .irq_ack = puv3_low_gpio_ack,
+ .irq_mask = puv3_low_gpio_mask,
+ .irq_unmask = puv3_low_gpio_unmask,
+ .irq_set_type = puv3_gpio_type,
+ .irq_set_wake = puv3_low_gpio_wake,
+};
+
+/*
+ * IRQ8 (GPIO0 through 27) handler. We enter here with the
+ * irq_controller_lock held, and IRQs disabled. Decode the IRQ
+ * and call the handler.
+ */
+static void
+puv3_gpio_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int mask;
+
+ mask = readl(GPIO_GEDR);
+ do {
+ /*
+ * clear down all currently active IRQ sources.
+ * We will be processing them all.
+ */
+ writel(mask, GPIO_GEDR);
+
+ irq = IRQ_GPIO0;
+ do {
+ if (mask & 1)
+ generic_handle_irq(irq);
+ mask >>= 1;
+ irq++;
+ } while (mask);
+ mask = readl(GPIO_GEDR);
+ } while (mask);
+}
+
+/*
+ * GPIO0-27 edge IRQs need to be handled specially.
+ * In addition, the IRQs are all collected up into one bit in the
+ * interrupt controller registers.
+ */
+static void puv3_high_gpio_ack(struct irq_data *d)
+{
+ unsigned int mask = GPIO_MASK(d->irq);
+
+ writel(mask, GPIO_GEDR);
+}
+
+static void puv3_high_gpio_mask(struct irq_data *d)
+{
+ unsigned int mask = GPIO_MASK(d->irq);
+
+ GPIO_IRQ_mask &= ~mask;
+
+ writel(readl(GPIO_GRER) & ~mask, GPIO_GRER);
+ writel(readl(GPIO_GFER) & ~mask, GPIO_GFER);
+}
+
+static void puv3_high_gpio_unmask(struct irq_data *d)
+{
+ unsigned int mask = GPIO_MASK(d->irq);
+
+ GPIO_IRQ_mask |= mask;
+
+ writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
+ writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);
+}
+
+static int puv3_high_gpio_wake(struct irq_data *d, unsigned int on)
+{
+ if (on)
+ writel(readl(PM_PWER) | PM_PWER_GPIOHIGH, PM_PWER);
+ else
+ writel(readl(PM_PWER) & ~PM_PWER_GPIOHIGH, PM_PWER);
+ return 0;
+}
+
+static struct irq_chip puv3_high_gpio_chip = {
+ .name = "GPIO-high",
+ .irq_ack = puv3_high_gpio_ack,
+ .irq_mask = puv3_high_gpio_mask,
+ .irq_unmask = puv3_high_gpio_unmask,
+ .irq_set_type = puv3_gpio_type,
+ .irq_set_wake = puv3_high_gpio_wake,
+};
+
+/*
+ * We don't need to ACK IRQs on the PKUnity unless they're GPIOs;
+ * this is for internal IRQs, i.e. from 8 to 31.
+ */
+static void puv3_mask_irq(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) & ~(1 << d->irq), INTC_ICMR);
+}
+
+static void puv3_unmask_irq(struct irq_data *d)
+{
+ writel(readl(INTC_ICMR) | (1 << d->irq), INTC_ICMR);
+}
+
+/*
+ * Apart from GPIOs, only the RTC alarm can be a wakeup event.
+ */
+static int puv3_set_wake(struct irq_data *d, unsigned int on)
+{
+ if (d->irq == IRQ_RTCAlarm) {
+ if (on)
+ writel(readl(PM_PWER) | PM_PWER_RTC, PM_PWER);
+ else
+ writel(readl(PM_PWER) & ~PM_PWER_RTC, PM_PWER);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static struct irq_chip puv3_normal_chip = {
+ .name = "PKUnity-v3",
+ .irq_ack = puv3_mask_irq,
+ .irq_mask = puv3_mask_irq,
+ .irq_unmask = puv3_unmask_irq,
+ .irq_set_wake = puv3_set_wake,
+};
+
+static struct resource irq_resource = {
+ .name = "irqs",
+ .start = io_v2p(PKUNITY_INTC_BASE),
+ .end = io_v2p(PKUNITY_INTC_BASE) + 0xFFFFF,
+};
+
+static struct puv3_irq_state {
+ unsigned int saved;
+ unsigned int icmr;
+ unsigned int iclr;
+ unsigned int iccr;
+} puv3_irq_state;
+
+static int puv3_irq_suspend(struct sys_device *dev, pm_message_t state)
+{
+ struct puv3_irq_state *st = &puv3_irq_state;
+
+ st->saved = 1;
+ st->icmr = readl(INTC_ICMR);
+ st->iclr = readl(INTC_ICLR);
+ st->iccr = readl(INTC_ICCR);
+
+ /*
+ * Disable all GPIO-based interrupts.
+ */
+ writel(readl(INTC_ICMR) & ~(0x1ff), INTC_ICMR);
+
+ /*
+ * Set the appropriate edges for wakeup.
+ */
+ writel(readl(PM_PWER) & GPIO_IRQ_rising_edge, GPIO_GRER);
+ writel(readl(PM_PWER) & GPIO_IRQ_falling_edge, GPIO_GFER);
+
+ /*
+ * Clear any pending GPIO interrupts.
+ */
+ writel(readl(GPIO_GEDR), GPIO_GEDR);
+
+ return 0;
+}
+
+static int puv3_irq_resume(struct sys_device *dev)
+{
+ struct puv3_irq_state *st = &puv3_irq_state;
+
+ if (st->saved) {
+ writel(st->iccr, INTC_ICCR);
+ writel(st->iclr, INTC_ICLR);
+
+ writel(GPIO_IRQ_rising_edge & GPIO_IRQ_mask, GPIO_GRER);
+ writel(GPIO_IRQ_falling_edge & GPIO_IRQ_mask, GPIO_GFER);
+
+ writel(st->icmr, INTC_ICMR);
+ }
+ return 0;
+}
+
+static struct sysdev_class puv3_irq_sysclass = {
+ .name = "pkunity-irq",
+ .suspend = puv3_irq_suspend,
+ .resume = puv3_irq_resume,
+};
+
+static struct sys_device puv3_irq_device = {
+ .id = 0,
+ .cls = &puv3_irq_sysclass,
+};
+
+static int __init puv3_irq_init_devicefs(void)
+{
+ sysdev_class_register(&puv3_irq_sysclass);
+ return sysdev_register(&puv3_irq_device);
+}
+
+device_initcall(puv3_irq_init_devicefs);
+
+void __init init_IRQ(void)
+{
+ unsigned int irq;
+
+ request_resource(&iomem_resource, &irq_resource);
+
+ /* disable all IRQs */
+ writel(0, INTC_ICMR);
+
+ /* all IRQs are IRQ, not REAL */
+ writel(0, INTC_ICLR);
+
+ /* clear all GPIO edge detects */
+ writel(FMASK(8, 0) & ~FIELD(1, 1, GPI_SOFF_REQ), GPIO_GPIR);
+ writel(0, GPIO_GFER);
+ writel(0, GPIO_GRER);
+ writel(0x0FFFFFFF, GPIO_GEDR);
+
+ writel(1, INTC_ICCR);
+
+ for (irq = 0; irq < IRQ_GPIOHIGH; irq++) {
+ set_irq_chip(irq, &puv3_low_gpio_chip);
+ set_irq_handler(irq, handle_edge_irq);
+ irq_modify_status(irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
+ 0);
+ }
+
+ for (irq = IRQ_GPIOHIGH + 1; irq < IRQ_GPIO0; irq++) {
+ set_irq_chip(irq, &puv3_normal_chip);
+ set_irq_handler(irq, handle_level_irq);
+ irq_modify_status(irq,
+ IRQ_NOREQUEST | IRQ_NOAUTOEN,
+ IRQ_NOPROBE);
+ }
+
+ for (irq = IRQ_GPIO0; irq <= IRQ_GPIO27; irq++) {
+ set_irq_chip(irq, &puv3_high_gpio_chip);
+ set_irq_handler(irq, handle_edge_irq);
+ irq_modify_status(irq,
+ IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN,
+ 0);
+ }
+
+ /*
+ * Install handler for GPIO 0-27 edge detect interrupts
+ */
+ set_irq_chip(IRQ_GPIOHIGH, &puv3_normal_chip);
+ set_irq_chained_handler(IRQ_GPIOHIGH, puv3_gpio_handler);
+
+#ifdef CONFIG_PUV3_GPIO
+ puv3_init_gpio();
+#endif
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v, cpu;
+ struct irq_desc *desc;
+ struct irqaction *action;
+ unsigned long flags;
+
+ if (i == 0) {
+ char cpuname[12];
+
+ seq_printf(p, " ");
+ for_each_present_cpu(cpu) {
+ sprintf(cpuname, "CPU%d", cpu);
+ seq_printf(p, " %10s", cpuname);
+ }
+ seq_putc(p, '\n');
+ }
+
+ if (i < nr_irqs) {
+ desc = irq_to_desc(i);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ action = desc->action;
+ if (!action)
+ goto unlock;
+
+ seq_printf(p, "%3d: ", i);
+ for_each_present_cpu(cpu)
+ seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
+ seq_printf(p, " %10s", desc->irq_data.chip->name ? : "-");
+ seq_printf(p, " %s", action->name);
+ for (action = action->next; action; action = action->next)
+ seq_printf(p, ", %s", action->name);
+
+ seq_putc(p, '\n');
+unlock:
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ } else if (i == nr_irqs) {
+ seq_printf(p, "Error in interrupt!\n");
+ }
+ return 0;
+}
+
+/*
+ * asm_do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'.
+ */
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (unlikely(irq >= nr_irqs)) {
+ if (printk_ratelimit())
+ printk(KERN_WARNING "Bad IRQ%u\n", irq);
+ ack_bad_irq(irq);
+ } else {
+ generic_handle_irq(irq);
+ }
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
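A hedged sketch of a driver arming one of the GPIO edge IRQs handled above; IRQ_GPIO2 and the other names are illustrative. The trigger flag reaches puv3_gpio_type() through the genirq irq_set_type hook:

    #include <linux/interrupt.h>

    static irqreturn_t demo_isr(int irq, void *dev_id)
    {
    	/* edge already acknowledged by puv3_high_gpio_ack() before we run */
    	return IRQ_HANDLED;
    }

    static int demo_attach(void)
    {
    	return request_irq(IRQ_GPIO2, demo_isr, IRQF_TRIGGER_RISING,
    			   "demo", NULL);
    }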
diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c
new file mode 100644
index 000000000000..a8970809428a
--- /dev/null
+++ b/arch/unicore32/kernel/ksyms.c
@@ -0,0 +1,99 @@
+/*
+ * linux/arch/unicore32/kernel/ksyms.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/cryptohash.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/checksum.h>
+#include <asm/system.h>
+
+#include "ksyms.h"
+
+EXPORT_SYMBOL(__uc32_find_next_zero_bit);
+EXPORT_SYMBOL(__uc32_find_next_bit);
+
+EXPORT_SYMBOL(__backtrace);
+
+ /* platform dependent support */
+EXPORT_SYMBOL(__udelay);
+EXPORT_SYMBOL(__const_udelay);
+
+ /* networking */
+EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_ipv6_magic);
+
+ /* io */
+#ifndef __raw_readsb
+EXPORT_SYMBOL(__raw_readsb);
+#endif
+#ifndef __raw_readsw
+EXPORT_SYMBOL(__raw_readsw);
+#endif
+#ifndef __raw_readsl
+EXPORT_SYMBOL(__raw_readsl);
+#endif
+#ifndef __raw_writesb
+EXPORT_SYMBOL(__raw_writesb);
+#endif
+#ifndef __raw_writesw
+EXPORT_SYMBOL(__raw_writesw);
+#endif
+#ifndef __raw_writesl
+EXPORT_SYMBOL(__raw_writesl);
+#endif
+
+ /* string / mem functions */
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memchr);
+
+ /* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__clear_user);
+
+EXPORT_SYMBOL(__get_user_1);
+EXPORT_SYMBOL(__get_user_2);
+EXPORT_SYMBOL(__get_user_4);
+
+EXPORT_SYMBOL(__put_user_1);
+EXPORT_SYMBOL(__put_user_2);
+EXPORT_SYMBOL(__put_user_4);
+EXPORT_SYMBOL(__put_user_8);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__bswapsi2);
+
diff --git a/arch/unicore32/kernel/ksyms.h b/arch/unicore32/kernel/ksyms.h
new file mode 100644
index 000000000000..185cdc712d03
--- /dev/null
+++ b/arch/unicore32/kernel/ksyms.h
@@ -0,0 +1,15 @@
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (the prototypes are not correct, but that doesn't
+ * really matter since they're not versioned).
+ */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __bswapsi2(void);
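These helpers exist because, assuming UniCore (like many RISC cores of the period) lacks a hardware divide, the compiler lowers plain C arithmetic to library calls, and modules doing the same need the symbols exported. A sketch:

    /* on such targets the compiler turns this into a call to __divsi3
     * (signed) or __udivsi3 (unsigned) rather than a divide instruction */
    int scale(int a, int b)
    {
    	return a / b;
    }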
diff --git a/arch/unicore32/kernel/module.c b/arch/unicore32/kernel/module.c
new file mode 100644
index 000000000000..3e5a38d71a1e
--- /dev/null
+++ b/arch/unicore32/kernel/module.c
@@ -0,0 +1,152 @@
+/*
+ * linux/arch/unicore32/kernel/module.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+void *module_alloc(unsigned long size)
+{
+ struct vm_struct *area;
+
+ size = PAGE_ALIGN(size);
+ if (!size)
+ return NULL;
+
+ area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
+ if (!area)
+ return NULL;
+
+ return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+}
+
+void module_free(struct module *module, void *region)
+{
+ vfree(region);
+}
+
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ return 0;
+}
+
+int
+apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+ unsigned int relindex, struct module *module)
+{
+ Elf32_Shdr *symsec = sechdrs + symindex;
+ Elf32_Shdr *relsec = sechdrs + relindex;
+ Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
+ Elf32_Rel *rel = (void *)relsec->sh_addr;
+ unsigned int i;
+
+ for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
+ unsigned long loc;
+ Elf32_Sym *sym;
+ s32 offset;
+
+ offset = ELF32_R_SYM(rel->r_info);
+ if (offset < 0 || offset >=
+ (symsec->sh_size / sizeof(Elf32_Sym))) {
+ printk(KERN_ERR "%s: bad relocation, "
+ "section %d reloc %d\n",
+ module->name, relindex, i);
+ return -ENOEXEC;
+ }
+
+ sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
+
+ if (rel->r_offset < 0 || rel->r_offset >
+ dstsec->sh_size - sizeof(u32)) {
+ printk(KERN_ERR "%s: out of bounds relocation, "
+ "section %d reloc %d offset %d size %d\n",
+ module->name, relindex, i, rel->r_offset,
+ dstsec->sh_size);
+ return -ENOEXEC;
+ }
+
+ loc = dstsec->sh_addr + rel->r_offset;
+
+ switch (ELF32_R_TYPE(rel->r_info)) {
+ case R_UNICORE_NONE:
+ /* ignore */
+ break;
+
+ case R_UNICORE_ABS32:
+ *(u32 *)loc += sym->st_value;
+ break;
+
+ case R_UNICORE_PC24:
+ case R_UNICORE_CALL:
+ case R_UNICORE_JUMP24:
+ offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ if (offset & 0x02000000)
+ offset -= 0x04000000;
+
+ offset += sym->st_value - loc;
+ if (offset & 3 ||
+ offset <= (s32)0xfe000000 ||
+ offset >= (s32)0x02000000) {
+ printk(KERN_ERR
+ "%s: relocation out of range, section "
+ "%d reloc %d sym '%s'\n", module->name,
+ relindex, i, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+
+ offset >>= 2;
+
+ *(u32 *)loc &= 0xff000000;
+ *(u32 *)loc |= offset & 0x00ffffff;
+ break;
+
+ default:
+ printk(KERN_ERR "%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel->r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int
+apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *module)
+{
+ printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
+ module->name);
+ return -ENOEXEC;
+}
+
+int
+module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *module)
+{
+ return 0;
+}
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
new file mode 100644
index 000000000000..100eab842e66
--- /dev/null
+++ b/arch/unicore32/kernel/pci.c
@@ -0,0 +1,404 @@
+/*
+ * linux/arch/unicore32/kernel/pci.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * PCI bios-type initialisation for PCI machines
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/io.h>
+
+static int debug_pci;
+static int use_firmware;
+
+#define CONFIG_CMD(bus, devfn, where) \
+ (0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
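+
+/*
+ * Example encoding (hypothetical values): bus 0, devfn 0x08
+ * (device 1, function 0), register 0x12 gives
+ * 0x80000000 | (0 << 16) | (0x08 << 8) | 0x10 = 0x80000810;
+ * 'where' is masked down to a dword boundary and the sub-word
+ * access is resolved in the accessors below.
+ */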
+
+static int
+puv3_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ writel(CONFIG_CMD(bus, devfn, where), PCICFG_ADDR);
+ switch (size) {
+ case 1:
+ *value = (readl(PCICFG_DATA) >> ((where & 3) * 8)) & 0xFF;
+ break;
+ case 2:
+ *value = (readl(PCICFG_DATA) >> ((where & 2) * 8)) & 0xFFFF;
+ break;
+ case 4:
+ *value = readl(PCICFG_DATA);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+puv3_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ writel(CONFIG_CMD(bus, devfn, where), PCICFG_ADDR);
+ switch (size) {
+ case 1:
+ writel((readl(PCICFG_DATA) & ~FMASK(8, (where&3)*8))
+ | FIELD(value, 8, (where&3)*8), PCICFG_DATA);
+ break;
+ case 2:
+ writel((readl(PCICFG_DATA) & ~FMASK(16, (where&2)*8))
+ | FIELD(value, 16, (where&2)*8), PCICFG_DATA);
+ break;
+ case 4:
+ writel(value, PCICFG_DATA);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
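+
+/*
+ * Example for the sub-word write path above (assuming FMASK(w, s)
+ * builds a w-bit mask at bit s and FIELD(v, w, s) places v there):
+ * a one-byte write of 0xAB at where == 2 uses (where & 3) * 8 == 16,
+ * so bits 23..16 of the 32-bit config word are cleared and replaced
+ * by 0xAB << 16 before the word is written back.
+ */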
+
+struct pci_ops pci_puv3_ops = {
+ .read = puv3_read_config,
+ .write = puv3_write_config,
+};
+
+void pci_puv3_preinit(void)
+{
+ printk(KERN_DEBUG "PCI: PKUnity PCI Controller Initializing ...\n");
+ /* config PCI bridge base */
+ writel(io_v2p(PKUNITY_PCIBRI_BASE), PCICFG_BRIBASE);
+
+ writel(0, PCIBRI_AHBCTL0);
+ writel(io_v2p(PKUNITY_PCIBRI_BASE) | PCIBRI_BARx_MEM, PCIBRI_AHBBAR0);
+ writel(0xFFFF0000, PCIBRI_AHBAMR0);
+ writel(0, PCIBRI_AHBTAR0);
+
+ writel(PCIBRI_CTLx_AT, PCIBRI_AHBCTL1);
+ writel(io_v2p(PKUNITY_PCILIO_BASE) | PCIBRI_BARx_IO, PCIBRI_AHBBAR1);
+ writel(0xFFFF0000, PCIBRI_AHBAMR1);
+ writel(0x00000000, PCIBRI_AHBTAR1);
+
+ writel(PCIBRI_CTLx_PREF, PCIBRI_AHBCTL2);
+ writel(io_v2p(PKUNITY_PCIMEM_BASE) | PCIBRI_BARx_MEM, PCIBRI_AHBBAR2);
+ writel(0xF8000000, PCIBRI_AHBAMR2);
+ writel(0, PCIBRI_AHBTAR2);
+
+ writel(io_v2p(PKUNITY_PCIAHB_BASE) | PCIBRI_BARx_MEM, PCIBRI_BAR1);
+
+ writel(PCIBRI_CTLx_AT | PCIBRI_CTLx_PREF, PCIBRI_PCICTL0);
+ writel(io_v2p(PKUNITY_PCIAHB_BASE) | PCIBRI_BARx_MEM, PCIBRI_PCIBAR0);
+ writel(0xF8000000, PCIBRI_PCIAMR0);
+ writel(PKUNITY_SDRAM_BASE, PCIBRI_PCITAR0);
+
+ writel(readl(PCIBRI_CMD) | PCIBRI_CMD_IO | PCIBRI_CMD_MEM, PCIBRI_CMD);
+}
+
+static int __init pci_puv3_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (dev->bus->number == 0) {
+#ifdef CONFIG_ARCH_FPGA /* 4 pci slots */
+ if (dev->devfn == 0x00)
+ return IRQ_PCIINTA;
+ else if (dev->devfn == 0x08)
+ return IRQ_PCIINTB;
+ else if (dev->devfn == 0x10)
+ return IRQ_PCIINTC;
+ else if (dev->devfn == 0x18)
+ return IRQ_PCIINTD;
+#endif
+#ifdef CONFIG_PUV3_DB0913 /* 3 pci slots */
+ if (dev->devfn == 0x30)
+ return IRQ_PCIINTB;
+ else if (dev->devfn == 0x60)
+ return IRQ_PCIINTC;
+ else if (dev->devfn == 0x58)
+ return IRQ_PCIINTD;
+#endif
+#if defined(CONFIG_PUV3_NB0916) || defined(CONFIG_PUV3_SMW0919)
+ /* only support 2 pci devices */
+ if (dev->devfn == 0x00)
+ return IRQ_PCIINTC; /* sata */
+#endif
+ }
+ return -1;
+}
+
+/*
+ * Only the first 128MB of memory can be accessed via PCI.
+ * We use GFP_DMA to allocate safe buffers to do map/unmap.
+ * This is really ugly and we need a better way of specifying
+ * DMA-capable regions of memory.
+ */
+void __init puv3_pci_adjust_zones(unsigned long *zone_size,
+ unsigned long *zhole_size)
+{
+ unsigned int sz = SZ_128M >> PAGE_SHIFT;
+
+ /*
+ * Only adjust if > 128M on current system
+ */
+ if (zone_size[0] <= sz)
+ return;
+
+ zone_size[1] = zone_size[0] - sz;
+ zone_size[0] = sz;
+ zhole_size[1] = zhole_size[0];
+ zhole_size[0] = 0;
+}
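+
+/*
+ * Example (assuming 4KB pages): with 256MB of RAM, zone_size[0]
+ * starts out as 65536 pages; the code above splits it into
+ * zone_size[0] = 32768 (the DMA-capable first 128MB) and
+ * zone_size[1] = 32768 (the rest).
+ */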
+
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ if (debug_pci)
+ printk(KERN_DEBUG "PCI: Assigning IRQ %02d to %s\n",
+ irq, pci_name(dev));
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * If the bus contains any of these devices, then we must not turn on
+ * parity checking of any kind.
+ */
+static inline int pdev_bad_for_parity(struct pci_dev *dev)
+{
+ return 0;
+}
+
+/*
+ * pcibios_fixup_bus - Called after each bus is probed,
+ * but before its children are examined.
+ */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ u16 features = PCI_COMMAND_SERR
+ | PCI_COMMAND_PARITY
+ | PCI_COMMAND_FAST_BACK;
+
+ bus->resource[0] = &ioport_resource;
+ bus->resource[1] = &iomem_resource;
+
+ /*
+ * Walk the devices on this bus, working out what we can
+ * and can't support.
+ */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 status;
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+
+ /*
+ * If any device on this bus does not support fast back
+ * to back transfers, then the bus as a whole is not able
+ * to support them. Having fast back to back transfers
+ * on saves us one PCI cycle per transaction.
+ */
+ if (!(status & PCI_STATUS_FAST_BACK))
+ features &= ~PCI_COMMAND_FAST_BACK;
+
+ if (pdev_bad_for_parity(dev))
+ features &= ~(PCI_COMMAND_SERR
+ | PCI_COMMAND_PARITY);
+
+ switch (dev->class >> 8) {
+ case PCI_CLASS_BRIDGE_PCI:
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
+ status |= PCI_BRIDGE_CTL_PARITY
+ | PCI_BRIDGE_CTL_MASTER_ABORT;
+ status &= ~(PCI_BRIDGE_CTL_BUS_RESET
+ | PCI_BRIDGE_CTL_FAST_BACK);
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
+ break;
+
+ case PCI_CLASS_BRIDGE_CARDBUS:
+ pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL,
+ &status);
+ status |= PCI_CB_BRIDGE_CTL_PARITY
+ | PCI_CB_BRIDGE_CTL_MASTER_ABORT;
+ pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL,
+ status);
+ break;
+ }
+ }
+
+ /*
+ * Now walk the devices again, this time setting them up.
+ */
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd |= features;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
+ L1_CACHE_BYTES >> 2);
+ }
+
+ /*
+ * Propagate the flags to the PCI bridge.
+ */
+ if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ if (features & PCI_COMMAND_FAST_BACK)
+ bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
+ if (features & PCI_COMMAND_PARITY)
+ bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
+ }
+
+ /*
+ * Report what we did for this bus
+ */
+ printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
+ bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
+}
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_fixup_bus);
+#endif
+
+static int __init pci_common_init(void)
+{
+ struct pci_bus *puv3_bus;
+
+ pci_puv3_preinit();
+
+ puv3_bus = pci_scan_bus(0, &pci_puv3_ops, NULL);
+
+ if (!puv3_bus)
+ panic("PCI: unable to scan bus!");
+
+ pci_fixup_irqs(pci_common_swizzle, pci_puv3_map_irq);
+
+ if (!use_firmware) {
+ /*
+ * Size the bridge windows.
+ */
+ pci_bus_size_bridges(puv3_bus);
+
+ /*
+ * Assign resources.
+ */
+ pci_bus_assign_resources(puv3_bus);
+ }
+
+ /*
+ * Tell drivers about devices found.
+ */
+ pci_bus_add_devices(puv3_bus);
+
+ return 0;
+}
+subsys_initcall(pci_common_init);
+
+char * __devinit pcibios_setup(char *str)
+{
+ if (!strcmp(str, "debug")) {
+ debug_pci = 1;
+ return NULL;
+ } else if (!strcmp(str, "firmware")) {
+ use_firmware = 1;
+ return NULL;
+ }
+ return str;
+}
+
+/*
+ * From arch/i386/kernel/pci-i386.c:
+ *
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ resource_size_t start = res->start;
+
+ if (res->flags & IORESOURCE_IO && start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+
+ start = (start + align - 1) & ~(align - 1);
+
+ return start;
+}
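+
+/*
+ * Example: an I/O resource tentatively placed at 0x2900 has bits in
+ * 0x300 set, so it is bumped to (0x2900 + 0x3ff) & ~0x3ff = 0x2c00,
+ * which lands back in the safe 0x000-0x0ff region modulo 0x400.
+ */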
+
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, old_cmd;
+ int idx;
+ struct resource *r;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ old_cmd = cmd;
+ for (idx = 0; idx < 6; idx++) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1 << idx)))
+ continue;
+
+ r = dev->resource + idx;
+ if (!r->start && r->end) {
+ printk(KERN_ERR "PCI: Device %s not available because"
+ " of resource collisions\n", pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ /*
+ * Bridges (e.g., cardbus bridges) need to be fully enabled
+ */
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+ if (cmd != old_cmd) {
+ printk("PCI: enabling device %s (%04x -> %04x)\n",
+ pci_name(dev), old_cmd, cmd);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ unsigned long phys;
+
+ if (mmap_state == pci_mmap_io)
+ return -EINVAL;
+
+ phys = vma->vm_pgoff;
+
+ /*
+ * Mark this as IO
+ */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start, phys,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
diff --git a/arch/unicore32/kernel/pm.c b/arch/unicore32/kernel/pm.c
new file mode 100644
index 000000000000..784bc2db3b28
--- /dev/null
+++ b/arch/unicore32/kernel/pm.c
@@ -0,0 +1,123 @@
+/*
+ * linux/arch/unicore32/kernel/pm.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/pm.h>
+
+#include "setup.h"
+
+struct puv3_cpu_pm_fns *puv3_cpu_pm_fns;
+static unsigned long *sleep_save;
+
+int puv3_pm_enter(suspend_state_t state)
+{
+ unsigned long sleep_save_checksum = 0, checksum = 0;
+ int i;
+
+ /* skip registers saving for standby */
+ if (state != PM_SUSPEND_STANDBY) {
+ puv3_cpu_pm_fns->save(sleep_save);
+ /* before sleeping, calculate and save a checksum */
+ for (i = 0; i < puv3_cpu_pm_fns->save_count - 1; i++)
+ sleep_save_checksum += sleep_save[i];
+ }
+
+ /* *** go zzz *** */
+ puv3_cpu_pm_fns->enter(state);
+ cpu_init();
+#ifdef CONFIG_INPUT_KEYBOARD
+ puv3_ps2_init();
+#endif
+#ifdef CONFIG_PCI
+ pci_puv3_preinit();
+#endif
+ if (state != PM_SUSPEND_STANDBY) {
+ /* after sleeping, validate the checksum */
+ for (i = 0; i < puv3_cpu_pm_fns->save_count - 1; i++)
+ checksum += sleep_save[i];
+
+ /* if invalid, re-enter sleep and wait for a hardware reset */
+ if (checksum != sleep_save_checksum) {
+ while (1)
+ puv3_cpu_pm_fns->enter(state);
+ }
+ puv3_cpu_pm_fns->restore(sleep_save);
+ }
+
+ pr_debug("*** made it back from resume\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(puv3_pm_enter);
+
+unsigned long sleep_phys_sp(void *sp)
+{
+ return virt_to_phys(sp);
+}
+
+static int puv3_pm_valid(suspend_state_t state)
+{
+ if (puv3_cpu_pm_fns)
+ return puv3_cpu_pm_fns->valid(state);
+
+ return -EINVAL;
+}
+
+static int puv3_pm_prepare(void)
+{
+ int ret = 0;
+
+ if (puv3_cpu_pm_fns && puv3_cpu_pm_fns->prepare)
+ ret = puv3_cpu_pm_fns->prepare();
+
+ return ret;
+}
+
+static void puv3_pm_finish(void)
+{
+ if (puv3_cpu_pm_fns && puv3_cpu_pm_fns->finish)
+ puv3_cpu_pm_fns->finish();
+}
+
+static struct platform_suspend_ops puv3_pm_ops = {
+ .valid = puv3_pm_valid,
+ .enter = puv3_pm_enter,
+ .prepare = puv3_pm_prepare,
+ .finish = puv3_pm_finish,
+};
+
+static int __init puv3_pm_init(void)
+{
+ if (!puv3_cpu_pm_fns) {
+ printk(KERN_ERR "no valid puv3_cpu_pm_fns defined\n");
+ return -EINVAL;
+ }
+
+ sleep_save = kmalloc(puv3_cpu_pm_fns->save_count
+ * sizeof(unsigned long), GFP_KERNEL);
+ if (!sleep_save) {
+ printk(KERN_ERR "failed to alloc memory for pm save\n");
+ return -ENOMEM;
+ }
+
+ suspend_set_ops(&puv3_pm_ops);
+ return 0;
+}
+
+device_initcall(puv3_pm_init);
diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c
new file mode 100644
index 000000000000..ba401df971ed
--- /dev/null
+++ b/arch/unicore32/kernel/process.c
@@ -0,0 +1,389 @@
+/*
+ * linux/arch/unicore32/kernel/process.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <stdarg.h>
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/elfcore.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/utsname.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/gpio.h>
+#include <linux/stacktrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/stacktrace.h>
+
+#include "setup.h"
+
+static const char * const processor_modes[] = {
+ "UK00", "UK01", "UK02", "UK03", "UK04", "UK05", "UK06", "UK07",
+ "UK08", "UK09", "UK0A", "UK0B", "UK0C", "UK0D", "UK0E", "UK0F",
+ "USER", "REAL", "INTR", "PRIV", "UK14", "UK15", "UK16", "ABRT",
+ "UK18", "UK19", "UK1A", "EXTN", "UK1C", "UK1D", "UK1E", "SUSR"
+};
+
+/*
+ * The idle thread has rather strange semantics for calling pm_idle,
+ * but this is what x86 does and we need to do the same, so that
+ * things like cpuidle get called in the same way.
+ */
+void cpu_idle(void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_stop_sched_tick(1);
+ while (!need_resched()) {
+ local_irq_disable();
+ stop_critical_timings();
+ cpu_do_idle();
+ local_irq_enable();
+ start_critical_timings();
+ }
+ tick_nohz_restart_sched_tick();
+ preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+ }
+}
+
+static char reboot_mode = 'h';
+
+int __init reboot_setup(char *str)
+{
+ reboot_mode = str[0];
+ return 1;
+}
+
+__setup("reboot=", reboot_setup);
+
+void machine_halt(void)
+{
+ gpio_set_value(GPO_SOFT_OFF, 0);
+}
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*pm_power_off)(void) = NULL;
+
+void machine_power_off(void)
+{
+ if (pm_power_off)
+ pm_power_off();
+ machine_halt();
+}
+
+void machine_restart(char *cmd)
+{
+ /* Disable interrupts first */
+ local_irq_disable();
+
+ /*
+ * Tell the mm system that we are going to reboot -
+ * we may need it to insert some 1:1 mappings so that
+ * soft boot works.
+ */
+ setup_mm_for_reboot(reboot_mode);
+
+ /* Clean and invalidate caches */
+ flush_cache_all();
+
+ /* Turn off caching */
+ cpu_proc_fin();
+
+ /* Push out any further dirty data, and ensure cache is empty */
+ flush_cache_all();
+
+ /*
+ * Now handle reboot code.
+ */
+ if (reboot_mode == 's') {
+ /* Jump into ROM at address 0xffff0000 */
+ cpu_reset(VECTORS_BASE);
+ } else {
+ writel(0x00002001, PM_PLLSYSCFG); /* cpu clk = 250M */
+ writel(0x00100800, PM_PLLDDRCFG); /* ddr clk = 44M */
+ writel(0x00002001, PM_PLLVGACFG); /* vga clk = 250M */
+
+ /* Use on-chip reset capability */
+ /* following instructions must be in one icache line */
+ __asm__ __volatile__(
+ " .align 5\n\t"
+ " stw %1, [%0]\n\t"
+ "201: ldw r0, [%0]\n\t"
+ " cmpsub.a r0, #0\n\t"
+ " bne 201b\n\t"
+ " stw %3, [%2]\n\t"
+ " nop; nop; nop\n\t"
+ /* prefetch 3 instructions at most */
+ :
+ : "r" (PM_PMCR),
+ "r" (PM_PMCR_CFBSYS | PM_PMCR_CFBDDR
+ | PM_PMCR_CFBVGA),
+ "r" (RESETC_SWRR),
+ "r" (RESETC_SWRR_SRB)
+ : "r0", "memory");
+ }
+
+ /*
+ * Whoops - the architecture was unable to reboot.
+ * Tell the user!
+ */
+ mdelay(1000);
+ printk(KERN_EMERG "Reboot failed -- System halted\n");
+ do { } while (1);
+}
+
+void __show_regs(struct pt_regs *regs)
+{
+ unsigned long flags;
+ char buf[64];
+
+ printk(KERN_DEFAULT "CPU: %d %s (%s %.*s)\n",
+ raw_smp_processor_id(), print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ print_symbol("PC is at %s\n", instruction_pointer(regs));
+ print_symbol("LR is at %s\n", regs->UCreg_lr);
+ printk(KERN_DEFAULT "pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
+ "sp : %08lx ip : %08lx fp : %08lx\n",
+ regs->UCreg_pc, regs->UCreg_lr, regs->UCreg_asr,
+ regs->UCreg_sp, regs->UCreg_ip, regs->UCreg_fp);
+ printk(KERN_DEFAULT "r26: %08lx r25: %08lx r24: %08lx\n",
+ regs->UCreg_26, regs->UCreg_25,
+ regs->UCreg_24);
+ printk(KERN_DEFAULT "r23: %08lx r22: %08lx r21: %08lx r20: %08lx\n",
+ regs->UCreg_23, regs->UCreg_22,
+ regs->UCreg_21, regs->UCreg_20);
+ printk(KERN_DEFAULT "r19: %08lx r18: %08lx r17: %08lx r16: %08lx\n",
+ regs->UCreg_19, regs->UCreg_18,
+ regs->UCreg_17, regs->UCreg_16);
+ printk(KERN_DEFAULT "r15: %08lx r14: %08lx r13: %08lx r12: %08lx\n",
+ regs->UCreg_15, regs->UCreg_14,
+ regs->UCreg_13, regs->UCreg_12);
+ printk(KERN_DEFAULT "r11: %08lx r10: %08lx r9 : %08lx r8 : %08lx\n",
+ regs->UCreg_11, regs->UCreg_10,
+ regs->UCreg_09, regs->UCreg_08);
+ printk(KERN_DEFAULT "r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
+ regs->UCreg_07, regs->UCreg_06,
+ regs->UCreg_05, regs->UCreg_04);
+ printk(KERN_DEFAULT "r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
+ regs->UCreg_03, regs->UCreg_02,
+ regs->UCreg_01, regs->UCreg_00);
+
+ flags = regs->UCreg_asr;
+ buf[0] = flags & PSR_S_BIT ? 'S' : 's';
+ buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
+ buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
+ buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
+ buf[4] = '\0';
+
+ printk(KERN_DEFAULT "Flags: %s INTR o%s REAL o%s Mode %s Segment %s\n",
+ buf, interrupts_enabled(regs) ? "n" : "ff",
+ fast_interrupts_enabled(regs) ? "n" : "ff",
+ processor_modes[processor_mode(regs)],
+ segment_eq(get_fs(), get_ds()) ? "kernel" : "user");
+ {
+ unsigned int ctrl;
+
+ buf[0] = '\0';
+ {
+ unsigned int transbase;
+ asm("movc %0, p0.c2, #0\n"
+ : "=r" (transbase));
+ snprintf(buf, sizeof(buf), " Table: %08x", transbase);
+ }
+ asm("movc %0, p0.c1, #0\n" : "=r" (ctrl));
+
+ printk(KERN_DEFAULT "Control: %08x%s\n", ctrl, buf);
+ }
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ printk(KERN_DEFAULT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %20s\n",
+ task_pid_nr(current), current->comm);
+ __show_regs(regs);
+ __backtrace();
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+}
+
+void flush_thread(void)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = current;
+
+ memset(thread->used_cp, 0, sizeof(thread->used_cp));
+ memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+#ifdef CONFIG_UNICORE_FPU_F64
+ memset(&thread->fpstate, 0, sizeof(struct fp_state));
+#endif
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+int
+copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
+{
+ struct thread_info *thread = task_thread_info(p);
+ struct pt_regs *childregs = task_pt_regs(p);
+
+ *childregs = *regs;
+ childregs->UCreg_00 = 0;
+ childregs->UCreg_sp = stack_start;
+
+ memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+ thread->cpu_context.sp = (unsigned long)childregs;
+ thread->cpu_context.pc = (unsigned long)ret_from_fork;
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->UCreg_16 = regs->UCreg_03;
+
+ return 0;
+}
+
+/*
+ * Fill in the task's elfregs structure for a core dump.
+ */
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
+{
+ elf_core_copy_regs(elfregs, task_pt_regs(t));
+ return 1;
+}
+
+/*
+ * fill in the fpe structure for a core dump...
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp)
+{
+ struct thread_info *thread = current_thread_info();
+ int used_math = thread->used_cp[1] | thread->used_cp[2];
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ if (used_math)
+ memcpy(fp, &thread->fpstate, sizeof(*fp));
+#endif
+ return used_math != 0;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * Shuffle the argument into the correct register before calling the
+ * thread function.  r4 is the thread argument, r5 is the pointer to
+ * the thread function, r6 points to the exit function, and r7 holds
+ * the processor state to restore.
+ */
+asm(".pushsection .text\n"
+" .align\n"
+" .type kernel_thread_helper, #function\n"
+"kernel_thread_helper:\n"
+" mov.a asr, r7\n"
+" mov r0, r4\n"
+" mov lr, r6\n"
+" mov pc, r5\n"
+" .size kernel_thread_helper, . - kernel_thread_helper\n"
+" .popsection");
+
+/*
+ * Create a kernel thread.
+ */
+pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.UCreg_04 = (unsigned long)arg;
+ regs.UCreg_05 = (unsigned long)fn;
+ regs.UCreg_06 = (unsigned long)do_exit;
+ regs.UCreg_07 = PRIV_MODE;
+ regs.UCreg_pc = (unsigned long)kernel_thread_helper;
+ regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT;
+
+ return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ struct stackframe frame;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ frame.fp = thread_saved_fp(p);
+ frame.sp = thread_saved_sp(p);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(p);
+ do {
+ int ret = unwind_frame(&frame);
+ if (ret < 0)
+ return 0;
+ if (!in_sched_functions(frame.pc))
+ return frame.pc;
+ } while ((count++) < 16);
+ return 0;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ unsigned long range_end = mm->brk + 0x02000000;
+ return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+}
+
+/*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Let's declare a mapping
+ * for it so it is visible through ptrace and /proc/<pid>/mem.
+ */
+
+int vectors_user_mapping(void)
+{
+ struct mm_struct *mm = current->mm;
+ return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC |
+ VM_ALWAYSDUMP | VM_RESERVED,
+ NULL);
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+}
diff --git a/arch/unicore32/kernel/ptrace.c b/arch/unicore32/kernel/ptrace.c
new file mode 100644
index 000000000000..9f07c08da050
--- /dev/null
+++ b/arch/unicore32/kernel/ptrace.c
@@ -0,0 +1,149 @@
+/*
+ * linux/arch/unicore32/kernel/ptrace.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * By Ross Biro 1/23/92
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+/*
+ * This routine gets a word from the process's privileged stack.
+ * The offset is how far from the base address stored in the THREAD.
+ * This routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline long get_user_reg(struct task_struct *task, int offset)
+{
+ return task_pt_regs(task)->uregs[offset];
+}
+
+/*
+ * This routine puts a word on the process's privileged stack.
+ * The offset is how far from the base address stored in the THREAD.
+ * This routine assumes that all the privileged stacks are in our
+ * data space.
+ */
+static inline int
+put_user_reg(struct task_struct *task, int offset, long data)
+{
+ struct pt_regs newregs, *regs = task_pt_regs(task);
+ int ret = -EINVAL;
+
+ newregs = *regs;
+ newregs.uregs[offset] = data;
+
+ if (valid_user_regs(&newregs)) {
+ regs->uregs[offset] = data;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ */
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+/*
+ * We actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
+ unsigned long __user *ret)
+{
+ unsigned long tmp;
+
+ tmp = 0;
+ if (off < sizeof(struct pt_regs))
+ tmp = get_user_reg(tsk, off >> 2);
+
+ return put_user(tmp, ret);
+}
+
+/*
+ * We actually access the pt_regs stored on the kernel stack.
+ */
+static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
+ unsigned long val)
+{
+ if (off >= sizeof(struct pt_regs))
+ return 0;
+
+ return put_user_reg(tsk, off >> 2, val);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (unsigned long __user *) data;
+
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ ret = ptrace_read_user(child, addr, datap);
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = ptrace_write_user(child, addr, data);
+ break;
+
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_pt_regs(child)->UCreg_16,
+ datap);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
+{
+ unsigned long ip;
+
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return scno;
+ if (!(current->ptrace & PT_PTRACED))
+ return scno;
+
+ /*
+ * Save IP. IP is used to denote syscall entry/exit:
+ * IP = 0 -> entry, = 1 -> exit
+ */
+ ip = regs->UCreg_ip;
+ regs->UCreg_ip = why;
+
+ current_thread_info()->syscall = scno;
+
+ /* the 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+ regs->UCreg_ip = ip;
+
+ return current_thread_info()->syscall;
+}
diff --git a/arch/unicore32/kernel/puv3-core.c b/arch/unicore32/kernel/puv3-core.c
new file mode 100644
index 000000000000..8b1b6beb858e
--- /dev/null
+++ b/arch/unicore32/kernel/puv3-core.c
@@ -0,0 +1,285 @@
+/*
+ * linux/arch/unicore32/kernel/puv3-core.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/amba/bus.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/cnt32_to_63.h>
+#include <linux/usb/musb.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+#include <mach/pm.h>
+
+/*
+ * This is the PKUnity sched_clock implementation.  It has
+ * a resolution of 271ns, and a maximum value of 32025597s (370 days).
+ *
+ * The return value is guaranteed to be monotonic in that range as
+ * long as there is always less than 582 seconds between successive
+ * calls to this function.
+ *
+ * ( * 1E9 / CLOCK_TICK_RATE ) -> about 2235/32
+ */
+unsigned long long sched_clock(void)
+{
+ unsigned long long v = cnt32_to_63(readl(OST_OSCR));
+
+ /* original conservative method, but overflow frequently
+ * v *= NSEC_PER_SEC >> 12;
+ * do_div(v, CLOCK_TICK_RATE >> 12);
+ */
+ v = ((v & 0x7fffffffffffffffULL) * 2235) >> 5;
+
+ return v;
+}
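+
+/*
+ * Where 2235/32 comes from (a back-of-the-envelope check): the
+ * factor is 69.84ns per tick, i.e. the conversion above assumes
+ * CLOCK_TICK_RATE is roughly 10^9 * 32 / 2235 ~= 14.318MHz.  The
+ * multiply-and-shift avoids a 64-bit division on every call.
+ */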
+
+static struct resource puv3_usb_resources[] = {
+ /* order is significant! */
+ {
+ .start = io_v2p(PKUNITY_USB_BASE),
+ .end = io_v2p(PKUNITY_USB_BASE) + 0x3ff,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = IRQ_USB,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = IRQ_USB,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct musb_hdrc_config puv3_usb_config[] = {
+ {
+ .num_eps = 16,
+ .multipoint = 1,
+#ifdef CONFIG_USB_INVENTRA_DMA
+ .dma = 1,
+ .dma_channels = 8,
+#endif
+ },
+};
+
+static struct musb_hdrc_platform_data puv3_usb_plat = {
+ .mode = MUSB_HOST,
+ .min_power = 100,
+ .clock = 0,
+ .config = puv3_usb_config,
+};
+
+static struct resource puv3_mmc_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_SDC_BASE),
+ .end = io_v2p(PKUNITY_SDC_BASE) + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_SDC,
+ .end = IRQ_SDC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource puv3_unigfx_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_UNIGFX_BASE),
+ .end = io_v2p(PKUNITY_UNIGFX_BASE) + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PKUNITY_UNIGFX_MMAP_BASE,
+ .end = PKUNITY_UNIGFX_MMAP_BASE + PKUNITY_UNIGFX_MMAP_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource puv3_rtc_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_RTC_BASE),
+ .end = io_v2p(PKUNITY_RTC_BASE) + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_RTCAlarm,
+ .end = IRQ_RTCAlarm,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = IRQ_RTC,
+ .end = IRQ_RTC,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource puv3_pwm_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_OST_BASE) + 0x80,
+ .end = io_v2p(PKUNITY_OST_BASE) + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct resource puv3_uart0_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_UART0_BASE),
+ .end = io_v2p(PKUNITY_UART0_BASE) + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_UART0,
+ .end = IRQ_UART0,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource puv3_uart1_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_UART1_BASE),
+ .end = io_v2p(PKUNITY_UART1_BASE) + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_UART1,
+ .end = IRQ_UART1,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+static struct resource puv3_umal_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_UMAL_BASE),
+ .end = io_v2p(PKUNITY_UMAL_BASE) + 0x1fff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_UMAL,
+ .end = IRQ_UMAL,
+ .flags = IORESOURCE_IRQ
+ }
+};
+
+#ifdef CONFIG_PUV3_PM
+
+#define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
+#define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
+
+/*
+ * List of global PKUnity peripheral registers to preserve.
+ * Others, such as CP and general-purpose register values, are
+ * preserved with the stack pointer in sleep.S.
+ */
+enum {
+ SLEEP_SAVE_PM_PLLDDRCFG,
+ SLEEP_SAVE_COUNT
+};
+
+
+static void puv3_cpu_pm_save(unsigned long *sleep_save)
+{
+/* SAVE(PM_PLLDDRCFG); */
+}
+
+static void puv3_cpu_pm_restore(unsigned long *sleep_save)
+{
+/* RESTORE(PM_PLLDDRCFG); */
+}
+
+static int puv3_cpu_pm_prepare(void)
+{
+ /* set resume return address */
+ writel(virt_to_phys(puv3_cpu_resume), PM_DIVCFG);
+ return 0;
+}
+
+static void puv3_cpu_pm_enter(suspend_state_t state)
+{
+ /* Clear reset status */
+ writel(RESETC_RSSR_HWR | RESETC_RSSR_WDR
+ | RESETC_RSSR_SMR | RESETC_RSSR_SWR, RESETC_RSSR);
+
+ switch (state) {
+/* case PM_SUSPEND_ON:
+ puv3_cpu_idle();
+ break; */
+ case PM_SUSPEND_MEM:
+ puv3_cpu_pm_prepare();
+ puv3_cpu_suspend(PM_PMCR_SFB);
+ break;
+ }
+}
+
+static int puv3_cpu_pm_valid(suspend_state_t state)
+{
+ return state == PM_SUSPEND_MEM;
+}
+
+static void puv3_cpu_pm_finish(void)
+{
+ /* ensure not to come back here if it wasn't intended */
+ /* PSPR = 0; */
+}
+
+static struct puv3_cpu_pm_fns puv3_cpu_pm_fnss = {
+ .save_count = SLEEP_SAVE_COUNT,
+ .valid = puv3_cpu_pm_valid,
+ .save = puv3_cpu_pm_save,
+ .restore = puv3_cpu_pm_restore,
+ .enter = puv3_cpu_pm_enter,
+ .prepare = puv3_cpu_pm_prepare,
+ .finish = puv3_cpu_pm_finish,
+};
+
+static void __init puv3_init_pm(void)
+{
+ puv3_cpu_pm_fns = &puv3_cpu_pm_fnss;
+}
+#else
+static inline void puv3_init_pm(void) {}
+#endif
+
+void puv3_ps2_init(void)
+{
+ struct clk *bclk32;
+
+ bclk32 = clk_get(NULL, "BUS32_CLK");
+ writel(clk_get_rate(bclk32) / 200000, PS2_CNT); /* should be > 5us */
+}
+
+void __init puv3_core_init(void)
+{
+ puv3_init_pm();
+ puv3_ps2_init();
+
+ platform_device_register_simple("PKUnity-v3-RTC", -1,
+ puv3_rtc_resources, ARRAY_SIZE(puv3_rtc_resources));
+ platform_device_register_simple("PKUnity-v3-UMAL", -1,
+ puv3_umal_resources, ARRAY_SIZE(puv3_umal_resources));
+ platform_device_register_simple("PKUnity-v3-MMC", -1,
+ puv3_mmc_resources, ARRAY_SIZE(puv3_mmc_resources));
+ platform_device_register_simple("PKUnity-v3-UNIGFX", -1,
+ puv3_unigfx_resources, ARRAY_SIZE(puv3_unigfx_resources));
+ platform_device_register_simple("PKUnity-v3-PWM", -1,
+ puv3_pwm_resources, ARRAY_SIZE(puv3_pwm_resources));
+ platform_device_register_simple("PKUnity-v3-UART", 0,
+ puv3_uart0_resources, ARRAY_SIZE(puv3_uart0_resources));
+ platform_device_register_simple("PKUnity-v3-UART", 1,
+ puv3_uart1_resources, ARRAY_SIZE(puv3_uart1_resources));
+ platform_device_register_simple("PKUnity-v3-AC97", -1, NULL, 0);
+ platform_device_register_resndata(&platform_bus, "musb_hdrc", -1,
+ puv3_usb_resources, ARRAY_SIZE(puv3_usb_resources),
+ &puv3_usb_plat, sizeof(puv3_usb_plat));
+}
+
diff --git a/arch/unicore32/kernel/puv3-nb0916.c b/arch/unicore32/kernel/puv3-nb0916.c
new file mode 100644
index 000000000000..e731c561ed4e
--- /dev/null
+++ b/arch/unicore32/kernel/puv3-nb0916.c
@@ -0,0 +1,145 @@
+/*
+ * linux/arch/unicore32/kernel/puv3-nb0916.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/physmap.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/pwm_backlight.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+#include <linux/input.h>
+
+#include <mach/hardware.h>
+
+static struct physmap_flash_data physmap_flash_data = {
+ .width = 1,
+};
+
+static struct resource physmap_flash_resource = {
+ .start = 0xFFF80000,
+ .end = 0xFFFFFFFF,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource puv3_i2c_resources[] = {
+ [0] = {
+ .start = io_v2p(PKUNITY_I2C_BASE),
+ .end = io_v2p(PKUNITY_I2C_BASE) + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = IRQ_I2C,
+ .end = IRQ_I2C,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct platform_pwm_backlight_data nb0916_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = 100,
+ .dft_brightness = 100,
+ .pwm_period_ns = 70 * 1024,
+};
+
+static struct gpio_keys_button nb0916_gpio_keys[] = {
+ {
+ .type = EV_KEY,
+ .code = KEY_POWER,
+ .gpio = GPI_SOFF_REQ,
+ .desc = "Power Button",
+ .wakeup = 1,
+ .active_low = 1,
+ },
+ {
+ .type = EV_KEY,
+ .code = BTN_TOUCH,
+ .gpio = GPI_BTN_TOUCH,
+ .desc = "Touchpad Button",
+ .wakeup = 1,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_platform_data nb0916_gpio_button_data = {
+ .buttons = nb0916_gpio_keys,
+ .nbuttons = ARRAY_SIZE(nb0916_gpio_keys),
+};
+
+static irqreturn_t nb0916_lcdcaseoff_handler(int irq, void *dev_id)
+{
+ if (gpio_get_value(GPI_LCD_CASE_OFF))
+ gpio_set_value(GPO_LCD_EN, 1);
+ else
+ gpio_set_value(GPO_LCD_EN, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nb0916_overheat_handler(int irq, void *dev_id)
+{
+ machine_halt();
+ /* SYSTEM HALT, NO RETURN */
+ return IRQ_HANDLED;
+}
+
+static struct i2c_board_info __initdata puv3_i2c_devices[] = {
+ { I2C_BOARD_INFO("lm75", I2C_TAR_THERMAL), },
+ { I2C_BOARD_INFO("bq27200", I2C_TAR_PWIC), },
+ { I2C_BOARD_INFO("24c02", I2C_TAR_EEPROM), },
+};
+
+int __init mach_nb0916_init(void)
+{
+ i2c_register_board_info(0, puv3_i2c_devices,
+ ARRAY_SIZE(puv3_i2c_devices));
+
+ platform_device_register_simple("PKUnity-v3-I2C", -1,
+ puv3_i2c_resources, ARRAY_SIZE(puv3_i2c_resources));
+
+ platform_device_register_data(&platform_bus, "pwm-backlight", -1,
+ &nb0916_backlight_data, sizeof(nb0916_backlight_data));
+
+ platform_device_register_data(&platform_bus, "gpio-keys", -1,
+ &nb0916_gpio_button_data, sizeof(nb0916_gpio_button_data));
+
+ platform_device_register_resndata(&platform_bus, "physmap-flash", -1,
+ &physmap_flash_resource, 1,
+ &physmap_flash_data, sizeof(physmap_flash_data));
+
+ if (request_irq(gpio_to_irq(GPI_LCD_CASE_OFF),
+ &nb0916_lcdcaseoff_handler,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "NB0916 lcd case off", NULL) < 0) {
+
+ printk(KERN_DEBUG "LCD-Case-OFF IRQ %d not available\n",
+ gpio_to_irq(GPI_LCD_CASE_OFF));
+ }
+
+ if (request_irq(gpio_to_irq(GPI_OTP_INT), &nb0916_overheat_handler,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "NB0916 overheating protection", NULL) < 0) {
+
+ printk(KERN_DEBUG "Overheating Protection IRQ %d not available\n",
+ gpio_to_irq(GPI_OTP_INT));
+ }
+
+ return 0;
+}
+
+subsys_initcall_sync(mach_nb0916_init);
diff --git a/arch/unicore32/kernel/pwm.c b/arch/unicore32/kernel/pwm.c
new file mode 100644
index 000000000000..4615d51e3ba6
--- /dev/null
+++ b/arch/unicore32/kernel/pwm.c
@@ -0,0 +1,263 @@
+/*
+ * linux/arch/unicore32/kernel/pwm.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/pwm.h>
+
+#include <asm/div64.h>
+#include <mach/hardware.h>
+
+struct pwm_device {
+ struct list_head node;
+ struct platform_device *pdev;
+
+ const char *label;
+ struct clk *clk;
+ int clk_enabled;
+
+ unsigned int use_count;
+ unsigned int pwm_id;
+};
+
+/*
+ * period_ns = 10^9 * (PRESCALE + 1) * (PV + 1) / PWM_CLK_RATE
+ * duty_ns = 10^9 * (PRESCALE + 1) * DC / PWM_CLK_RATE
+ */
+int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ unsigned long long c;
+ unsigned long period_cycles, prescale, pv, dc;
+
+ if (pwm == NULL || period_ns == 0 || duty_ns > period_ns)
+ return -EINVAL;
+
+ c = clk_get_rate(pwm->clk);
+ c = c * period_ns;
+ do_div(c, 1000000000);
+ period_cycles = c;
+
+ if (period_cycles < 1)
+ period_cycles = 1;
+ prescale = (period_cycles - 1) / 1024;
+ pv = period_cycles / (prescale + 1) - 1;
+
+ if (prescale > 63)
+ return -EINVAL;
+
+ if (duty_ns == period_ns)
+ dc = OST_PWMDCCR_FDCYCLE;
+ else
+ dc = (pv + 1) * duty_ns / period_ns;
+
+ /* NOTE: the clock to PWM has to be enabled first
+ * before writing to the registers
+ */
+ clk_enable(pwm->clk);
+ OST_PWMPWCR = prescale;
+ OST_PWMDCCR = pv - dc;
+ OST_PWMPCR = pv;
+ clk_disable(pwm->clk);
+
+ return 0;
+}
+EXPORT_SYMBOL(pwm_config);
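+
+/*
+ * Worked example for pwm_config() (hypothetical numbers, assuming a
+ * 1MHz OST clock): period_ns = 1000000 gives period_cycles = 1000,
+ * hence prescale = 0 and pv = 999; duty_ns = 500000 then gives
+ * dc = 500, and OST_PWMDCCR is programmed with pv - dc = 499.
+ */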
+
+int pwm_enable(struct pwm_device *pwm)
+{
+ int rc = 0;
+
+ if (!pwm->clk_enabled) {
+ rc = clk_enable(pwm->clk);
+ if (!rc)
+ pwm->clk_enabled = 1;
+ }
+ return rc;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+void pwm_disable(struct pwm_device *pwm)
+{
+ if (pwm->clk_enabled) {
+ clk_disable(pwm->clk);
+ pwm->clk_enabled = 0;
+ }
+}
+EXPORT_SYMBOL(pwm_disable);
+
+static DEFINE_MUTEX(pwm_lock);
+static LIST_HEAD(pwm_list);
+
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+ struct pwm_device *pwm;
+ int found = 0;
+
+ mutex_lock(&pwm_lock);
+
+ list_for_each_entry(pwm, &pwm_list, node) {
+ if (pwm->pwm_id == pwm_id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (found) {
+ if (pwm->use_count == 0) {
+ pwm->use_count++;
+ pwm->label = label;
+ } else
+ pwm = ERR_PTR(-EBUSY);
+ } else
+ pwm = ERR_PTR(-ENOENT);
+
+ mutex_unlock(&pwm_lock);
+ return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+void pwm_free(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+
+ if (pwm->use_count) {
+ pwm->use_count--;
+ pwm->label = NULL;
+ } else
+ pr_warning("PWM device already freed\n");
+
+ mutex_unlock(&pwm_lock);
+}
+EXPORT_SYMBOL(pwm_free);
+
+static inline void __add_pwm(struct pwm_device *pwm)
+{
+ mutex_lock(&pwm_lock);
+ list_add_tail(&pwm->node, &pwm_list);
+ mutex_unlock(&pwm_lock);
+}
+
+static struct pwm_device *pwm_probe(struct platform_device *pdev,
+ unsigned int pwm_id, struct pwm_device *parent_pwm)
+{
+ struct pwm_device *pwm;
+ struct resource *r;
+ int ret = 0;
+
+ pwm = kzalloc(sizeof(struct pwm_device), GFP_KERNEL);
+ if (pwm == NULL) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pwm->clk = clk_get(NULL, "OST_CLK");
+ if (IS_ERR(pwm->clk)) {
+ ret = PTR_ERR(pwm->clk);
+ goto err_free;
+ }
+ pwm->clk_enabled = 0;
+
+ pwm->use_count = 0;
+ pwm->pwm_id = pwm_id;
+ pwm->pdev = pdev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ ret = -ENODEV;
+ goto err_free_clk;
+ }
+
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "failed to request memory resource\n");
+ ret = -EBUSY;
+ goto err_free_clk;
+ }
+
+ __add_pwm(pwm);
+ platform_set_drvdata(pdev, pwm);
+ return pwm;
+
+err_free_clk:
+ clk_put(pwm->clk);
+err_free:
+ kfree(pwm);
+ return ERR_PTR(ret);
+}
+
+static int __devinit puv3_pwm_probe(struct platform_device *pdev)
+{
+ struct pwm_device *pwm = pwm_probe(pdev, pdev->id, NULL);
+
+ if (IS_ERR(pwm))
+ return PTR_ERR(pwm);
+
+ return 0;
+}
+
+static int __devexit pwm_remove(struct platform_device *pdev)
+{
+ struct pwm_device *pwm;
+ struct resource *r;
+
+ pwm = platform_get_drvdata(pdev);
+ if (pwm == NULL)
+ return -ENODEV;
+
+ mutex_lock(&pwm_lock);
+ list_del(&pwm->node);
+ mutex_unlock(&pwm_lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(r->start, resource_size(r));
+
+ clk_put(pwm->clk);
+ kfree(pwm);
+ return 0;
+}
+
+static struct platform_driver puv3_pwm_driver = {
+ .driver = {
+ .name = "PKUnity-v3-PWM",
+ },
+ .probe = puv3_pwm_probe,
+ .remove = __devexit_p(pwm_remove),
+};
+
+static int __init pwm_init(void)
+{
+ int ret = 0;
+
+ ret = platform_driver_register(&puv3_pwm_driver);
+ if (ret) {
+ printk(KERN_ERR "failed to register puv3_pwm_driver\n");
+ return ret;
+ }
+
+ return ret;
+}
+arch_initcall(pwm_init);
+
+static void __exit pwm_exit(void)
+{
+ platform_driver_unregister(&puv3_pwm_driver);
+}
+module_exit(pwm_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/arch/unicore32/kernel/rtc.c b/arch/unicore32/kernel/rtc.c
new file mode 100644
index 000000000000..c5f068295b51
--- /dev/null
+++ b/arch/unicore32/kernel/rtc.c
@@ -0,0 +1,380 @@
+/*
+ * linux/arch/unicore32/kernel/rtc.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <mach/hardware.h>
+
+static struct resource *puv3_rtc_mem;
+
+static int puv3_rtc_alarmno = IRQ_RTCAlarm;
+static int puv3_rtc_tickno = IRQ_RTC;
+
+static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
+
+/* IRQ Handlers */
+
+static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
+{
+ struct rtc_device *rdev = id;
+
+ writel(readl(RTC_RTSR) | RTC_RTSR_AL, RTC_RTSR);
+ rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t puv3_rtc_tickirq(int irq, void *id)
+{
+ struct rtc_device *rdev = id;
+
+ writel(readl(RTC_RTSR) | RTC_RTSR_HZ, RTC_RTSR);
+ rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF);
+ return IRQ_HANDLED;
+}
+
+/* Update control registers */
+static void puv3_rtc_setaie(int to)
+{
+ unsigned int tmp;
+
+ pr_debug("%s: aie=%d\n", __func__, to);
+
+ tmp = readl(RTC_RTSR) & ~RTC_RTSR_ALE;
+
+ if (to)
+ tmp |= RTC_RTSR_ALE;
+
+ writel(tmp, RTC_RTSR);
+}
+
+static int puv3_rtc_setpie(struct device *dev, int enabled)
+{
+ unsigned int tmp;
+
+ pr_debug("%s: pie=%d\n", __func__, enabled);
+
+ spin_lock_irq(&puv3_rtc_pie_lock);
+ tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
+
+ if (enabled)
+ tmp |= RTC_RTSR_HZE;
+
+ writel(tmp, RTC_RTSR);
+ spin_unlock_irq(&puv3_rtc_pie_lock);
+
+ return 0;
+}
+
+static int puv3_rtc_setfreq(struct device *dev, int freq)
+{
+ return 0;
+}
+
+/* Time read/write */
+
+static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
+{
+ rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
+
+ pr_debug("read time %02x.%02x.%02x %02x/%02x/%02x\n",
+ rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday,
+ rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec);
+
+ return 0;
+}
+
+static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long rtc_count = 0;
+
+ pr_debug("set time %02d.%02d.%02d %02d/%02d/%02d\n",
+ tm->tm_year, tm->tm_mon, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
+
+ rtc_tm_to_time(tm, &rtc_count);
+ writel(rtc_count, RTC_RCNR);
+
+ return 0;
+}
+
+static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *alm_tm = &alrm->time;
+
+ rtc_time_to_tm(readl(RTC_RTAR), alm_tm);
+
+ alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE;
+
+ pr_debug("read alarm %02x %02x.%02x.%02x %02x/%02x/%02x\n",
+ alrm->enabled,
+ alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday,
+ alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec);
+
+ return 0;
+}
+
+static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rtc_time *tm = &alrm->time;
+ unsigned long rtcalarm_count = 0;
+
+ pr_debug("puv3_rtc_setalarm: %d, %02x/%02x/%02x %02x.%02x.%02x\n",
+ alrm->enabled,
+ tm->tm_mday & 0xff, tm->tm_mon & 0xff, tm->tm_year & 0xff,
+ tm->tm_hour & 0xff, tm->tm_min & 0xff, tm->tm_sec);
+
+ rtc_tm_to_time(tm, &rtcalarm_count);
+ writel(rtcalarm_count, RTC_RTAR);
+
+ puv3_rtc_setaie(alrm->enabled);
+
+ if (alrm->enabled)
+ enable_irq_wake(puv3_rtc_alarmno);
+ else
+ disable_irq_wake(puv3_rtc_alarmno);
+
+ return 0;
+}
+
+static int puv3_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+ seq_printf(seq, "periodic_IRQ\t: %s\n",
+ (readl(RTC_RTSR) & RTC_RTSR_HZE) ? "yes" : "no");
+ return 0;
+}
+
+static int puv3_rtc_open(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq,
+ IRQF_DISABLED, "pkunity-rtc alarm", rtc_dev);
+
+ if (ret) {
+ dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
+ return ret;
+ }
+
+ ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq,
+ IRQF_DISABLED, "pkunity-rtc tick", rtc_dev);
+
+ if (ret) {
+ dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
+ goto tick_err;
+ }
+
+ return ret;
+
+ tick_err:
+ free_irq(puv3_rtc_alarmno, rtc_dev);
+ return ret;
+}
+
+static void puv3_rtc_release(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
+
+ /* do not clear AIE here, it may be needed for wake */
+
+ puv3_rtc_setpie(dev, 0);
+ free_irq(puv3_rtc_alarmno, rtc_dev);
+ free_irq(puv3_rtc_tickno, rtc_dev);
+}
+
+static const struct rtc_class_ops puv3_rtcops = {
+ .open = puv3_rtc_open,
+ .release = puv3_rtc_release,
+ .read_time = puv3_rtc_gettime,
+ .set_time = puv3_rtc_settime,
+ .read_alarm = puv3_rtc_getalarm,
+ .set_alarm = puv3_rtc_setalarm,
+ .irq_set_freq = puv3_rtc_setfreq,
+ .irq_set_state = puv3_rtc_setpie,
+ .proc = puv3_rtc_proc,
+};
+
+static void puv3_rtc_enable(struct platform_device *pdev, int en)
+{
+ if (!en) {
+ writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
+ } else {
+ /* re-enable the device, and check it is ok */
+
+ if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
+ dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
+ writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
+ }
+ }
+}
+
+static int puv3_rtc_remove(struct platform_device *dev)
+{
+ struct rtc_device *rtc = platform_get_drvdata(dev);
+
+ platform_set_drvdata(dev, NULL);
+ rtc_device_unregister(rtc);
+
+ puv3_rtc_setpie(&dev->dev, 0);
+ puv3_rtc_setaie(0);
+
+ release_resource(puv3_rtc_mem);
+ kfree(puv3_rtc_mem);
+
+ return 0;
+}
+
+static int puv3_rtc_probe(struct platform_device *pdev)
+{
+ struct rtc_device *rtc;
+ struct resource *res;
+ int ret;
+
+ pr_debug("%s: probe=%p\n", __func__, pdev);
+
+ /* find the IRQs */
+
+ puv3_rtc_tickno = platform_get_irq(pdev, 1);
+ if (puv3_rtc_tickno < 0) {
+ dev_err(&pdev->dev, "no irq for rtc tick\n");
+ return -ENOENT;
+ }
+
+ puv3_rtc_alarmno = platform_get_irq(pdev, 0);
+ if (puv3_rtc_alarmno < 0) {
+ dev_err(&pdev->dev, "no irq for alarm\n");
+ return -ENOENT;
+ }
+
+ pr_debug("PKUnity_rtc: tick irq %d, alarm irq %d\n",
+ puv3_rtc_tickno, puv3_rtc_alarmno);
+
+ /* get the memory region */
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "failed to get memory region resource\n");
+ return -ENOENT;
+ }
+
+ puv3_rtc_mem = request_mem_region(res->start,
+ resource_size(res),
+ pdev->name);
+
+ if (puv3_rtc_mem == NULL) {
+ dev_err(&pdev->dev, "failed to reserve memory region\n");
+ ret = -ENOENT;
+ goto err_nores;
+ }
+
+ puv3_rtc_enable(pdev, 1);
+
+ puv3_rtc_setfreq(&pdev->dev, 1);
+
+ /* register RTC and exit */
+
+ rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops,
+ THIS_MODULE);
+
+ if (IS_ERR(rtc)) {
+ dev_err(&pdev->dev, "cannot attach rtc\n");
+ ret = PTR_ERR(rtc);
+ goto err_nortc;
+ }
+
+ /* platform setup code should have handled this; sigh */
+ if (!device_can_wakeup(&pdev->dev))
+ device_init_wakeup(&pdev->dev, 1);
+
+ platform_set_drvdata(pdev, rtc);
+ return 0;
+
+ err_nortc:
+ puv3_rtc_enable(pdev, 0);
+ release_resource(puv3_rtc_mem);
+ kfree(puv3_rtc_mem);
+
+ err_nores:
+ return ret;
+}
+
+#ifdef CONFIG_PM
+
+/* RTC Power management control */
+
+static int ticnt_save;
+
+static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ /* save the alarm register (RTAR) across suspend/resume */
+ ticnt_save = readl(RTC_RTAR);
+ puv3_rtc_enable(pdev, 0);
+ return 0;
+}
+
+static int puv3_rtc_resume(struct platform_device *pdev)
+{
+ puv3_rtc_enable(pdev, 1);
+ writel(ticnt_save, RTC_RTAR);
+ return 0;
+}
+#else
+#define puv3_rtc_suspend NULL
+#define puv3_rtc_resume NULL
+#endif
+
+static struct platform_driver puv3_rtcdrv = {
+ .probe = puv3_rtc_probe,
+ .remove = __devexit_p(puv3_rtc_remove),
+ .suspend = puv3_rtc_suspend,
+ .resume = puv3_rtc_resume,
+ .driver = {
+ .name = "PKUnity-v3-RTC",
+ .owner = THIS_MODULE,
+ }
+};
+
+static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n";
+
+static int __init puv3_rtc_init(void)
+{
+ printk(banner);
+ return platform_driver_register(&puv3_rtcdrv);
+}
+
+static void __exit puv3_rtc_exit(void)
+{
+ platform_driver_unregister(&puv3_rtcdrv);
+}
+
+module_init(puv3_rtc_init);
+module_exit(puv3_rtc_exit);
+
+MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
+MODULE_AUTHOR("Hu Dongliang");
+MODULE_LICENSE("GPL v2");
+
diff --git a/arch/unicore32/kernel/setup.c b/arch/unicore32/kernel/setup.c
new file mode 100644
index 000000000000..1e175a82844d
--- /dev/null
+++ b/arch/unicore32/kernel/setup.c
@@ -0,0 +1,360 @@
+/*
+ * linux/arch/unicore32/kernel/setup.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/utsname.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/screen_info.h>
+#include <linux/init.h>
+#include <linux/root_dev.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/memblock.h>
+#include <linux/elf.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+#include "setup.h"
+
+#ifndef MEM_SIZE
+#define MEM_SIZE (16*1024*1024)
+#endif
+
+struct stack {
+ u32 irq[3];
+ u32 abt[3];
+ u32 und[3];
+} ____cacheline_aligned;
+
+static struct stack stacks[NR_CPUS];
+
+char elf_platform[ELF_PLATFORM_SIZE];
+EXPORT_SYMBOL(elf_platform);
+
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
+
+static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+
+/*
+ * Standard memory resources
+ */
+static struct resource mem_res[] = {
+ {
+ .name = "Video RAM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel text",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ },
+ {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM
+ }
+};
+
+#define video_ram mem_res[0]
+#define kernel_code mem_res[1]
+#define kernel_data mem_res[2]
+
+/*
+ * These functions re-use the assembly code in head.S, which
+ * already provide the required functionality.
+ */
+static void __init setup_processor(void)
+{
+ printk(KERN_DEFAULT "CPU: UniCore-II [%08x] revision %d, cr=%08lx\n",
+ uc32_cpuid, (int)(uc32_cpuid >> 16) & 15, cr_alignment);
+
+ sprintf(init_utsname()->machine, "puv3");
+ sprintf(elf_platform, "ucv2");
+}
+
+/*
+ * cpu_init - initialise one CPU.
+ *
+ * cpu_init sets up the per-CPU stacks.
+ */
+void cpu_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct stack *stk = &stacks[cpu];
+
+ /*
+ * setup stacks for re-entrant exception handlers
+ */
+ __asm__ (
+ "mov.a asr, %1\n\t"
+ "add sp, %0, %2\n\t"
+ "mov.a asr, %3\n\t"
+ "add sp, %0, %4\n\t"
+ "mov.a asr, %5\n\t"
+ "add sp, %0, %6\n\t"
+ "mov.a asr, %7"
+ :
+ : "r" (stk),
+ "r" (PSR_R_BIT | PSR_I_BIT | INTR_MODE),
+ "I" (offsetof(struct stack, irq[0])),
+ "r" (PSR_R_BIT | PSR_I_BIT | ABRT_MODE),
+ "I" (offsetof(struct stack, abt[0])),
+ "r" (PSR_R_BIT | PSR_I_BIT | EXTN_MODE),
+ "I" (offsetof(struct stack, und[0])),
+ "r" (PSR_R_BIT | PSR_I_BIT | PRIV_MODE)
+ : "r30", "cc");
+}
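+
+/*
+ * A note on the asm in cpu_init(): each "mov.a asr" switches the
+ * processor into one of the exception modes (INTR, ABRT, EXTN), the
+ * following "add sp" points that mode's stack pointer at the matching
+ * slot of this cpu's 'stacks' entry, and the final "mov.a asr" drops
+ * back to PRIV_MODE.
+ */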
+
+static int __init uc32_add_memory(unsigned long start, unsigned long size)
+{
+ struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+
+ if (meminfo.nr_banks >= NR_BANKS) {
+ printk(KERN_CRIT "NR_BANKS too low, "
+ "ignoring memory at %#lx\n", start);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure that start/size are aligned to a page boundary.
+ * Size is appropriately rounded down, start is rounded up.
+ */
+ size -= start & ~PAGE_MASK;
+
+ bank->start = PAGE_ALIGN(start);
+ bank->size = size & PAGE_MASK;
+
+ /*
+	 * Check whether this memory region still has a non-zero size
+	 * after the alignment above.
+ */
+ if (bank->size == 0)
+ return -EINVAL;
+
+ meminfo.nr_banks++;
+ return 0;
+}
+
+/*
+ * Pick out the memory size. We look for mem=size@start,
+ * where start and size are given as "size[KkMm]"
+ */
+static int __init early_mem(char *p)
+{
+ static int usermem __initdata = 1;
+ unsigned long size, start;
+ char *endp;
+
+ /*
+ * If the user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem) {
+ usermem = 0;
+ meminfo.nr_banks = 0;
+ }
+
+ start = PHYS_OFFSET;
+ size = memparse(p, &endp);
+ if (*endp == '@')
+ start = memparse(endp + 1, NULL);
+
+ uc32_add_memory(start, size);
+
+ return 0;
+}
+early_param("mem", early_mem);
+
+static void __init
+request_standard_resources(struct meminfo *mi)
+{
+ struct resource *res;
+ int i;
+
+ kernel_code.start = virt_to_phys(_stext);
+ kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_data.start = virt_to_phys(_sdata);
+ kernel_data.end = virt_to_phys(_end - 1);
+
+ for (i = 0; i < mi->nr_banks; i++) {
+ if (mi->bank[i].size == 0)
+ continue;
+
+ res = alloc_bootmem_low(sizeof(*res));
+ res->name = "System RAM";
+ res->start = mi->bank[i].start;
+ res->end = mi->bank[i].start + mi->bank[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ request_resource(&iomem_resource, res);
+
+ if (kernel_code.start >= res->start &&
+ kernel_code.end <= res->end)
+ request_resource(res, &kernel_code);
+ if (kernel_data.start >= res->start &&
+ kernel_data.end <= res->end)
+ request_resource(res, &kernel_data);
+ }
+
+ video_ram.start = PKUNITY_UNIGFX_MMAP_BASE;
+ video_ram.end = PKUNITY_UNIGFX_MMAP_BASE + PKUNITY_UNIGFX_MMAP_SIZE;
+ request_resource(&iomem_resource, &video_ram);
+}
+
+static void (*init_machine)(void) __initdata;
+
+static int __init customize_machine(void)
+{
+ /* customizes platform devices, or adds new ones */
+ if (init_machine)
+ init_machine();
+ return 0;
+}
+arch_initcall(customize_machine);
+
+void __init setup_arch(char **cmdline_p)
+{
+ char *from = default_command_line;
+
+ setup_processor();
+
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+
+ /* parse_early_param needs a boot_command_line */
+ strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+ /* populate cmd_line too for later use, preserving boot_command_line */
+ strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = cmd_line;
+
+ parse_early_param();
+
+ uc32_memblock_init(&meminfo);
+
+ paging_init();
+ request_standard_resources(&meminfo);
+
+ cpu_init();
+
+ /*
+ * Set up various architecture-specific pointers
+ */
+ init_machine = puv3_core_init;
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+ early_trap_init();
+}
+
+static struct cpu cpuinfo_unicore;
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ register_cpu(&cpuinfo_unicore, i);
+
+ return 0;
+}
+subsys_initcall(topology_init);
+
+#ifdef CONFIG_HAVE_PROC_CPU
+static int __init proc_cpu_init(void)
+{
+ struct proc_dir_entry *res;
+
+ res = proc_mkdir("cpu", NULL);
+ if (!res)
+ return -ENOMEM;
+ return 0;
+}
+fs_initcall(proc_cpu_init);
+#endif
+
+static int c_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "Processor\t: UniCore-II rev %d (%s)\n",
+ (int)(uc32_cpuid >> 16) & 15, elf_platform);
+
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000/HZ),
+ (loops_per_jiffy / (5000/HZ)) % 100);
+
+ /* dump out the processor features */
+ seq_puts(m, "Features\t: CMOV UC-F64");
+
+ seq_printf(m, "\nCPU implementer\t: 0x%02x\n", uc32_cpuid >> 24);
+ seq_printf(m, "CPU architecture: 2\n");
+ seq_printf(m, "CPU revision\t: %d\n", (uc32_cpuid >> 16) & 15);
+
+ seq_printf(m, "Cache type\t: write-back\n"
+ "Cache clean\t: cp0 c5 ops\n"
+ "Cache lockdown\t: not support\n"
+ "Cache format\t: Harvard\n");
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, "Hardware\t: PKUnity v3\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
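
The mem=size[KkMm][@start] handling in early_mem() above is easiest to see with
the parsing pulled out on its own.  Below is a minimal userspace sketch of the
same logic; parse_size() is a made-up stand-in for the kernel's memparse(), not
a kernel API:

#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_size(const char *s, char **endp)
{
	unsigned long val = strtoul(s, endp, 0);

	switch (**endp) {		/* optional size suffix */
	case 'K': case 'k':
		val <<= 10; (*endp)++; break;
	case 'M': case 'm':
		val <<= 20; (*endp)++; break;
	}
	return val;
}

int main(void)
{
	const char *arg = "64M@0x40000000";	/* as in mem=64M@0x40000000 */
	char *endp;
	unsigned long size, start = 0;		/* start defaults to PHYS_OFFSET */

	size = parse_size(arg, &endp);
	if (*endp == '@')
		start = parse_size(endp + 1, &endp);

	printf("bank: start=%#lx size=%#lx\n", start, size);
	return 0;
}
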
diff --git a/arch/unicore32/kernel/setup.h b/arch/unicore32/kernel/setup.h
new file mode 100644
index 000000000000..dcd1306eb5c6
--- /dev/null
+++ b/arch/unicore32/kernel/setup.h
@@ -0,0 +1,30 @@
+/*
+ * linux/arch/unicore32/kernel/setup.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __UNICORE_KERNEL_SETUP_H__
+#define __UNICORE_KERNEL_SETUP_H__
+
+extern void paging_init(void);
+extern void puv3_core_init(void);
+
+extern void puv3_ps2_init(void);
+extern void pci_puv3_preinit(void);
+extern void __init puv3_init_gpio(void);
+
+extern void setup_mm_for_reboot(char mode);
+
+extern char __stubs_start[], __stubs_end[];
+extern char __vectors_start[], __vectors_end[];
+
+extern void kernel_thread_helper(void);
+
+extern void __init early_signal_init(void);
+#endif
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c
new file mode 100644
index 000000000000..b163fca56789
--- /dev/null
+++ b/arch/unicore32/kernel/signal.c
@@ -0,0 +1,494 @@
+/*
+ * linux/arch/unicore32/kernel/signal.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/unistd.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * For UniCore syscalls, we encode the syscall number into the instruction.
+ */
+#define SWI_SYS_SIGRETURN (0xff000000) /* error number for new abi */
+#define SWI_SYS_RT_SIGRETURN (0xff000000 | (__NR_rt_sigreturn))
+#define SWI_SYS_RESTART (0xff000000 | (__NR_restart_syscall))
+
+#define KERN_SIGRETURN_CODE (KUSER_VECPAGE_BASE + 0x00000500)
+#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
+
+const unsigned long sigreturn_codes[3] = {
+ SWI_SYS_SIGRETURN, SWI_SYS_RT_SIGRETURN,
+};
+
+const unsigned long syscall_restart_code[2] = {
+ SWI_SYS_RESTART, /* swi __NR_restart_syscall */
+ 0x69efc004, /* ldr pc, [sp], #4 */
+};
+
+/*
+ * Do a signal return; undo the signal stack.  These frames are 64-bit aligned.
+ */
+struct sigframe {
+ struct ucontext uc;
+ unsigned long retcode[2];
+};
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct sigframe sig;
+};
+
+static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (err == 0) {
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ spin_lock_irq(&current->sighand->siglock);
+ current->blocked = set;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ }
+
+ err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
+ err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
+ err |= __get_user(regs->UCreg_02, &sf->uc.uc_mcontext.regs.UCreg_02);
+ err |= __get_user(regs->UCreg_03, &sf->uc.uc_mcontext.regs.UCreg_03);
+ err |= __get_user(regs->UCreg_04, &sf->uc.uc_mcontext.regs.UCreg_04);
+ err |= __get_user(regs->UCreg_05, &sf->uc.uc_mcontext.regs.UCreg_05);
+ err |= __get_user(regs->UCreg_06, &sf->uc.uc_mcontext.regs.UCreg_06);
+ err |= __get_user(regs->UCreg_07, &sf->uc.uc_mcontext.regs.UCreg_07);
+ err |= __get_user(regs->UCreg_08, &sf->uc.uc_mcontext.regs.UCreg_08);
+ err |= __get_user(regs->UCreg_09, &sf->uc.uc_mcontext.regs.UCreg_09);
+ err |= __get_user(regs->UCreg_10, &sf->uc.uc_mcontext.regs.UCreg_10);
+ err |= __get_user(regs->UCreg_11, &sf->uc.uc_mcontext.regs.UCreg_11);
+ err |= __get_user(regs->UCreg_12, &sf->uc.uc_mcontext.regs.UCreg_12);
+ err |= __get_user(regs->UCreg_13, &sf->uc.uc_mcontext.regs.UCreg_13);
+ err |= __get_user(regs->UCreg_14, &sf->uc.uc_mcontext.regs.UCreg_14);
+ err |= __get_user(regs->UCreg_15, &sf->uc.uc_mcontext.regs.UCreg_15);
+ err |= __get_user(regs->UCreg_16, &sf->uc.uc_mcontext.regs.UCreg_16);
+ err |= __get_user(regs->UCreg_17, &sf->uc.uc_mcontext.regs.UCreg_17);
+ err |= __get_user(regs->UCreg_18, &sf->uc.uc_mcontext.regs.UCreg_18);
+ err |= __get_user(regs->UCreg_19, &sf->uc.uc_mcontext.regs.UCreg_19);
+ err |= __get_user(regs->UCreg_20, &sf->uc.uc_mcontext.regs.UCreg_20);
+ err |= __get_user(regs->UCreg_21, &sf->uc.uc_mcontext.regs.UCreg_21);
+ err |= __get_user(regs->UCreg_22, &sf->uc.uc_mcontext.regs.UCreg_22);
+ err |= __get_user(regs->UCreg_23, &sf->uc.uc_mcontext.regs.UCreg_23);
+ err |= __get_user(regs->UCreg_24, &sf->uc.uc_mcontext.regs.UCreg_24);
+ err |= __get_user(regs->UCreg_25, &sf->uc.uc_mcontext.regs.UCreg_25);
+ err |= __get_user(regs->UCreg_26, &sf->uc.uc_mcontext.regs.UCreg_26);
+ err |= __get_user(regs->UCreg_fp, &sf->uc.uc_mcontext.regs.UCreg_fp);
+ err |= __get_user(regs->UCreg_ip, &sf->uc.uc_mcontext.regs.UCreg_ip);
+ err |= __get_user(regs->UCreg_sp, &sf->uc.uc_mcontext.regs.UCreg_sp);
+ err |= __get_user(regs->UCreg_lr, &sf->uc.uc_mcontext.regs.UCreg_lr);
+ err |= __get_user(regs->UCreg_pc, &sf->uc.uc_mcontext.regs.UCreg_pc);
+ err |= __get_user(regs->UCreg_asr, &sf->uc.uc_mcontext.regs.UCreg_asr);
+
+ err |= !valid_user_regs(regs);
+
+ return err;
+}
+
+asmlinkage int __sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary,
+	 * 'sp' should be word aligned here.  If it's not,
+	 * then the user is trying to mess with us.
+ */
+ if (regs->UCreg_sp & 7)
+ goto badframe;
+
+ frame = (struct rt_sigframe __user *)regs->UCreg_sp;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+
+ if (restore_sigframe(regs, &frame->sig))
+ goto badframe;
+
+ if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->UCreg_sp)
+ == -EFAULT)
+ goto badframe;
+
+ return regs->UCreg_00;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+static int setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+{
+ int err = 0;
+
+ err |= __put_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
+ err |= __put_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
+ err |= __put_user(regs->UCreg_02, &sf->uc.uc_mcontext.regs.UCreg_02);
+ err |= __put_user(regs->UCreg_03, &sf->uc.uc_mcontext.regs.UCreg_03);
+ err |= __put_user(regs->UCreg_04, &sf->uc.uc_mcontext.regs.UCreg_04);
+ err |= __put_user(regs->UCreg_05, &sf->uc.uc_mcontext.regs.UCreg_05);
+ err |= __put_user(regs->UCreg_06, &sf->uc.uc_mcontext.regs.UCreg_06);
+ err |= __put_user(regs->UCreg_07, &sf->uc.uc_mcontext.regs.UCreg_07);
+ err |= __put_user(regs->UCreg_08, &sf->uc.uc_mcontext.regs.UCreg_08);
+ err |= __put_user(regs->UCreg_09, &sf->uc.uc_mcontext.regs.UCreg_09);
+ err |= __put_user(regs->UCreg_10, &sf->uc.uc_mcontext.regs.UCreg_10);
+ err |= __put_user(regs->UCreg_11, &sf->uc.uc_mcontext.regs.UCreg_11);
+ err |= __put_user(regs->UCreg_12, &sf->uc.uc_mcontext.regs.UCreg_12);
+ err |= __put_user(regs->UCreg_13, &sf->uc.uc_mcontext.regs.UCreg_13);
+ err |= __put_user(regs->UCreg_14, &sf->uc.uc_mcontext.regs.UCreg_14);
+ err |= __put_user(regs->UCreg_15, &sf->uc.uc_mcontext.regs.UCreg_15);
+ err |= __put_user(regs->UCreg_16, &sf->uc.uc_mcontext.regs.UCreg_16);
+ err |= __put_user(regs->UCreg_17, &sf->uc.uc_mcontext.regs.UCreg_17);
+ err |= __put_user(regs->UCreg_18, &sf->uc.uc_mcontext.regs.UCreg_18);
+ err |= __put_user(regs->UCreg_19, &sf->uc.uc_mcontext.regs.UCreg_19);
+ err |= __put_user(regs->UCreg_20, &sf->uc.uc_mcontext.regs.UCreg_20);
+ err |= __put_user(regs->UCreg_21, &sf->uc.uc_mcontext.regs.UCreg_21);
+ err |= __put_user(regs->UCreg_22, &sf->uc.uc_mcontext.regs.UCreg_22);
+ err |= __put_user(regs->UCreg_23, &sf->uc.uc_mcontext.regs.UCreg_23);
+ err |= __put_user(regs->UCreg_24, &sf->uc.uc_mcontext.regs.UCreg_24);
+ err |= __put_user(regs->UCreg_25, &sf->uc.uc_mcontext.regs.UCreg_25);
+ err |= __put_user(regs->UCreg_26, &sf->uc.uc_mcontext.regs.UCreg_26);
+ err |= __put_user(regs->UCreg_fp, &sf->uc.uc_mcontext.regs.UCreg_fp);
+ err |= __put_user(regs->UCreg_ip, &sf->uc.uc_mcontext.regs.UCreg_ip);
+ err |= __put_user(regs->UCreg_sp, &sf->uc.uc_mcontext.regs.UCreg_sp);
+ err |= __put_user(regs->UCreg_lr, &sf->uc.uc_mcontext.regs.UCreg_lr);
+ err |= __put_user(regs->UCreg_pc, &sf->uc.uc_mcontext.regs.UCreg_pc);
+ err |= __put_user(regs->UCreg_asr, &sf->uc.uc_mcontext.regs.UCreg_asr);
+
+ err |= __put_user(current->thread.trap_no,
+ &sf->uc.uc_mcontext.trap_no);
+ err |= __put_user(current->thread.error_code,
+ &sf->uc.uc_mcontext.error_code);
+ err |= __put_user(current->thread.address,
+ &sf->uc.uc_mcontext.fault_address);
+ err |= __put_user(set->sig[0], &sf->uc.uc_mcontext.oldmask);
+
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+ return err;
+}
+
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs, int framesize)
+{
+ unsigned long sp = regs->UCreg_sp;
+ void __user *frame;
+
+ /*
+ * This is the X/Open sanctioned signal stack switching.
+ */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /*
+ * ATPCS B01 mandates 8-byte alignment
+ */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /*
+ * Check that we can actually write to the signal frame.
+ */
+ if (!access_ok(VERIFY_WRITE, frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
+
+static int setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+ unsigned long __user *rc, void __user *frame, int usig)
+{
+ unsigned long handler = (unsigned long)ka->sa.sa_handler;
+ unsigned long retcode;
+ unsigned long asr = regs->UCreg_asr & ~PSR_f;
+
+ unsigned int idx = 0;
+
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ idx += 1;
+
+ if (__put_user(sigreturn_codes[idx], rc) ||
+ __put_user(sigreturn_codes[idx+1], rc+1))
+ return 1;
+
+ retcode = KERN_SIGRETURN_CODE + (idx << 2);
+
+ regs->UCreg_00 = usig;
+ regs->UCreg_sp = (unsigned long)frame;
+ regs->UCreg_lr = retcode;
+ regs->UCreg_pc = handler;
+ regs->UCreg_asr = asr;
+
+ return 0;
+}
+
+static int setup_frame(int usig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe __user *frame = get_sigframe(ka, regs, sizeof(*frame));
+ int err = 0;
+
+ if (!frame)
+ return 1;
+
+ /*
+ * Set uc.uc_flags to a value which sc.trap_no would never have.
+ */
+ err |= __put_user(0x5ac3c35a, &frame->uc.uc_flags);
+
+ err |= setup_sigframe(frame, regs, set);
+ if (err == 0)
+ err |= setup_return(regs, ka, frame->retcode, frame, usig);
+
+ return err;
+}
+
+static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame =
+ get_sigframe(ka, regs, sizeof(*frame));
+ stack_t stack;
+ int err = 0;
+
+ if (!frame)
+ return 1;
+
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ err |= __put_user(0, &frame->sig.uc.uc_flags);
+ err |= __put_user(NULL, &frame->sig.uc.uc_link);
+
+ memset(&stack, 0, sizeof(stack));
+ stack.ss_sp = (void __user *)current->sas_ss_sp;
+ stack.ss_flags = sas_ss_flags(regs->UCreg_sp);
+ stack.ss_size = current->sas_ss_size;
+ err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack));
+
+ err |= setup_sigframe(&frame->sig, regs, set);
+ if (err == 0)
+ err |= setup_return(regs, ka, frame->sig.retcode, frame, usig);
+
+ if (err == 0) {
+ /*
+ * For realtime signals we must also set the second and third
+ * arguments for the signal handler.
+ */
+ regs->UCreg_01 = (unsigned long)&frame->info;
+ regs->UCreg_02 = (unsigned long)&frame->sig.uc;
+ }
+
+ return err;
+}
+
+static inline void setup_syscall_restart(struct pt_regs *regs)
+{
+ regs->UCreg_00 = regs->UCreg_ORIG_00;
+ regs->UCreg_pc -= 4;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static int handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, sigset_t *oldset,
+ struct pt_regs *regs, int syscall)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = current;
+ int usig = sig;
+ int ret;
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (syscall) {
+ switch (regs->UCreg_00) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->UCreg_00 = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->UCreg_00 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ setup_syscall_restart(regs);
+ }
+ }
+
+ /*
+ * translate the signal
+ */
+ if (usig < 32 && thread->exec_domain
+ && thread->exec_domain->signal_invmap)
+ usig = thread->exec_domain->signal_invmap[usig];
+
+ /*
+ * Set up the stack frame
+ */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ret = setup_rt_frame(usig, ka, info, oldset, regs);
+ else
+ ret = setup_frame(usig, ka, oldset, regs);
+
+ /*
+ * Check that the resulting registers are actually sane.
+ */
+ ret |= !valid_user_regs(regs);
+
+ if (ret != 0) {
+ force_sigsegv(sig, tsk);
+ return ret;
+ }
+
+ /*
+ * Block the signal if we were successful.
+ */
+ spin_lock_irq(&tsk->sighand->siglock);
+ sigorsets(&tsk->blocked, &tsk->blocked,
+ &ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&tsk->blocked, sig);
+ recalc_sigpending();
+ spin_unlock_irq(&tsk->sighand->siglock);
+
+ return 0;
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle.  Thus you cannot kill init with SIGKILL, not even
+ * by mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs, int syscall)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+
+ /*
+ * We want the common case to go fast, which
+ * is why we may in certain cases get here from
+ * kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return;
+
+ if (try_to_freeze())
+ goto no_signal;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ sigset_t *oldset;
+
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
+ oldset = &current->blocked;
+ if (handle_signal(signr, &ka, &info, oldset, regs, syscall)
+ == 0) {
+ /*
+ * A signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
+ return;
+ }
+
+ no_signal:
+ /*
+ * No signal to deliver to the process - restart the syscall.
+ */
+ if (syscall) {
+ if (regs->UCreg_00 == -ERESTART_RESTARTBLOCK) {
+ u32 __user *usp;
+
+ regs->UCreg_sp -= 4;
+ usp = (u32 __user *)regs->UCreg_sp;
+
+ if (put_user(regs->UCreg_pc, usp) == 0) {
+ regs->UCreg_pc = KERN_RESTART_CODE;
+ } else {
+ regs->UCreg_sp += 4;
+ force_sigsegv(0, current);
+ }
+ }
+ if (regs->UCreg_00 == -ERESTARTNOHAND ||
+ regs->UCreg_00 == -ERESTARTSYS ||
+ regs->UCreg_00 == -ERESTARTNOINTR) {
+ setup_syscall_restart(regs);
+ }
+
+ /* If there's no signal to deliver, we just put the saved
+ * sigmask back.
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+ }
+ }
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs,
+ unsigned int thread_flags, int syscall)
+{
+ if (thread_flags & _TIF_SIGPENDING)
+ do_signal(regs, syscall);
+
+ if (thread_flags & _TIF_NOTIFY_RESUME) {
+ clear_thread_flag(TIF_NOTIFY_RESUME);
+ tracehook_notify_resume(regs);
+ if (current->replacement_session_keyring)
+ key_replace_session_keyring();
+ }
+}
+
+/*
+ * Copy signal return handlers into the vector page, and
+ * set sigreturn to be a pointer to these.
+ */
+void __init early_signal_init(void)
+{
+ memcpy((void *)kuser_vecpage_to_vectors(KERN_SIGRETURN_CODE),
+ sigreturn_codes, sizeof(sigreturn_codes));
+ memcpy((void *)kuser_vecpage_to_vectors(KERN_RESTART_CODE),
+ syscall_restart_code, sizeof(syscall_restart_code));
+	/* No need to flush the icache: early_trap_init() will do it last. */
+}
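
A quick illustration of how setup_return() above steers the handler's return:
the two trampoline words live in the kuser vector page copy installed by
early_signal_init(), and lr is pointed at word 0 for plain handlers or word 1
for SA_SIGINFO handlers.  The base address in this sketch is an assumed demo
value, not the real KUSER_VECPAGE_BASE:

#include <stdio.h>

#define DEMO_VECPAGE_BASE	0xbfff0000UL		/* assumption, demo only */
#define KERN_SIGRETURN_CODE	(DEMO_VECPAGE_BASE + 0x00000500)

int main(void)
{
	for (int sa_siginfo = 0; sa_siginfo <= 1; sa_siginfo++) {
		unsigned int idx = sa_siginfo ? 1 : 0;
		unsigned long retcode = KERN_SIGRETURN_CODE + (idx << 2);

		printf("%-10s handler returns through lr = %#lx (%s)\n",
		       sa_siginfo ? "SA_SIGINFO" : "plain", retcode,
		       sa_siginfo ? "rt_sigreturn swi" : "sigreturn swi");
	}
	return 0;
}
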
diff --git a/arch/unicore32/kernel/sleep.S b/arch/unicore32/kernel/sleep.S
new file mode 100644
index 000000000000..607a104aec59
--- /dev/null
+++ b/arch/unicore32/kernel/sleep.S
@@ -0,0 +1,202 @@
+/*
+ * linux/arch/unicore32/kernel/sleep.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/hardware.h>
+
+ .text
+
+pkunity_cpu_save_cp:
+
+ @ get coprocessor registers
+
+ movc r3, p0.c7, #0 @ PID
+ movc r4, p0.c2, #0 @ translation table base addr
+ movc r5, p0.c1, #0 @ control reg
+
+
+ @ store them plus current virtual stack ptr on stack
+ mov r6, sp
+ stm.w (r3 - r6), [sp-]
+
+ mov pc, lr
+
+pkunity_cpu_save_sp:
+ @ preserve phys address of stack
+ mov r0, sp
+ stw.w lr, [sp+], #-4
+ b.l sleep_phys_sp
+ ldw r1, =sleep_save_sp
+ stw r0, [r1]
+ ldw.w pc, [sp]+, #4
+
+/*
+ * puv3_cpu_suspend()
+ *
+ * Forces CPU into sleep state.
+ *
+ * r0 = value for PWRMODE M field for desired sleep state
+ */
+
+ENTRY(puv3_cpu_suspend)
+ stm.w (r16 - r27, lr), [sp-] @ save registers on stack
+ stm.w (r4 - r15), [sp-] @ save registers on stack
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ sfm.w (f0 - f7 ), [sp-]
+ sfm.w (f8 - f15), [sp-]
+ sfm.w (f16 - f23), [sp-]
+ sfm.w (f24 - f31), [sp-]
+ cff r4, s31
+ stm.w (r4), [sp-]
+#endif
+ b.l pkunity_cpu_save_cp
+
+ b.l pkunity_cpu_save_sp
+
+ @ clean data cache
+ mov r1, #0
+ movc p0.c5, r1, #14
+ nop
+ nop
+ nop
+ nop
+
+
+
+ @ DDR2 BaseAddr
+ ldw r0, =(PKUNITY_DDR2CTRL_BASE)
+
+ @ PM BaseAddr
+ ldw r1, =(PKUNITY_PM_BASE)
+
+ @ set PLL_SYS_CFG reg, 275
+ movl r6, #0x00002401
+ stw r6, [r1+], #0x18
+ @ set PLL_DDR_CFG reg, 66MHz
+ movl r6, #0x00100c00
+ stw r6, [r1+], #0x1c
+
+ @ set wake up source
+ movl r8, #0x800001ff @ epip4d
+ stw r8, [r1+], #0xc
+
+ @ set PGSR
+ movl r5, #0x40000
+ stw r5, [r1+], #0x10
+
+ @ prepare DDR2 refresh settings
+ ldw r5, [r0+], #0x24
+ or r5, r5, #0x00000001
+
+ @ prepare PMCR for PLL changing
+ movl r6, #0xc
+
+ @ prepare for closing PLL
+ movl r7, #0x1
+
+ @ prepare sleep mode
+ mov r8, #0x1
+
+@ movl r0, 0x11111111
+@ put_word_ocd r0
+ b pkunity_cpu_do_suspend
+
+ .ltorg
+ .align 5
+pkunity_cpu_do_suspend:
+ b 101f
+ @ put DDR2 into self-refresh
+100: stw r5, [r0+], #0x24
+ @ change PLL
+ stw r6, [r1]
+ b 1f
+
+ .ltorg
+ .align 5
+101: b 102f
+ @ wait for PLL changing complete
+1: ldw r6, [r1+], #0x44
+ csub.a r6, #0x1
+ bne 1b
+ b 2f
+
+ .ltorg
+ .align 5
+102: b 100b
+ @ close PLL
+2: stw r7, [r1+], #0x4
+ @ enter sleep mode
+ stw r8, [r1]
+3: b 3b
+
+
+
+
+/*
+ * puv3_cpu_resume()
+ *
+ * entry point from bootloader into kernel during resume
+ *
+ * Note: Yes, part of the following code is located in the .data section.
+ *       This allows sleep_save_sp to be accessed with a relative load
+ *       while we can't rely on any MMU translation.  We could have put
+ *       sleep_save_sp in the .text section as well, but some setups might
+ *       insist that it be truly read-only.
+ */
+
+ .data
+ .align 5
+ENTRY(puv3_cpu_resume)
+@ movl r0, 0x20202020
+@ put_word_ocd r0
+
+ ldw r0, sleep_save_sp @ stack phys addr
+ ldw r2, =resume_after_mmu @ its absolute virtual address
+ ldm (r3 - r6), [r0]+ @ CP regs + virt stack ptr
+ mov sp, r6 @ CP regs + virt stack ptr
+
+ mov r1, #0
+ movc p0.c6, r1, #6 @ invalidate I & D TLBs
+ movc p0.c5, r1, #28 @ invalidate I & D caches, BTB
+
+ movc p0.c7, r3, #0 @ PID
+ movc p0.c2, r4, #0 @ translation table base addr
+ movc p0.c1, r5, #0 @ control reg, turn on mmu
+ nop
+ jump r2
+ nop
+ nop
+ nop
+ nop
+ nop
+
+sleep_save_sp:
+ .word 0 @ preserve stack phys ptr here
+
+ .text
+resume_after_mmu:
+@ movl r0, 0x30303030
+@ put_word_ocd r0
+
+#ifdef CONFIG_UNICORE_FPU_F64
+ lfm.w (f0 - f7 ), [sp]+
+ lfm.w (f8 - f15), [sp]+
+ lfm.w (f16 - f23), [sp]+
+ lfm.w (f24 - f31), [sp]+
+ ldm.w (r4), [sp]+
+ ctf r4, s31
+#endif
+ ldm.w (r4 - r15), [sp]+ @ restore registers from stack
+ ldm.w (r16 - r27, pc), [sp]+ @ return to caller
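
The four-word state block that pkunity_cpu_save_cp pushes with
"stm.w (r3 - r6), [sp-]" (lowest register at the lowest address) is what
puv3_cpu_resume later reloads with "ldm (r3 - r6), [r0]+".  A C sketch of
that layout, for illustration only:

#include <stdio.h>
#include <stddef.h>

struct puv3_sleep_state {
	unsigned long pid;	/* r3: p0.c7, process ID */
	unsigned long ttb;	/* r4: p0.c2, translation table base */
	unsigned long ctrl;	/* r5: p0.c1, control register */
	unsigned long virt_sp;	/* r6: virtual stack pointer to switch back to */
};

int main(void)
{
	/* sleep_save_sp holds the *physical* address of this block */
	printf("virt_sp lives at offset %zu in the saved block\n",
	       offsetof(struct puv3_sleep_state, virt_sp));
	return 0;
}
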
diff --git a/arch/unicore32/kernel/stacktrace.c b/arch/unicore32/kernel/stacktrace.c
new file mode 100644
index 000000000000..b34030bdabe3
--- /dev/null
+++ b/arch/unicore32/kernel/stacktrace.c
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/unicore32/kernel/stacktrace.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+
+#include <asm/stacktrace.h>
+
+#if defined(CONFIG_FRAME_POINTER)
+/*
+ * Unwind the current stack frame and store the new register values in the
+ * structure passed as argument. Unwinding is equivalent to a function return,
+ * hence the new PC value rather than LR should be used for backtrace.
+ *
+ * With framepointer enabled, a simple function prologue looks like this:
+ * mov ip, sp
+ * stmdb sp!, {fp, ip, lr, pc}
+ * sub fp, ip, #4
+ *
+ * A simple function epilogue looks like this:
+ * ldm sp, {fp, sp, pc}
+ *
+ * Note that with framepointer enabled, even the leaf functions have the same
+ * prologue and epilogue, therefore we can ignore the LR value in this case.
+ */
+int notrace unwind_frame(struct stackframe *frame)
+{
+ unsigned long high, low;
+ unsigned long fp = frame->fp;
+
+ /* only go to a higher address on the stack */
+ low = frame->sp;
+ high = ALIGN(low, THREAD_SIZE);
+
+ /* check current frame pointer is within bounds */
+ if (fp < (low + 12) || fp + 4 >= high)
+ return -EINVAL;
+
+ /* restore the registers from the stack frame */
+ frame->fp = *(unsigned long *)(fp - 12);
+ frame->sp = *(unsigned long *)(fp - 8);
+ frame->pc = *(unsigned long *)(fp - 4);
+
+ return 0;
+}
+#endif
+
+void notrace walk_stackframe(struct stackframe *frame,
+ int (*fn)(struct stackframe *, void *), void *data)
+{
+ while (1) {
+ int ret;
+
+ if (fn(frame, data))
+ break;
+ ret = unwind_frame(frame);
+ if (ret < 0)
+ break;
+ }
+}
+EXPORT_SYMBOL(walk_stackframe);
+
+#ifdef CONFIG_STACKTRACE
+struct stack_trace_data {
+ struct stack_trace *trace;
+ unsigned int no_sched_functions;
+ unsigned int skip;
+};
+
+static int save_trace(struct stackframe *frame, void *d)
+{
+ struct stack_trace_data *data = d;
+ struct stack_trace *trace = data->trace;
+ unsigned long addr = frame->pc;
+
+ if (data->no_sched_functions && in_sched_functions(addr))
+ return 0;
+ if (data->skip) {
+ data->skip--;
+ return 0;
+ }
+
+ trace->entries[trace->nr_entries++] = addr;
+
+ return trace->nr_entries >= trace->max_entries;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ struct stack_trace_data data;
+ struct stackframe frame;
+
+ data.trace = trace;
+ data.skip = trace->skip;
+
+ if (tsk != current) {
+ data.no_sched_functions = 1;
+ frame.fp = thread_saved_fp(tsk);
+ frame.sp = thread_saved_sp(tsk);
+ frame.lr = 0; /* recovered from the stack */
+ frame.pc = thread_saved_pc(tsk);
+ } else {
+ register unsigned long current_sp asm("sp");
+
+ data.no_sched_functions = 0;
+ frame.fp = (unsigned long)__builtin_frame_address(0);
+ frame.sp = current_sp;
+ frame.lr = (unsigned long)__builtin_return_address(0);
+ frame.pc = (unsigned long)save_stack_trace_tsk;
+ }
+
+ walk_stackframe(&frame, save_trace, &data);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+#endif
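
A userspace model of the frame chain unwind_frame() above walks: the saved fp,
sp and pc sit in the three words just below fp (12, 8 and 4 bytes on the
32-bit target; scaled by the host word size here so the sketch runs anywhere).
The "stack" is hand-built purely for illustration:

#include <stdio.h>
#include <stdint.h>

#define W sizeof(uintptr_t)	/* 4 on the UniCore32 target */

struct frame { uintptr_t fp, sp, pc; };

static int unwind(struct frame *f, uintptr_t low, uintptr_t high)
{
	uintptr_t fp = f->fp;

	if (fp < low + 3 * W || fp + W >= high)	/* bounds check, as above */
		return -1;
	f->fp = *(uintptr_t *)(fp - 3 * W);	/* saved fp */
	f->sp = *(uintptr_t *)(fp - 2 * W);	/* saved sp */
	f->pc = *(uintptr_t *)(fp - 1 * W);	/* saved pc */
	return 0;
}

int main(void)
{
	uintptr_t stack[8] = { 0 };

	/* one fake frame: fp points just past its three saved words */
	stack[1] = 0;				/* saved fp of 0 ends the walk */
	stack[2] = (uintptr_t)&stack[5];	/* saved sp */
	stack[3] = 0x1234;			/* saved pc */

	struct frame f = { .fp = (uintptr_t)&stack[4] };

	while (unwind(&f, (uintptr_t)&stack[0], (uintptr_t)&stack[8]) == 0)
		printf("pc = %#lx\n", (unsigned long)f.pc);
	return 0;
}
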
diff --git a/arch/unicore32/kernel/sys.c b/arch/unicore32/kernel/sys.c
new file mode 100644
index 000000000000..3afe60a39ac9
--- /dev/null
+++ b/arch/unicore32/kernel/sys.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/unicore32/kernel/sys.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+#include <linux/uaccess.h>
+
+#include <asm/syscalls.h>
+#include <asm/cacheflush.h>
+
+/* Clone a task - this clones the calling program thread.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid,
+ struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->UCreg_sp;
+
+ return do_fork(clone_flags, newsp, regs, 0,
+ parent_tid, child_tid);
+}
+
+/* sys_execve() executes a new program.
+ * This is called indirectly via a small wrapper
+ */
+asmlinkage long __sys_execve(const char __user *filename,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp,
+ struct pt_regs *regs)
+{
+ int error;
+ char *fn;
+
+ fn = getname(filename);
+ error = PTR_ERR(fn);
+ if (IS_ERR(fn))
+ goto out;
+ error = do_execve(fn, argv, envp, regs);
+ putname(fn);
+out:
+ return error;
+}
+
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
+{
+ struct pt_regs regs;
+ int ret;
+
+ memset(&regs, 0, sizeof(struct pt_regs));
+ ret = do_execve(filename,
+ (const char __user *const __user *)argv,
+ (const char __user *const __user *)envp, &regs);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Save argc to the register structure for userspace.
+ */
+ regs.UCreg_00 = ret;
+
+ /*
+ * We were successful. We won't be returning to our caller, but
+ * instead to user space by manipulating the kernel stack.
+ */
+ asm("add r0, %0, %1\n\t"
+ "mov r1, %2\n\t"
+ "mov r2, %3\n\t"
+ "mov r22, #0\n\t" /* not a syscall */
+ "mov r23, %0\n\t" /* thread structure */
+ "b.l memmove\n\t" /* copy regs to top of stack */
+ "mov sp, r0\n\t" /* reposition stack pointer */
+ "b ret_to_user"
+ :
+ : "r" (current_thread_info()),
+ "Ir" (THREAD_START_SP - sizeof(regs)),
+ "r" (&regs),
+ "Ir" (sizeof(regs))
+ : "r0", "r1", "r2", "r3", "ip", "lr", "memory");
+
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(kernel_execve);
+
+/* Note: the last argument is an offset in units of 4 KiB (hence off_4k). */
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off_4k)
+{
+ return sys_mmap_pgoff(addr, len, prot, flags, fd,
+ off_4k);
+}
+
+/* Provide the actual syscall number to call mapping. */
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+/* Note that we don't include <linux/unistd.h> but <asm/unistd.h> */
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
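
The sys_call_table definition above leans on GCC's designated range
initializer: every slot defaults to sys_ni_syscall, then the __SYSCALL()
entries generated from <asm/unistd.h> override the implemented ones.  A
standalone sketch of the same idiom (all names here are made up):

#include <stdio.h>

#define NR_DEMO_CALLS 8

static long demo_ni_syscall(void)	{ return -38; /* -ENOSYS */ }
static long demo_getpid(void)		{ return 42; }

static long (*demo_call_table[NR_DEMO_CALLS])(void) = {
	[0 ... NR_DEMO_CALLS - 1] = demo_ni_syscall,	/* GCC range extension */
	[3] = demo_getpid,				/* later entry wins */
};

int main(void)
{
	printf("nr 0 -> %ld, nr 3 -> %ld\n",
	       demo_call_table[0](), demo_call_table[3]());
	return 0;
}
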
diff --git a/arch/unicore32/kernel/time.c b/arch/unicore32/kernel/time.c
new file mode 100644
index 000000000000..080710c09241
--- /dev/null
+++ b/arch/unicore32/kernel/time.c
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/unicore32/kernel/time.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/timex.h>
+#include <linux/clockchips.h>
+
+#include <mach/hardware.h>
+
+#define MIN_OSCR_DELTA 2
+
+static irqreturn_t puv3_ost0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *c = dev_id;
+
+ /* Disarm the compare/match, signal the event. */
+ writel(readl(OST_OIER) & ~OST_OIER_E0, OST_OIER);
+ writel(readl(OST_OSSR) & ~OST_OSSR_M0, OST_OSSR);
+ c->event_handler(c);
+
+ return IRQ_HANDLED;
+}
+
+static int
+puv3_osmr0_set_next_event(unsigned long delta, struct clock_event_device *c)
+{
+ unsigned long next, oscr;
+
+ writel(readl(OST_OIER) | OST_OIER_E0, OST_OIER);
+ next = readl(OST_OSCR) + delta;
+ writel(next, OST_OSMR0);
+ oscr = readl(OST_OSCR);
+
+ return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
+}
+
+static void
+puv3_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *c)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ writel(readl(OST_OIER) & ~OST_OIER_E0, OST_OIER);
+ writel(readl(OST_OSSR) & ~OST_OSSR_M0, OST_OSSR);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_PERIODIC:
+ break;
+ }
+}
+
+static struct clock_event_device ckevt_puv3_osmr0 = {
+ .name = "osmr0",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_next_event = puv3_osmr0_set_next_event,
+ .set_mode = puv3_osmr0_set_mode,
+};
+
+static cycle_t puv3_read_oscr(struct clocksource *cs)
+{
+ return readl(OST_OSCR);
+}
+
+static struct clocksource cksrc_puv3_oscr = {
+ .name = "oscr",
+ .rating = 200,
+ .read = puv3_read_oscr,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct irqaction puv3_timer_irq = {
+ .name = "ost0",
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = puv3_ost0_interrupt,
+ .dev_id = &ckevt_puv3_osmr0,
+};
+
+void __init time_init(void)
+{
+ writel(0, OST_OIER); /* disable any timer interrupts */
+ writel(0, OST_OSSR); /* clear status on all timers */
+
+ clockevents_calc_mult_shift(&ckevt_puv3_osmr0, CLOCK_TICK_RATE, 5);
+
+ ckevt_puv3_osmr0.max_delta_ns =
+ clockevent_delta2ns(0x7fffffff, &ckevt_puv3_osmr0);
+ ckevt_puv3_osmr0.min_delta_ns =
+ clockevent_delta2ns(MIN_OSCR_DELTA * 2, &ckevt_puv3_osmr0) + 1;
+ ckevt_puv3_osmr0.cpumask = cpumask_of(0);
+
+ setup_irq(IRQ_TIMER0, &puv3_timer_irq);
+
+ clocksource_register_hz(&cksrc_puv3_oscr, CLOCK_TICK_RATE);
+ clockevents_register_device(&ckevt_puv3_osmr0);
+}
+
+#ifdef CONFIG_PM
+unsigned long osmr[4], oier;
+
+void puv3_timer_suspend(void)
+{
+ osmr[0] = readl(OST_OSMR0);
+ osmr[1] = readl(OST_OSMR1);
+ osmr[2] = readl(OST_OSMR2);
+ osmr[3] = readl(OST_OSMR3);
+ oier = readl(OST_OIER);
+}
+
+void puv3_timer_resume(void)
+{
+ writel(0, OST_OSSR);
+ writel(osmr[0], OST_OSMR0);
+ writel(osmr[1], OST_OSMR1);
+ writel(osmr[2], OST_OSMR2);
+ writel(osmr[3], OST_OSMR3);
+ writel(oier, OST_OIER);
+
+ /*
+ * OSMR0 is the system timer: make sure OSCR is sufficiently behind
+ */
+ writel(readl(OST_OSMR0) - LATCH, OST_OSCR);
+}
+#else
+void puv3_timer_suspend(void) { }
+void puv3_timer_resume(void) { }
+#endif
+
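puv3_osmr0_set_next_event() above programs the match register and then
re-reads the free-running counter: if the counter has already reached (or
nearly reached) the match value while we were programming it, the interrupt
would never fire, so the function returns -ETIME and lets the clockevents
core retry with a larger delta.  A userspace model of that check, with a
fake counter standing in for OSCR:

#include <stdio.h>

#define MIN_OSCR_DELTA 2

static unsigned long oscr;		/* fake free-running counter */

static int set_next_event(unsigned long delta)
{
	unsigned long next = oscr + delta;	/* "write OSMR0" */

	oscr += 3;	/* time passes while we program the match */
	return (long)(next - oscr) <= MIN_OSCR_DELTA ? -1 /* -ETIME */ : 0;
}

int main(void)
{
	printf("delta=10 -> %d (armed)\n", set_next_event(10));
	printf("delta=2  -> %d (missed, -ETIME)\n", set_next_event(2));
	return 0;
}
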
diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c
new file mode 100644
index 000000000000..25abbb101729
--- /dev/null
+++ b/arch/unicore32/kernel/traps.c
@@ -0,0 +1,333 @@
+/*
+ * linux/arch/unicore32/kernel/traps.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 'traps.c' handles hardware exceptions after we have saved some state.
+ * Mostly a debugging aid, but will probably kill the offending process.
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/spinlock.h>
+#include <linux/personality.h>
+#include <linux/kallsyms.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <linux/unistd.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+
+#include "setup.h"
+
+static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+
+void dump_backtrace_entry(unsigned long where,
+ unsigned long from, unsigned long frame)
+{
+#ifdef CONFIG_KALLSYMS
+ printk(KERN_DEFAULT "[<%08lx>] (%pS) from [<%08lx>] (%pS)\n",
+ where, (void *)where, from, (void *)from);
+#else
+ printk(KERN_DEFAULT "Function entered at [<%08lx>] from [<%08lx>]\n",
+ where, from);
+#endif
+}
+
+/*
+ * Stack pointers should always be within the kernel's view of
+ * physical memory.  If a pointer is not, then we can't dump out
+ * any information relating to the stack.
+ */
+static int verify_stack(unsigned long sp)
+{
+ if (sp < PAGE_OFFSET ||
+ (sp > (unsigned long)high_memory && high_memory != NULL))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Dump out the contents of some memory nicely...
+ */
+static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top)
+{
+ unsigned long first;
+ mm_segment_t fs;
+ int i;
+
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+	 * to safely read from kernel space.  Note that we dump the
+	 * memory first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ printk(KERN_DEFAULT "%s%s(0x%08lx to 0x%08lx)\n",
+ lvl, str, bottom, top);
+
+ for (first = bottom & ~31; first < top; first += 32) {
+ unsigned long p;
+ char str[sizeof(" 12345678") * 8 + 1];
+
+ memset(str, ' ', sizeof(str));
+ str[sizeof(str) - 1] = '\0';
+
+ for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+ if (p >= bottom && p < top) {
+ unsigned long val;
+ if (__get_user(val, (unsigned long *)p) == 0)
+ sprintf(str + i * 9, " %08lx", val);
+ else
+ sprintf(str + i * 9, " ????????");
+ }
+ }
+ printk(KERN_DEFAULT "%s%04lx:%s\n", lvl, first & 0xffff, str);
+ }
+
+ set_fs(fs);
+}
+
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ unsigned long addr = instruction_pointer(regs);
+ const int width = 8;
+ mm_segment_t fs;
+ char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ int i;
+
+ /*
+ * We need to switch to kernel mode so that we can use __get_user
+ * to safely read from kernel space. Note that we now dump the
+ * code first, just in case the backtrace kills us.
+ */
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+
+ for (i = -4; i < 1; i++) {
+ unsigned int val, bad;
+
+ bad = __get_user(val, &((u32 *)addr)[i]);
+
+ if (!bad)
+ p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+ width, val);
+ else {
+ p += sprintf(p, "bad PC value");
+ break;
+ }
+ }
+ printk(KERN_DEFAULT "%sCode: %s\n", lvl, str);
+
+ set_fs(fs);
+}
+
+static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
+{
+ unsigned int fp, mode;
+ int ok = 1;
+
+ printk(KERN_DEFAULT "Backtrace: ");
+
+ if (!tsk)
+ tsk = current;
+
+ if (regs) {
+ fp = regs->UCreg_fp;
+ mode = processor_mode(regs);
+ } else if (tsk != current) {
+ fp = thread_saved_fp(tsk);
+ mode = 0x10;
+ } else {
+ asm("mov %0, fp" : "=r" (fp) : : "cc");
+ mode = 0x10;
+ }
+
+ if (!fp) {
+ printk("no frame pointer");
+ ok = 0;
+ } else if (verify_stack(fp)) {
+ printk("invalid frame pointer 0x%08x", fp);
+ ok = 0;
+ } else if (fp < (unsigned long)end_of_stack(tsk))
+ printk("frame pointer underflow");
+ printk("\n");
+
+ if (ok)
+ c_backtrace(fp, mode);
+}
+
+void dump_stack(void)
+{
+ dump_backtrace(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ dump_backtrace(NULL, tsk);
+ barrier();
+}
+
+static int __die(const char *str, int err, struct thread_info *thread,
+ struct pt_regs *regs)
+{
+ struct task_struct *tsk = thread->task;
+ static int die_counter;
+ int ret;
+
+ printk(KERN_EMERG "Internal error: %s: %x [#%d]\n",
+ str, err, ++die_counter);
+ sysfs_printk_last_file();
+
+ /* trap and error numbers are mostly meaningless on UniCore */
+	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no,
+			 SIGSEGV);
+ if (ret == NOTIFY_STOP)
+ return ret;
+
+ print_modules();
+ __show_regs(regs);
+ printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+
+ if (!user_mode(regs) || in_interrupt()) {
+ dump_mem(KERN_EMERG, "Stack: ", regs->UCreg_sp,
+ THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+ dump_backtrace(regs, tsk);
+ dump_instr(KERN_EMERG, regs);
+ }
+
+ return ret;
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+ struct thread_info *thread = current_thread_info();
+ int ret;
+
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+ ret = __die(str, err, thread, regs);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (ret != NOTIFY_STOP)
+ do_exit(SIGSEGV);
+}
+
+void uc32_notify_die(const char *str, struct pt_regs *regs,
+ struct siginfo *info, unsigned long err, unsigned long trap)
+{
+ if (user_mode(regs)) {
+ current->thread.error_code = err;
+ current->thread.trap_no = trap;
+
+ force_sig_info(info->si_signo, info, current);
+ } else
+ die(str, regs, err);
+}
+
+/*
+ * bad_mode handles the impossible case in the vectors. If you see one of
+ * these, then it's extremely serious, and could mean you have buggy hardware.
+ * It never returns, and never tries to sync. We hope that we can at least
+ * dump out some state information...
+ */
+asmlinkage void bad_mode(struct pt_regs *regs, unsigned int reason)
+{
+ console_verbose();
+
+ printk(KERN_CRIT "Bad mode detected with reason 0x%x\n", reason);
+
+ die("Oops - bad mode", regs, 0);
+ local_irq_disable();
+ panic("bad mode");
+}
+
+void __pte_error(const char *file, int line, unsigned long val)
+{
+ printk(KERN_DEFAULT "%s:%d: bad pte %08lx.\n", file, line, val);
+}
+
+void __pmd_error(const char *file, int line, unsigned long val)
+{
+ printk(KERN_DEFAULT "%s:%d: bad pmd %08lx.\n", file, line, val);
+}
+
+void __pgd_error(const char *file, int line, unsigned long val)
+{
+ printk(KERN_DEFAULT "%s:%d: bad pgd %08lx.\n", file, line, val);
+}
+
+asmlinkage void __div0(void)
+{
+ printk(KERN_DEFAULT "Division by zero in kernel.\n");
+ dump_stack();
+}
+EXPORT_SYMBOL(__div0);
+
+void abort(void)
+{
+ BUG();
+
+ /* if that doesn't kill us, halt */
+ panic("Oops failed to kill thread");
+}
+EXPORT_SYMBOL(abort);
+
+void __init trap_init(void)
+{
+ return;
+}
+
+void __init early_trap_init(void)
+{
+ unsigned long vectors = VECTORS_BASE;
+
+ /*
+ * Copy the vectors, stubs (in entry-unicore.S)
+ * into the vector page, mapped at 0xffff0000, and ensure these
+ * are visible to the instruction stream.
+ */
+ memcpy((void *)vectors,
+ __vectors_start,
+ __vectors_end - __vectors_start);
+ memcpy((void *)vectors + 0x200,
+ __stubs_start,
+ __stubs_end - __stubs_start);
+
+ early_signal_init();
+
+ flush_icache_range(vectors, vectors + PAGE_SIZE);
+}
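
dump_mem() above prints 32-byte-aligned rows of up to eight words, blanking
slots that fall outside [bottom, top).  The same row layout in plain
userspace C over a local buffer, for illustration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void demo_dump(uintptr_t bottom, uintptr_t top)
{
	for (uintptr_t first = bottom & ~(uintptr_t)31; first < top;
	     first += 32) {
		char line[sizeof(" 12345678") * 8 + 1];
		uintptr_t p = first;

		memset(line, ' ', sizeof(line));
		line[sizeof(line) - 1] = '\0';

		for (int i = 0; i < 8 && p < top; i++, p += 4)
			if (p >= bottom)	/* skip slots before bottom */
				sprintf(line + i * 9, " %08x",
					*(uint32_t *)p);
		printf("%04lx:%s\n", (unsigned long)(first & 0xffff), line);
	}
}

int main(void)
{
	uint32_t buf[16];

	for (int i = 0; i < 16; i++)
		buf[i] = 0x11111111u * i;
	demo_dump((uintptr_t)buf, (uintptr_t)(buf + 16));
	return 0;
}
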
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..0b4eb89729e7
--- /dev/null
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -0,0 +1,61 @@
+/*
+ * linux/arch/unicore32/kernel/vmlinux.lds.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+OUTPUT_ARCH(unicore32)
+ENTRY(stext)
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ . = PAGE_OFFSET + KERNEL_IMAGE_START;
+
+ _text = .;
+ __init_begin = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+ PERCPU(PAGE_SIZE)
+ __init_end = .;
+
+ _stext = .;
+ .text : { /* Real text segment */
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+
+ *(.fixup)
+ *(.gnu.warning)
+ }
+ _etext = .;
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+ RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ EXCEPTION_TABLE(32)
+ NOTES
+
+ BSS_SECTION(0, 0, 0)
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS /* Exit code and data */
+}
diff --git a/arch/unicore32/lib/Makefile b/arch/unicore32/lib/Makefile
new file mode 100644
index 000000000000..87229a558b36
--- /dev/null
+++ b/arch/unicore32/lib/Makefile
@@ -0,0 +1,27 @@
+#
+# linux/arch/unicore32/lib/Makefile
+#
+# Copyright (C) 2001-2010 GUAN Xue-tao
+#
+
+lib-y := backtrace.o delay.o findbit.o
+lib-y += strncpy_from_user.o strnlen_user.o
+lib-y += clear_user.o copy_page.o
+lib-y += copy_from_user.o copy_to_user.o
+
+GNU_LIBC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libc.a)
+GNU_LIBC_A_OBJS := memchr.o memcpy.o memmove.o memset.o
+GNU_LIBC_A_OBJS += strchr.o strrchr.o
+GNU_LIBC_A_OBJS += rawmemchr.o # needed by strrchr.o
+
+GNU_LIBGCC_A := $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a)
+GNU_LIBGCC_A_OBJS := _ashldi3.o _ashrdi3.o _lshrdi3.o
+GNU_LIBGCC_A_OBJS += _divsi3.o _modsi3.o _ucmpdi2.o _umodsi3.o _udivsi3.o
+
+lib-y += $(GNU_LIBC_A_OBJS) $(GNU_LIBGCC_A_OBJS)
+
+$(addprefix $(obj)/, $(GNU_LIBC_A_OBJS)):
+ $(Q)$(AR) p $(GNU_LIBC_A) $(notdir $@) > $@
+
+$(addprefix $(obj)/, $(GNU_LIBGCC_A_OBJS)):
+ $(Q)$(AR) p $(GNU_LIBGCC_A) $(notdir $@) > $@
diff --git a/arch/unicore32/lib/backtrace.S b/arch/unicore32/lib/backtrace.S
new file mode 100644
index 000000000000..ef01d77f2f65
--- /dev/null
+++ b/arch/unicore32/lib/backtrace.S
@@ -0,0 +1,163 @@
+/*
+ * linux/arch/unicore32/lib/backtrace.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+@ fp is 0 or stack frame
+
+#define frame v4
+#define sv_fp v5
+#define sv_pc v6
+#define offset v8
+
+ENTRY(__backtrace)
+ mov r0, fp
+
+ENTRY(c_backtrace)
+
+#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
+ mov pc, lr
+ENDPROC(__backtrace)
+ENDPROC(c_backtrace)
+#else
+ stm.w (v4 - v8, lr), [sp-] @ Save an extra register
+ @ so we have a location...
+ mov.a frame, r0 @ if frame pointer is zero
+ beq no_frame @ we have no stack frames
+
+1: stm.w (pc), [sp-] @ calculate offset of PC stored
+ ldw.w r0, [sp]+, #4 @ by stmfd for this CPU
+ adr r1, 1b
+ sub offset, r0, r1
+
+/*
+ * Stack frame layout:
+ * optionally saved caller registers (r4 - r10)
+ * saved fp
+ * saved sp
+ * saved lr
+ * frame => saved pc
+ * optionally saved arguments (r0 - r3)
+ * saved sp => <next word>
+ *
+ * Functions start with the following code sequence:
+ * mov ip, sp
+ * stm.w (r0 - r3), [sp-] (optional)
+ * corrected pc => stm.w sp, (..., fp, ip, lr, pc)
+ */
+for_each_frame:
+
+1001: ldw sv_pc, [frame+], #0 @ get saved pc
+1002: ldw sv_fp, [frame+], #-12 @ get saved fp
+
+ sub sv_pc, sv_pc, offset @ Correct PC for prefetching
+
+1003: ldw r2, [sv_pc+], #-4 @ if stmfd sp, {args} exists,
+ ldw r3, .Ldsi+4 @ adjust saved 'pc' back one
+ cxor.a r3, r2 >> #14 @ instruction
+ beq 201f
+ sub r0, sv_pc, #4 @ allow for mov
+ b 202f
+201:
+ sub r0, sv_pc, #8 @ allow for mov + stmia
+202:
+ ldw r1, [frame+], #-4 @ get saved lr
+ mov r2, frame
+ b.l dump_backtrace_entry
+
+ ldw r1, [sv_pc+], #-4 @ if stmfd sp, {args} exists,
+ ldw r3, .Ldsi+4
+ cxor.a r3, r1 >> #14
+ bne 1004f
+ ldw r0, [frame+], #-8 @ get sp
+ sub r0, r0, #4 @ point at the last arg
+ b.l .Ldumpstm @ dump saved registers
+
+1004: ldw r1, [sv_pc+], #0 @ if stmfd {, fp, ip, lr, pc}
+ ldw r3, .Ldsi @ instruction exists,
+ cxor.a r3, r1 >> #14
+ bne 201f
+ sub r0, frame, #16
+ b.l .Ldumpstm @ dump saved registers
+201:
+ cxor.a sv_fp, #0 @ zero saved fp means
+ beq no_frame @ no further frames
+
+ csub.a sv_fp, frame @ next frame must be
+ mov frame, sv_fp @ above the current frame
+ bua for_each_frame
+
+1006: adr r0, .Lbad
+ mov r1, frame
+ b.l printk
+no_frame: ldm.w (v4 - v8, pc), [sp]+
+ENDPROC(__backtrace)
+ENDPROC(c_backtrace)
+
+ .pushsection __ex_table,"a"
+ .align 3
+ .long 1001b, 1006b
+ .long 1002b, 1006b
+ .long 1003b, 1006b
+ .long 1004b, 1006b
+ .popsection
+
+#define instr v4
+#define reg v5
+#define stack v6
+
+.Ldumpstm: stm.w (instr, reg, stack, v7, lr), [sp-]
+ mov stack, r0
+ mov instr, r1
+ mov reg, #14
+ mov v7, #0
+1: mov r3, #1
+ csub.a reg, #8
+ bne 201f
+ sub reg, reg, #3
+201:
+ cand.a instr, r3 << reg
+ beq 2f
+ add v7, v7, #1
+ cxor.a v7, #6
+ cmoveq v7, #1
+ cmoveq r1, #'\n'
+ cmovne r1, #' '
+ ldw.w r3, [stack]+, #-4
+ mov r2, reg
+ csub.a r2, #8
+ bsl 201f
+ sub r2, r2, #3
+201:
+ cand.a instr, #0x40 @ if H is 1, high 16 regs
+ beq 201f
+ add r2, r2, #0x10 @ so r2 need add 16
+201:
+ adr r0, .Lfp
+ b.l printk
+2: sub.a reg, reg, #1
+ bns 1b
+ cxor.a v7, #0
+ beq 201f
+ adr r0, .Lcr
+ b.l printk
+201: ldm.w (instr, reg, stack, v7, pc), [sp]+
+
+.Lfp: .asciz "%cr%d:%08x"
+.Lcr: .asciz "\n"
+.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n"
+ .align
+.Ldsi: .word 0x92eec000 >> 14 @ stm.w sp, (... fp, ip, lr, pc)
+ .word 0x92e10000 >> 14 @ stm.w sp, ()
+
+#endif
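
The prologue matching above compares an instruction with its register-list
bits stripped: .Ldsi holds the canned "stm.w sp, (... fp, ip, lr, pc)"
encoding pre-shifted right by 14, and the fetched word is shifted the same
way before the compare.  A C rendering of the test (the extra low bits here
stand in for an arbitrary saved-register list and are illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int dsi  = 0x92eec000u >> 14;	/* canned template, as in .Ldsi */
	unsigned int insn = 0x92eec000u | 0x1234u; /* same stm.w, some reg list */

	printf("prologue %s\n",
	       (insn >> 14) == dsi ? "matched" : "not matched");
	return 0;
}
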
diff --git a/arch/unicore32/lib/clear_user.S b/arch/unicore32/lib/clear_user.S
new file mode 100644
index 000000000000..20047f7224fd
--- /dev/null
+++ b/arch/unicore32/lib/clear_user.S
@@ -0,0 +1,57 @@
+/*
+ * linux/arch/unicore32/lib/clear_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+/* Prototype: int __clear_user(void *addr, size_t sz)
+ * Purpose : clear some user memory
+ * Params : addr - user memory address to clear
+ * : sz - number of bytes to clear
+ * Returns : number of bytes NOT cleared
+ */
+WEAK(__clear_user)
+ stm.w (lr), [sp-]
+ stm.w (r1), [sp-]
+ mov r2, #0
+ csub.a r1, #4
+ bsl 2f
+ and.a ip, r0, #3
+ beq 1f
+ csub.a ip, #2
+ strusr r2, r0, 1
+ strusr r2, r0, 1, el
+ strusr r2, r0, 1, sl
+ rsub ip, ip, #4
+ sub r1, r1, ip @ 7 6 5 4 3 2 1
+1: sub.a r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7
+ strusr r2, r0, 4, ns, rept=2
+ bns 1b
+ add.a r1, r1, #4 @ 3 2 1 0 -1 -2 -3
+ strusr r2, r0, 4, ns
+2: cand.a r1, #2 @ 1x 1x 0x 0x 1x 1x 0x
+ strusr r2, r0, 1, ne, rept=2
+ cand.a r1, #1 @ x1 x0 x1 x0 x1 x0 x1
+ beq 3f
+USER( stb.u r2, [r0])
+3: mov r0, #0
+ ldm.w (r1), [sp]+
+ ldm.w (pc), [sp]+
+ENDPROC(__clear_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: ldm.w (r0), [sp]+
+ ldm.w (pc), [sp]+
+ .popsection
+
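__clear_user() above returns the number of bytes it could NOT clear (0 on
success); on a faulting store the .fixup entry pops the saved original count
and returns that conservative value.  A userspace model of the contract, with
an artificial "writable limit" standing in for the fault (this model returns
the exact remainder, for illustration):

#include <stdio.h>
#include <string.h>

static size_t clear_user_model(void *addr, size_t sz, size_t writable)
{
	size_t done = sz < writable ? sz : writable;

	memset(addr, 0, done);	/* stores succeed up to the "fault" */
	return sz - done;	/* bytes NOT cleared */
}

int main(void)
{
	char buf[16];

	memset(buf, 0xff, sizeof(buf));
	printf("left = %zu\n", clear_user_model(buf, 16, 16));	/* 0: all done */
	printf("left = %zu\n", clear_user_model(buf, 16, 10));	/* 6: faulted */
	return 0;
}
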
diff --git a/arch/unicore32/lib/copy_from_user.S b/arch/unicore32/lib/copy_from_user.S
new file mode 100644
index 000000000000..ab0767ea5dbd
--- /dev/null
+++ b/arch/unicore32/lib/copy_from_user.S
@@ -0,0 +1,108 @@
+/*
+ * linux/arch/unicore32/lib/copy_from_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Prototype:
+ *
+ * size_t __copy_from_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+ * copy a block to kernel memory from user memory
+ *
+ * Params:
+ *
+ * to = kernel memory
+ * from = user memory
+ * n = number of bytes to copy
+ *
+ * Return value:
+ *
+ * Number of bytes NOT copied.
+ */
+
+ .macro ldr1w ptr reg abort
+ ldrusr \reg, \ptr, 4, abort=\abort
+ .endm
+
+ .macro ldr4w ptr reg1 reg2 reg3 reg4 abort
+100: ldm.w (\reg1, \reg2, \reg3, \reg4), [\ptr]+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 100b, \abort
+ .popsection
+ .endm
+
+ .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+100: ldm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+ .pushsection __ex_table, "a"
+ .align 3
+ .long 100b, \abort
+ .popsection
+ .endm
+
+ .macro ldr1b ptr reg cond=al abort
+ ldrusr \reg, \ptr, 1, \cond, abort=\abort
+ .endm
+
+ .macro str1w ptr reg abort
+ stw.w \reg, [\ptr]+, #4
+ .endm
+
+ .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ stm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+ .endm
+
+ .macro str1b ptr reg cond=al abort
+ .ifnc \cond, al
+ b\cond 201f
+ b 202f
+ .endif
+201: stb.w \reg, [\ptr]+, #1
+202:
+ .endm
+
+ .macro enter
+ mov r3, #0
+ stm.w (r0, r2, r3), [sp-]
+ .endm
+
+ .macro exit
+ add sp, sp, #8
+ ldm.w (r0), [sp]+
+ mov pc, lr
+ .endm
+
+ .text
+
+ENTRY(__copy_from_user)
+
+#include "copy_template.S"
+
+ENDPROC(__copy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ copy_abort_preamble
+ ldm.w (r1, r2), [sp]+
+ sub r3, r0, r1
+ rsub r2, r3, r2
+ stw r2, [sp]
+ mov r1, #0
+ b.l memset
+ ldw.w r0, [sp]+, #4
+ copy_abort_end
+ .popsection
+
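Each "100:"/"abort=" pair above appends an entry to __ex_table pairing the
address of a faulting user access with a fixup address; when a user-space
access faults, the trap handler looks the faulting address up in that table
and resumes at the fixup instead of oopsing.  A toy model of the lookup
(addresses made up; the kernel sorts the table and binary-searches it, a
linear scan is used here for brevity):

#include <stdio.h>

struct exentry { unsigned long insn, fixup; };

static const struct exentry extable[] = {
	{ 0x1000, 0x2000 },	/* faulting access -> its fixup */
	{ 0x1010, 0x2040 },
};

static unsigned long search_extable(unsigned long ip)
{
	for (unsigned int i = 0; i < sizeof(extable) / sizeof(extable[0]); i++)
		if (extable[i].insn == ip)
			return extable[i].fixup;
	return 0;	/* no fixup: treat as a real kernel fault */
}

int main(void)
{
	printf("fault at 0x1010 -> resume at %#lx\n", search_extable(0x1010));
	return 0;
}
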
diff --git a/arch/unicore32/lib/copy_page.S b/arch/unicore32/lib/copy_page.S
new file mode 100644
index 000000000000..3a448d755ade
--- /dev/null
+++ b/arch/unicore32/lib/copy_page.S
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/unicore32/lib/copy_page.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * ASM optimised string functions
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <generated/asm-offsets.h>
+#include <asm/cache.h>
+
+#define COPY_COUNT (PAGE_SZ/256)
+
+ .text
+ .align 5
+/*
+ * UniCore optimised copy_page routine
+ */
+ENTRY(copy_page)
+ stm.w (r17 - r19, lr), [sp-]
+ mov r17, r0
+ mov r18, r1
+ mov r19, #COPY_COUNT
+1:
+ .rept 4
+ ldm.w (r0 - r15), [r18]+
+ stm.w (r0 - r15), [r17]+
+ .endr
+ sub.a r19, r19, #1
+ bne 1b
+ ldm.w (r17 - r19, pc), [sp]+
+ENDPROC(copy_page)
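
Why COPY_COUNT is PAGE_SZ/256: each trip through the loop above runs four
ldm.w/stm.w pairs, and each pair moves 16 registers of 4 bytes, so one
iteration copies 4 * 16 * 4 = 256 bytes.  Assuming a 4 KiB page, that is 16
iterations, as this small check confirms:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const int page_sz = 4096;	/* assumed PAGE_SZ */
	const int bytes_per_iter =
		4 /* .rept */ * 16 /* regs */ * 4 /* bytes each */;

	assert(page_sz % bytes_per_iter == 0);
	printf("COPY_COUNT = %d\n", page_sz / bytes_per_iter);	/* 16 */
	return 0;
}
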
diff --git a/arch/unicore32/lib/copy_template.S b/arch/unicore32/lib/copy_template.S
new file mode 100644
index 000000000000..524287fc0120
--- /dev/null
+++ b/arch/unicore32/lib/copy_template.S
@@ -0,0 +1,214 @@
+/*
+ * linux/arch/unicore32/lib/copy_template.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Theory of operation
+ * -------------------
+ *
+ * This file provides the core code for a forward memory copy used in
+ * the implementation of memcpy(), copy_to_user() and copy_from_user().
+ *
+ * The including file must define the following accessor macros
+ * according to the need of the given function:
+ *
+ * ldr1w ptr reg abort
+ *
+ * This loads one word from 'ptr', stores it in 'reg' and increments
+ * 'ptr' to the next word. The 'abort' argument is used for fixup tables.
+ *
+ * ldr4w ptr reg1 reg2 reg3 reg4 abort
+ * ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ *
+ *	This loads four or eight words starting from 'ptr', stores them
+ *	in the provided registers and increments 'ptr' past those words.
+ *	The 'abort' argument is used for fixup tables.
+ *
+ * ldr1b ptr reg cond abort
+ *
+ *	Similar to ldr1w, but it loads a byte and increments 'ptr' by one byte.
+ * It also must apply the condition code if provided, otherwise the
+ * "al" condition is assumed by default.
+ *
+ * str1w ptr reg abort
+ * str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ * str1b ptr reg cond abort
+ *
+ *	Same as their ldr* counterparts, but data is stored to the 'ptr'
+ *	location rather than being loaded.
+ *
+ * enter
+ *
+ *	Preserve the provided registers on the stack plus any additional
+ *	data as needed by the implementation that includes this code.
+ *	Called upon code entry.
+ *
+ * exit
+ *
+ * Restore registers with the values previously saved with the
+ *	'enter' macro. Called upon code termination.
+ */
+
+
+ enter
+
+ sub.a r2, r2, #4
+ bsl 8f
+ and.a ip, r0, #3
+ bne 9f
+ and.a ip, r1, #3
+ bne 10f
+
+1: sub.a r2, r2, #(28)
+ stm.w (r5 - r8), [sp-]
+ bsl 5f
+
+3:
+4: ldr8w r1, r3, r4, r5, r6, r7, r8, r10, r11, abort=20f
+ sub.a r2, r2, #32
+ str8w r0, r3, r4, r5, r6, r7, r8, r10, r11, abort=20f
+ beg 3b
+
+5: and.a ip, r2, #28
+ rsub ip, ip, #32
+ beq 7f
+ add pc, pc, ip @ C is always clear here
+ nop
+
+ ldr1w r1, r3, abort=20f
+ ldr1w r1, r4, abort=20f
+ ldr1w r1, r5, abort=20f
+ ldr1w r1, r6, abort=20f
+ ldr1w r1, r7, abort=20f
+ ldr1w r1, r8, abort=20f
+ ldr1w r1, r11, abort=20f
+
+ add pc, pc, ip
+ nop
+
+ str1w r0, r3, abort=20f
+ str1w r0, r4, abort=20f
+ str1w r0, r5, abort=20f
+ str1w r0, r6, abort=20f
+ str1w r0, r7, abort=20f
+ str1w r0, r8, abort=20f
+ str1w r0, r11, abort=20f
+
+7: ldm.w (r5 - r8), [sp]+
+
+8: mov.a r2, r2 << #31
+ ldr1b r1, r3, ne, abort=21f
+ ldr1b r1, r4, ea, abort=21f
+ ldr1b r1, r10, ea, abort=21f
+ str1b r0, r3, ne, abort=21f
+ str1b r0, r4, ea, abort=21f
+ str1b r0, r10, ea, abort=21f
+
+ exit
+
+9: rsub ip, ip, #4
+ csub.a ip, #2
+ ldr1b r1, r3, sg, abort=21f
+ ldr1b r1, r4, eg, abort=21f
+ ldr1b r1, r11, abort=21f
+ str1b r0, r3, sg, abort=21f
+ str1b r0, r4, eg, abort=21f
+ sub.a r2, r2, ip
+ str1b r0, r11, abort=21f
+ bsl 8b
+ and.a ip, r1, #3
+ beq 1b
+
+10: andn r1, r1, #3
+ csub.a ip, #2
+ ldr1w r1, r11, abort=21f
+ beq 17f
+ bsg 18f
+
+
+ .macro forward_copy_shift a b
+
+ sub.a r2, r2, #28
+ bsl 14f
+
+11: stm.w (r5 - r9), [sp-]
+
+12:
+ ldr4w r1, r4, r5, r6, r7, abort=19f
+ mov r3, r11 pull #\a
+ sub.a r2, r2, #32
+ ldr4w r1, r8, r9, r10, r11, abort=19f
+ or r3, r3, r4 push #\b
+ mov r4, r4 pull #\a
+ or r4, r4, r5 push #\b
+ mov r5, r5 pull #\a
+ or r5, r5, r6 push #\b
+ mov r6, r6 pull #\a
+ or r6, r6, r7 push #\b
+ mov r7, r7 pull #\a
+ or r7, r7, r8 push #\b
+ mov r8, r8 pull #\a
+ or r8, r8, r9 push #\b
+ mov r9, r9 pull #\a
+ or r9, r9, r10 push #\b
+ mov r10, r10 pull #\a
+ or r10, r10, r11 push #\b
+ str8w r0, r3, r4, r5, r6, r7, r8, r9, r10, , abort=19f
+ beg 12b
+
+ ldm.w (r5 - r9), [sp]+
+
+14: and.a ip, r2, #28
+ beq 16f
+
+15: mov r3, r11 pull #\a
+ ldr1w r1, r11, abort=21f
+ sub.a ip, ip, #4
+ or r3, r3, r11 push #\b
+ str1w r0, r3, abort=21f
+ bsg 15b
+
+16: sub r1, r1, #(\b / 8)
+ b 8b
+
+ .endm
+
+
+ forward_copy_shift a=8 b=24
+
+17: forward_copy_shift a=16 b=16
+
+18: forward_copy_shift a=24 b=8
+
+
+/*
+ * Abort preamble and completion macros.
+ * If a fixup handler is required then those macros must surround it.
+ * It is assumed that the fixup code will handle the private part of
+ * the exit macro.
+ */
+
+ .macro copy_abort_preamble
+19: ldm.w (r5 - r9), [sp]+
+ b 21f
+299: .word 0 @ store lr
+ @ to avoid function call in fixup
+20: ldm.w (r5 - r8), [sp]+
+21:
+ adr r1, 299b
+ stw lr, [r1]
+ .endm
+
+ .macro copy_abort_end
+ adr lr, 299b
+ ldw pc, [lr]
+ .endm
+
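Summarizing the template in C: byte-copy until the destination is word aligned, take the 32-bytes-per-iteration fast path when the source is word aligned too (otherwise the forward_copy_shift variants merge misaligned words), then finish the word and byte tails. A minimal sketch of the aligned path only; the function name is illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void sketch_forward_copy(char *dst, const char *src, size_t n)
    {
            /* head: byte-copy until dst is word aligned (label 9: above) */
            while (n && ((uintptr_t)dst & 3)) {
                    *dst++ = *src++;
                    n--;
            }
            if (!((uintptr_t)src & 3)) {
                    /* bulk: 32 bytes per iteration (labels 3:/4: above) */
                    while (n >= 32) {
                            memcpy(dst, src, 32);   /* ldr8w + str8w */
                            dst += 32; src += 32; n -= 32;
                    }
                    /* word tail (the ldr1w/str1w ladder after label 5:) */
                    while (n >= 4) {
                            memcpy(dst, src, 4);
                            dst += 4; src += 4; n -= 4;
                    }
            }
            /* trailing 0-3 bytes, or the whole rest if src was misaligned */
            while (n--)
                    *dst++ = *src++;
    }
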
diff --git a/arch/unicore32/lib/copy_to_user.S b/arch/unicore32/lib/copy_to_user.S
new file mode 100644
index 000000000000..6e22151c840d
--- /dev/null
+++ b/arch/unicore32/lib/copy_to_user.S
@@ -0,0 +1,96 @@
+/*
+ * linux/arch/unicore32/lib/copy_to_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Prototype:
+ *
+ * size_t __copy_to_user(void *to, const void *from, size_t n)
+ *
+ * Purpose:
+ *
+ * copy a block to user memory from kernel memory
+ *
+ * Params:
+ *
+ * to = user memory
+ * from = kernel memory
+ * n = number of bytes to copy
+ *
+ * Return value:
+ *
+ * Number of bytes NOT copied.
+ */
+
+ .macro ldr1w ptr reg abort
+ ldw.w \reg, [\ptr]+, #4
+ .endm
+
+ .macro ldr4w ptr reg1 reg2 reg3 reg4 abort
+ ldm.w (\reg1, \reg2, \reg3, \reg4), [\ptr]+
+ .endm
+
+ .macro ldr8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+ ldm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+ .endm
+
+ .macro ldr1b ptr reg cond=al abort
+ notcond \cond, .+8
+ ldb.w \reg, [\ptr]+, #1
+ .endm
+
+ .macro str1w ptr reg abort
+ strusr \reg, \ptr, 4, abort=\abort
+ .endm
+
+ .macro str8w ptr reg1 reg2 reg3 reg4 reg5 reg6 reg7 reg8 abort
+100: stm.w (\reg1, \reg2, \reg3, \reg4, \reg5, \reg6, \reg7, \reg8), [\ptr]+
+
+ .pushsection __ex_table, "a"
+ .long 100b, \abort
+ .popsection
+ .endm
+
+ .macro str1b ptr reg cond=al abort
+ strusr \reg, \ptr, 1, \cond, abort=\abort
+ .endm
+
+ .macro enter
+ mov r3, #0
+ stm.w (r0, r2, r3), [sp-]
+ .endm
+
+ .macro exit
+ add sp, sp, #8
+ ldm.w (r0), [sp]+
+ mov pc, lr
+ .endm
+
+ .text
+
+WEAK(__copy_to_user)
+
+#include "copy_template.S"
+
+ENDPROC(__copy_to_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+ copy_abort_preamble
+ ldm.w (r1, r2, r3), [sp]+
+ sub r0, r0, r1
+ rsub r0, r0, r2
+ copy_abort_end
+ .popsection
+
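The fixup above recovers the return value arithmetically: the restored r1/r2 are the original destination and count, and r0 at fault time is the destination cursor. A hedged C rendering of the two instructions (sub r0, r0, r1; rsub r0, r0, r2), with hypothetical names:

    static unsigned long sketch_fixup_return(unsigned long fault_ptr,
                                             unsigned long dst_start,
                                             unsigned long n)
    {
            unsigned long copied = fault_ptr - dst_start;   /* sub */
            return n - copied;              /* rsub: bytes NOT copied */
    }
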
diff --git a/arch/unicore32/lib/delay.S b/arch/unicore32/lib/delay.S
new file mode 100644
index 000000000000..24664c009e78
--- /dev/null
+++ b/arch/unicore32/lib/delay.S
@@ -0,0 +1,51 @@
+/*
+ * linux/arch/unicore32/lib/delay.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/param.h>
+ .text
+
+.LC0: .word loops_per_jiffy
+.LC1: .word (2199023*HZ)>>11
+
+/*
+ * r0 <= 2000
+ * lpj <= 0x01ffffff (max. 3355 bogomips)
+ * HZ <= 1000
+ */
+
+ENTRY(__udelay)
+ ldw r2, .LC1
+ mul r0, r2, r0
+ENTRY(__const_udelay) @ 0 <= r0 <= 0x7fffff06
+ ldw r2, .LC0
+ ldw r2, [r2] @ max = 0x01ffffff
+ mov r0, r0 >> #14 @ max = 0x0001ffff
+ mov r2, r2 >> #10 @ max = 0x00007fff
+ mul r0, r2, r0 @ max = 2^32-1
+ mov.a r0, r0 >> #6
+ cmoveq pc, lr
+
+/*
+ * loops = r0 * HZ * loops_per_jiffy / 1000000
+ *
+ * Oh, if only we had a cycle counter...
+ */
+
+@ Delay routine
+ENTRY(__delay)
+ sub.a r0, r0, #2
+ bua __delay
+ mov pc, lr
+ENDPROC(__udelay)
+ENDPROC(__const_udelay)
+ENDPROC(__delay)
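Working the fixed-point math through: 2199023 is approximately 2^41/10^6, so .LC1 is roughly HZ * 2^30 / 10^6; after the >> #14, the multiply by (lpj >> 10) and the final >> #6, the powers of two cancel and loops = usecs * HZ * loops_per_jiffy / 1000000, as the comment above states. A hedged C rendering, with HZ assumed:

    #include <stdint.h>

    #define HZ 100  /* assumed config value */

    /* 2199023 ~ 2^41 / 10^6, so LC1 ~ HZ * 2^30 / 10^6 */
    static const uint32_t LC1 = ((uint64_t)2199023 * HZ) >> 11;

    static uint32_t sketch_udelay_loops(uint32_t usecs, uint32_t loops_per_jiffy)
    {
            uint32_t r0 = usecs * LC1;              /* __udelay */
            uint32_t r2 = loops_per_jiffy >> 10;    /* max 0x7fff */

            r0 >>= 14;                              /* max 0x1ffff */
            r0 *= r2;                               /* fits in 32 bits */
            return r0 >> 6;     /* ~ usecs * HZ * loops_per_jiffy / 10^6 */
    }
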
diff --git a/arch/unicore32/lib/findbit.S b/arch/unicore32/lib/findbit.S
new file mode 100644
index 000000000000..c360ce905d8b
--- /dev/null
+++ b/arch/unicore32/lib/findbit.S
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/unicore32/lib/findbit.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+/*
+ * Purpose : Find a 'zero' bit
+ * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit);
+ */
+__uc32_find_first_zero_bit:
+ cxor.a r1, #0
+ beq 3f
+ mov r2, #0
+1: ldb r3, [r0+], r2 >> #3
+ xor.a r3, r3, #0xff @ invert bits
+ bne .L_found @ any now set - found zero bit
+ add r2, r2, #8 @ next bit pointer
+2: csub.a r2, r1 @ any more?
+ bub 1b
+3: mov r0, r1 @ no free bits
+ mov pc, lr
+
+/*
+ * Purpose : Find next 'zero' bit
+ * Prototype: int find_next_zero_bit
+ * (void *addr, unsigned int maxbit, int offset)
+ */
+ENTRY(__uc32_find_next_zero_bit)
+ cxor.a r1, #0
+ beq 3b
+ and.a ip, r2, #7
+ beq 1b @ If new byte, goto old routine
+ ldb r3, [r0+], r2 >> #3
+ xor r3, r3, #0xff @ now looking for a 1 bit
+ mov.a r3, r3 >> ip @ shift off unused bits
+ bne .L_found
+ or r2, r2, #7 @ if zero, then no bits here
+ add r2, r2, #1 @ align bit pointer
+ b 2b @ loop for next bit
+ENDPROC(__uc32_find_next_zero_bit)
+
+/*
+ * Purpose : Find a 'one' bit
+ * Prototype: int find_first_bit
+ * (const unsigned long *addr, unsigned int maxbit);
+ */
+__uc32_find_first_bit:
+ cxor.a r1, #0
+ beq 3f
+ mov r2, #0
+1: ldb r3, [r0+], r2 >> #3
+ mov.a r3, r3
+	bne	.L_found		@ any now set - found bit
+ add r2, r2, #8 @ next bit pointer
+2: csub.a r2, r1 @ any more?
+ bub 1b
+3: mov r0, r1 @ no free bits
+ mov pc, lr
+
+/*
+ * Purpose : Find next 'one' bit
+ * Prototype: int find_next_bit
+ * (void *addr, unsigned int maxbit, int offset)
+ */
+ENTRY(__uc32_find_next_bit)
+ cxor.a r1, #0
+ beq 3b
+ and.a ip, r2, #7
+ beq 1b @ If new byte, goto old routine
+ ldb r3, [r0+], r2 >> #3
+ mov.a r3, r3 >> ip @ shift off unused bits
+ bne .L_found
+ or r2, r2, #7 @ if zero, then no bits here
+ add r2, r2, #1 @ align bit pointer
+ b 2b @ loop for next bit
+ENDPROC(__uc32_find_next_bit)
+
+/*
+ * One or more bits in the LSB of r3 are assumed to be set.
+ */
+.L_found:
+ rsub r1, r3, #0
+ and r3, r3, r1
+ cntlz r3, r3
+ rsub r3, r3, #31
+ add r0, r2, r3
+ mov pc, lr
+
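The .L_found epilogue isolates the lowest set bit with the classic x & -x trick, then converts it to a bit index by counting leading zeros. A C sketch, with __builtin_clz standing in for cntlz:

    /* assumes bits != 0, matching the .L_found precondition above */
    static unsigned int sketch_found(unsigned int base, unsigned int bits)
    {
            unsigned int lowest = bits & -bits;             /* rsub + and */
            unsigned int idx = 31 - __builtin_clz(lowest);  /* cntlz + rsub */
            return base + idx;      /* bit number = byte base + index */
    }
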
diff --git a/arch/unicore32/lib/strncpy_from_user.S b/arch/unicore32/lib/strncpy_from_user.S
new file mode 100644
index 000000000000..ff6c304d5c7e
--- /dev/null
+++ b/arch/unicore32/lib/strncpy_from_user.S
@@ -0,0 +1,45 @@
+/*
+ * linux/arch/unicore32/lib/strncpy_from_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+ .align 5
+
+/*
+ * Copy a string from user space to kernel space.
+ * r0 = dst, r1 = src, r2 = byte length
+ * returns the number of characters copied (strlen of copied string),
+ * -EFAULT on exception, or "len" if we fill the whole buffer
+ */
+ENTRY(__strncpy_from_user)
+ mov ip, r1
+1: sub.a r2, r2, #1
+ ldrusr r3, r1, 1, ns
+ bfs 2f
+ stb.w r3, [r0]+, #1
+ cxor.a r3, #0
+ bne 1b
+ sub r1, r1, #1 @ take NUL character out of count
+2: sub r0, r1, ip
+ mov pc, lr
+ENDPROC(__strncpy_from_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: mov r3, #0
+ stb r3, [r0+], #0 @ null terminate
+ mov r0, #-EFAULT
+ mov pc, lr
+ .popsection
+
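Restated in C: bytes are copied through, the terminating NUL is stored but subtracted back out of the count, and the returned length is measured on the source cursor. A hedged sketch with an illustrative name; the -EFAULT path lives in the fixup and is omitted here:

    static long sketch_strncpy_from_user(char *dst, const char *src, long count)
    {
            const char *start = src;

            while (count-- > 0) {
                    char c = *src++;    /* ldrusr; a fault yields -EFAULT */
                    *dst++ = c;         /* the NUL is stored too */
                    if (c == '\0') {
                            src--;      /* take NUL character out of count */
                            break;
                    }
            }
            return src - start; /* strlen of copy, or "len" if buffer filled */
    }
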
diff --git a/arch/unicore32/lib/strnlen_user.S b/arch/unicore32/lib/strnlen_user.S
new file mode 100644
index 000000000000..75863030f21d
--- /dev/null
+++ b/arch/unicore32/lib/strnlen_user.S
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/unicore32/lib/strnlen_user.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/errno.h>
+
+ .text
+ .align 5
+
+/* Prototype: unsigned long __strnlen_user(const char *str, long n)
+ * Purpose : get length of a string in user memory
+ * Params : str - address of string in user memory
+ * Returns : length of string *including terminator*
+ * or zero on exception, or n + 1 if too long
+ */
+ENTRY(__strnlen_user)
+ mov r2, r0
+1:
+ ldrusr r3, r0, 1
+ cxor.a r3, #0
+ beq 2f
+ sub.a r1, r1, #1
+ bne 1b
+ add r0, r0, #1
+2: sub r0, r0, r2
+ mov pc, lr
+ENDPROC(__strnlen_user)
+
+ .pushsection .fixup,"ax"
+ .align 0
+9001: mov r0, #0
+ mov pc, lr
+ .popsection
diff --git a/arch/unicore32/mm/Kconfig b/arch/unicore32/mm/Kconfig
new file mode 100644
index 000000000000..5f77fb3c63be
--- /dev/null
+++ b/arch/unicore32/mm/Kconfig
@@ -0,0 +1,50 @@
+comment "Processor Type"
+
+# Select CPU types depending on the architecture selected. This selects
+# which CPUs we support in the kernel image, and the compiler instruction
+# optimiser behaviour.
+
+config CPU_UCV2
+ def_bool y
+
+comment "Processor Features"
+
+config CPU_ICACHE_DISABLE
+ bool "Disable I-Cache (I-bit)"
+ help
+ Say Y here to disable the processor instruction cache. Unless
+ you have a reason not to or are unsure, say N.
+
+config CPU_DCACHE_DISABLE
+ bool "Disable D-Cache (D-bit)"
+ help
+ Say Y here to disable the processor data cache. Unless
+ you have a reason not to or are unsure, say N.
+
+config CPU_DCACHE_WRITETHROUGH
+ bool "Force write through D-cache"
+ help
+ Say Y here to use the data cache in writethrough mode. Unless you
+ specifically require this or are unsure, say N.
+
+config CPU_DCACHE_LINE_DISABLE
+ bool "Disable D-cache line ops"
+ default y
+ help
+ Say Y here to disable the data cache line operations.
+
+config CPU_TLB_SINGLE_ENTRY_DISABLE
+ bool "Disable TLB single entry ops"
+ default y
+ help
+ Say Y here to disable the TLB single entry operations.
+
+config SWIOTLB
+ def_bool y
+
+config IOMMU_HELPER
+ def_bool SWIOTLB
+
+config NEED_SG_DMA_LENGTH
+ def_bool SWIOTLB
+
diff --git a/arch/unicore32/mm/Makefile b/arch/unicore32/mm/Makefile
new file mode 100644
index 000000000000..46c166699319
--- /dev/null
+++ b/arch/unicore32/mm/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for the linux unicore-specific parts of the memory manager.
+#
+
+obj-y := extable.o fault.o init.o pgd.o mmu.o
+obj-y += flush.o ioremap.o
+
+obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
+
+obj-$(CONFIG_MODULES) += proc-syms.o
+
+obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
+
+obj-$(CONFIG_CPU_UCV2) += cache-ucv2.o tlb-ucv2.o proc-ucv2.o
+
diff --git a/arch/unicore32/mm/alignment.c b/arch/unicore32/mm/alignment.c
new file mode 100644
index 000000000000..28f576d733ee
--- /dev/null
+++ b/arch/unicore32/mm/alignment.c
@@ -0,0 +1,523 @@
+/*
+ * linux/arch/unicore32/mm/alignment.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/*
+ * TODO:
+ *  FPU ldm/stm not handled yet
+ */
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include <asm/tlbflush.h>
+#include <asm/unaligned.h>
+
+#define CODING_BITS(i) (i & 0xe0000120)
+
+#define LDST_P_BIT(i) (i & (1 << 28)) /* Preindex */
+#define LDST_U_BIT(i) (i & (1 << 27)) /* Add offset */
+#define LDST_W_BIT(i) (i & (1 << 25)) /* Writeback */
+#define LDST_L_BIT(i) (i & (1 << 24)) /* Load */
+
+#define LDST_P_EQ_U(i) ((((i) ^ ((i) >> 1)) & (1 << 27)) == 0)
+
+#define LDSTH_I_BIT(i) (i & (1 << 26)) /* half-word immed */
+#define LDM_S_BIT(i) (i & (1 << 26)) /* write ASR from BSR */
+#define LDM_H_BIT(i) (i & (1 << 6)) /* select r0-r15 or r16-r31 */
+
+#define RN_BITS(i) ((i >> 19) & 31) /* Rn */
+#define RD_BITS(i) ((i >> 14) & 31) /* Rd */
+#define RM_BITS(i) (i & 31) /* Rm */
+
+#define REGMASK_BITS(i) (((i & 0x7fe00) >> 3) | (i & 0x3f))
+#define OFFSET_BITS(i) (i & 0x03fff)
+
+#define SHIFT_BITS(i) ((i >> 9) & 0x1f)
+#define SHIFT_TYPE(i) (i & 0xc0)
+#define SHIFT_LSL 0x00
+#define SHIFT_LSR 0x40
+#define SHIFT_ASR 0x80
+#define SHIFT_RORRRX 0xc0
+
+union offset_union {
+ unsigned long un;
+ signed long sn;
+};
+
+#define TYPE_ERROR 0
+#define TYPE_FAULT 1
+#define TYPE_LDST 2
+#define TYPE_DONE 3
+#define TYPE_SWAP 4
+#define TYPE_COLS 5 /* Coprocessor load/store */
+
+#define get8_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.u %1, [%2], #1\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, #1\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get8t_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.u %1, [%2], #1\n" \
+ "2:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "3: mov %0, #1\n" \
+ " b 2b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 3b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ get8_unaligned_check(val, a, err); \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr; \
+ __asm__( \
+ "1: stb.u %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "2: stb.u %1, [%2]\n" \
+ "3:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "4: mov %0, #1\n" \
+ " b 3b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define __put32_unaligned_check(ins, val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr; \
+ __asm__( \
+ "1: "ins" %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "2: "ins" %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "3: "ins" %1, [%2], #1\n" \
+ " mov %1, %1 >> #8\n" \
+ "4: "ins" %1, [%2]\n" \
+ "5:\n" \
+ " .pushsection .fixup,\"ax\"\n" \
+ " .align 2\n" \
+ "6: mov %0, #1\n" \
+ " b 5b\n" \
+ " .popsection\n" \
+ " .pushsection __ex_table,\"a\"\n" \
+ " .align 3\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .popsection\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define get32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ get8_unaligned_check(val, a, err); \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ get8_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32_unaligned_check(val, addr) \
+ __put32_unaligned_check("stb.u", val, addr)
+
+#define get32t_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ get8t_unaligned_check(val, a, err); \
+ get8t_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ get8t_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ get8t_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32t_unaligned_check(val, addr) \
+ __put32_unaligned_check("stb.u", val, addr)
+
+static void
+do_alignment_finish_ldst(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs, union offset_union offset)
+{
+ if (!LDST_U_BIT(instr))
+ offset.un = -offset.un;
+
+ if (!LDST_P_BIT(instr))
+ addr += offset.un;
+
+ if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
+ regs->uregs[RN_BITS(instr)] = addr;
+}
+
+static int
+do_alignment_ldrhstrh(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs)
+{
+ unsigned int rd = RD_BITS(instr);
+
+	/* old value 0x40002120 could not identify the swap instr correctly */
+ if ((instr & 0x4b003fe0) == 0x40000120)
+ goto swp;
+
+ if (LDST_L_BIT(instr)) {
+ unsigned long val;
+ get16_unaligned_check(val, addr);
+
+ /* signed half-word? */
+ if (instr & 0x80)
+ val = (signed long)((signed short)val);
+
+ regs->uregs[rd] = val;
+ } else
+ put16_unaligned_check(regs->uregs[rd], addr);
+
+ return TYPE_LDST;
+
+swp:
+	/* only handle swap word;
+	 * a byte swap should never raise this alignment exception */
+ get32_unaligned_check(regs->uregs[RD_BITS(instr)], addr);
+ put32_unaligned_check(regs->uregs[RM_BITS(instr)], addr);
+ return TYPE_SWAP;
+
+fault:
+ return TYPE_FAULT;
+}
+
+static int
+do_alignment_ldrstr(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs)
+{
+ unsigned int rd = RD_BITS(instr);
+
+ if (!LDST_P_BIT(instr) && LDST_W_BIT(instr))
+ goto trans;
+
+ if (LDST_L_BIT(instr))
+ get32_unaligned_check(regs->uregs[rd], addr);
+ else
+ put32_unaligned_check(regs->uregs[rd], addr);
+ return TYPE_LDST;
+
+trans:
+ if (LDST_L_BIT(instr))
+ get32t_unaligned_check(regs->uregs[rd], addr);
+ else
+ put32t_unaligned_check(regs->uregs[rd], addr);
+ return TYPE_LDST;
+
+fault:
+ return TYPE_FAULT;
+}
+
+/*
+ * LDM/STM alignment handler.
+ *
+ * There are 4 variants of this instruction:
+ *
+ * B = rn pointer before instruction, A = rn pointer after instruction
+ * ------ increasing address ----->
+ * | | r0 | r1 | ... | rx | |
+ * PU = 01 B A
+ * PU = 11 B A
+ * PU = 00 A B
+ * PU = 10 A B
+ */
+static int
+do_alignment_ldmstm(unsigned long addr, unsigned long instr,
+ struct pt_regs *regs)
+{
+ unsigned int rd, rn, pc_correction, reg_correction, nr_regs, regbits;
+ unsigned long eaddr, newaddr;
+
+ if (LDM_S_BIT(instr))
+ goto bad;
+
+ pc_correction = 4; /* processor implementation defined */
+
+ /* count the number of registers in the mask to be transferred */
+ nr_regs = hweight16(REGMASK_BITS(instr)) * 4;
+
+ rn = RN_BITS(instr);
+ newaddr = eaddr = regs->uregs[rn];
+
+ if (!LDST_U_BIT(instr))
+ nr_regs = -nr_regs;
+ newaddr += nr_regs;
+ if (!LDST_U_BIT(instr))
+ eaddr = newaddr;
+
+ if (LDST_P_EQ_U(instr)) /* U = P */
+ eaddr += 4;
+
+ /*
+ * This is a "hint" - we already have eaddr worked out by the
+ * processor for us.
+ */
+ if (addr != eaddr) {
+ printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, "
+ "addr = %08lx, eaddr = %08lx\n",
+ instruction_pointer(regs), instr, addr, eaddr);
+ show_regs(regs);
+ }
+
+ if (LDM_H_BIT(instr))
+ reg_correction = 0x10;
+ else
+ reg_correction = 0x00;
+
+ for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
+ regbits >>= 1, rd += 1)
+ if (regbits & 1) {
+ if (LDST_L_BIT(instr))
+ get32_unaligned_check(regs->
+ uregs[rd + reg_correction], eaddr);
+ else
+ put32_unaligned_check(regs->
+ uregs[rd + reg_correction], eaddr);
+ eaddr += 4;
+ }
+
+ if (LDST_W_BIT(instr))
+ regs->uregs[rn] = newaddr;
+ return TYPE_DONE;
+
+fault:
+ regs->UCreg_pc -= pc_correction;
+ return TYPE_FAULT;
+
+bad:
+ printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n");
+ return TYPE_ERROR;
+}
+
+static int
+do_alignment(unsigned long addr, unsigned int error_code, struct pt_regs *regs)
+{
+ union offset_union offset;
+ unsigned long instr, instrptr;
+ int (*handler) (unsigned long addr, unsigned long instr,
+ struct pt_regs *regs);
+ unsigned int type;
+
+ instrptr = instruction_pointer(regs);
+ if (instrptr >= PAGE_OFFSET)
+ instr = *(unsigned long *)instrptr;
+ else {
+ __asm__ __volatile__(
+ "ldw.u %0, [%1]\n"
+ : "=&r"(instr)
+ : "r"(instrptr));
+ }
+
+ regs->UCreg_pc += 4;
+
+ switch (CODING_BITS(instr)) {
+ case 0x40000120: /* ldrh or strh */
+ if (LDSTH_I_BIT(instr))
+ offset.un = (instr & 0x3e00) >> 4 | (instr & 31);
+ else
+ offset.un = regs->uregs[RM_BITS(instr)];
+ handler = do_alignment_ldrhstrh;
+ break;
+
+ case 0x60000000: /* ldr or str immediate */
+ case 0x60000100: /* ldr or str immediate */
+ case 0x60000020: /* ldr or str immediate */
+ case 0x60000120: /* ldr or str immediate */
+ offset.un = OFFSET_BITS(instr);
+ handler = do_alignment_ldrstr;
+ break;
+
+ case 0x40000000: /* ldr or str register */
+ offset.un = regs->uregs[RM_BITS(instr)];
+ {
+ unsigned int shiftval = SHIFT_BITS(instr);
+
+ switch (SHIFT_TYPE(instr)) {
+ case SHIFT_LSL:
+ offset.un <<= shiftval;
+ break;
+
+ case SHIFT_LSR:
+ offset.un >>= shiftval;
+ break;
+
+ case SHIFT_ASR:
+ offset.sn >>= shiftval;
+ break;
+
+ case SHIFT_RORRRX:
+ if (shiftval == 0) {
+ offset.un >>= 1;
+ if (regs->UCreg_asr & PSR_C_BIT)
+ offset.un |= 1 << 31;
+ } else
+ offset.un = offset.un >> shiftval |
+ offset.un << (32 - shiftval);
+ break;
+ }
+ }
+ handler = do_alignment_ldrstr;
+ break;
+
+ case 0x80000000: /* ldm or stm */
+ case 0x80000020: /* ldm or stm */
+ handler = do_alignment_ldmstm;
+ break;
+
+ default:
+ goto bad;
+ }
+
+ type = handler(addr, instr, regs);
+
+ if (type == TYPE_ERROR || type == TYPE_FAULT)
+ goto bad_or_fault;
+
+ if (type == TYPE_LDST)
+ do_alignment_finish_ldst(addr, instr, regs, offset);
+
+ return 0;
+
+bad_or_fault:
+ if (type == TYPE_ERROR)
+ goto bad;
+ regs->UCreg_pc -= 4;
+ /*
+ * We got a fault - fix it up, or die.
+ */
+ do_bad_area(addr, error_code, regs);
+ return 0;
+
+bad:
+ /*
+ * Oops, we didn't handle the instruction.
+ * However, we must handle FPU instructions first.
+ */
+#ifdef CONFIG_UNICORE_FPU_F64
+ /* handle co.load/store */
+#define CODING_COLS 0xc0000000
+#define COLS_OFFSET_BITS(i) (i & 0x1FF)
+#define COLS_L_BITS(i) (i & (1<<24))
+#define COLS_FN_BITS(i) ((i>>14) & 31)
+ if ((instr & 0xe0000000) == CODING_COLS) {
+ unsigned int fn = COLS_FN_BITS(instr);
+ unsigned long val = 0;
+ if (COLS_L_BITS(instr)) {
+ get32t_unaligned_check(val, addr);
+ switch (fn) {
+#define ASM_MTF(n) case n: \
+ __asm__ __volatile__("MTF %0, F" __stringify(n) \
+ : : "r"(val)); \
+ break;
+ ASM_MTF(0); ASM_MTF(1); ASM_MTF(2); ASM_MTF(3);
+ ASM_MTF(4); ASM_MTF(5); ASM_MTF(6); ASM_MTF(7);
+ ASM_MTF(8); ASM_MTF(9); ASM_MTF(10); ASM_MTF(11);
+ ASM_MTF(12); ASM_MTF(13); ASM_MTF(14); ASM_MTF(15);
+ ASM_MTF(16); ASM_MTF(17); ASM_MTF(18); ASM_MTF(19);
+ ASM_MTF(20); ASM_MTF(21); ASM_MTF(22); ASM_MTF(23);
+ ASM_MTF(24); ASM_MTF(25); ASM_MTF(26); ASM_MTF(27);
+ ASM_MTF(28); ASM_MTF(29); ASM_MTF(30); ASM_MTF(31);
+#undef ASM_MTF
+ }
+ } else {
+ switch (fn) {
+#define ASM_MFF(n) case n: \
+ __asm__ __volatile__("MFF %0, F" __stringify(n) \
+ : : "r"(val)); \
+ break;
+ ASM_MFF(0); ASM_MFF(1); ASM_MFF(2); ASM_MFF(3);
+ ASM_MFF(4); ASM_MFF(5); ASM_MFF(6); ASM_MFF(7);
+ ASM_MFF(8); ASM_MFF(9); ASM_MFF(10); ASM_MFF(11);
+ ASM_MFF(12); ASM_MFF(13); ASM_MFF(14); ASM_MFF(15);
+ ASM_MFF(16); ASM_MFF(17); ASM_MFF(18); ASM_MFF(19);
+ ASM_MFF(20); ASM_MFF(21); ASM_MFF(22); ASM_MFF(23);
+ ASM_MFF(24); ASM_MFF(25); ASM_MFF(26); ASM_MFF(27);
+ ASM_MFF(28); ASM_MFF(29); ASM_MFF(30); ASM_MFF(31);
+#undef ASM_MFF
+ }
+ put32t_unaligned_check(val, addr);
+ }
+ return TYPE_COLS;
+ }
+fault:
+ return TYPE_FAULT;
+#endif
+ printk(KERN_ERR "Alignment trap: not handling instruction "
+ "%08lx at [<%08lx>]\n", instr, instrptr);
+ return 1;
+}
+
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten. Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ */
+static int __init alignment_init(void)
+{
+ hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
+ "alignment exception");
+
+ return 0;
+}
+
+fs_initcall(alignment_init);
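The get/put*_unaligned_check macros earlier in this file are byte-at-a-time accessors with exception-table entries attached. Stripped of the fixup plumbing, get32_unaligned_check reduces to a little-endian byte gather:

    #include <stdint.h>

    static uint32_t sketch_get32_unaligned(const uint8_t *p)
    {
            /* four faultable ldb.u loads, merged little-endian */
            return (uint32_t)p[0]
                 | (uint32_t)p[1] << 8
                 | (uint32_t)p[2] << 16
                 | (uint32_t)p[3] << 24;
    }
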
diff --git a/arch/unicore32/mm/cache-ucv2.S b/arch/unicore32/mm/cache-ucv2.S
new file mode 100644
index 000000000000..ecaa1727f906
--- /dev/null
+++ b/arch/unicore32/mm/cache-ucv2.S
@@ -0,0 +1,212 @@
+/*
+ * linux/arch/unicore32/mm/cache-ucv2.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the "shell" of the UniCore-v2 processor support.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+
+#include "proc-macros.S"
+
+/*
+ * __cpuc_flush_icache_all()
+ * __cpuc_flush_kern_all()
+ * __cpuc_flush_user_all()
+ *
+ * Flush the entire cache.
+ */
+ENTRY(__cpuc_flush_icache_all)
+ /*FALLTHROUGH*/
+ENTRY(__cpuc_flush_kern_all)
+ /*FALLTHROUGH*/
+ENTRY(__cpuc_flush_user_all)
+ mov r0, #0
+ movc p0.c5, r0, #14 @ Dcache flush all
+ nop8
+
+ mov r0, #0
+ movc p0.c5, r0, #20 @ Icache invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_flush_user_range(start, end, flags)
+ *
+ *	Flush a range of cache entries in the specified address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - flags - vm_area_struct flags describing address space
+ */
+ENTRY(__cpuc_flush_user_range)
+ cxor.a r2, #0
+ beq __cpuc_dma_flush_range
+
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
+ sub r1, r1, r0
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+101: dcacheline_flush r0, r11, r12
+
+ add r0, r0, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 101b
+ b 3f
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #14 @ Dcache flush all
+ nop8
+
+3: mov ip, #0
+ movc p0.c5, ip, #20 @ Icache invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_coherent_kern_range(start,end)
+ * __cpuc_coherent_user_range(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified
+ * region. This is typically used when code has been written to
+ * a memory region, and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__cpuc_coherent_kern_range)
+ /* FALLTHROUGH */
+ENTRY(__cpuc_coherent_user_range)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1 @ Safety check
+ sub r1, r1, r0
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+ @ r0 va2pa r10
+ mov r9, #PAGE_SZ
+ sub r9, r9, #1 @ PAGE_MASK
+101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
+ b 103f
+102: cand.a r0, r9
+ beq 101b
+
+103: movc p0.c5, r10, #11 @ Dcache clean line of R10
+ nop8
+
+ add r0, r0, #CACHE_LINESIZE
+ add r10, r10, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 102b
+ b 3f
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+
+3: mov ip, #0
+ movc p0.c5, ip, #20 @ Icache invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_flush_kern_dcache_area(void *addr, size_t size)
+ *
+ * - addr - kernel address
+ * - size - region size
+ */
+ENTRY(__cpuc_flush_kern_dcache_area)
+ mov ip, #0
+ movc p0.c5, ip, #14 @ Dcache flush all
+ nop8
+ mov pc, lr
+
+/*
+ * __cpuc_dma_clean_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__cpuc_dma_clean_range)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1
+ sub r1, r1, r0
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ @ r0 va2pa r10
+ mov r9, #PAGE_SZ
+ sub r9, r9, #1 @ PAGE_MASK
+101: va2pa r0, r10, r11, r12, r13, 2f @ r10 is PA
+ b 1f
+102: cand.a r0, r9
+ beq 101b
+
+1: movc p0.c5, r10, #11 @ Dcache clean line of R10
+ nop8
+ add r0, r0, #CACHE_LINESIZE
+ add r10, r10, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 102b
+ mov pc, lr
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+
+ mov pc, lr
+
+/*
+ * __cpuc_dma_inv_range(start,end)
+ * __cpuc_dma_flush_range(start,end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__cpuc_dma_inv_range:
+ /* FALLTHROUGH */
+ENTRY(__cpuc_dma_flush_range)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ andn r0, r0, #CACHE_LINESIZE - 1
+ sub r1, r1, r0
+ andn r1, r1, #CACHE_LINESIZE - 1
+ add r1, r1, #CACHE_LINESIZE
+
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 2f
+
+ @ r0 va2pa r10
+101: dcacheline_flush r0, r11, r12
+
+ add r0, r0, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bns 101b
+ mov pc, lr
+#endif
+2: mov ip, #0
+ movc p0.c5, ip, #14 @ Dcache flush all
+ nop8
+
+ mov pc, lr
+
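All the range functions above normalize their arguments the same way before walking cache lines: the start is aligned down to a line, and the length is rounded down to whole lines and then extended by one extra line, with a whole-cache fallback above MAX_AREA_SIZE. A sketch of that rounding, with the line size assumed:

    #define CACHE_LINESIZE 32   /* assumed; the asm gets this elsewhere */

    /* mirrors: andn r0, ...; sub r1, r1, r0; andn r1, ...; add r1, #LINESIZE */
    static unsigned long sketch_round_len(unsigned long start, unsigned long end)
    {
            unsigned long s = start & ~(unsigned long)(CACHE_LINESIZE - 1);
            unsigned long len = end - s;

            return (len & ~(unsigned long)(CACHE_LINESIZE - 1)) + CACHE_LINESIZE;
    }
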
diff --git a/arch/unicore32/mm/dma-swiotlb.c b/arch/unicore32/mm/dma-swiotlb.c
new file mode 100644
index 000000000000..bfa9fbb2bbb1
--- /dev/null
+++ b/arch/unicore32/mm/dma-swiotlb.c
@@ -0,0 +1,34 @@
+/*
+ * Contains routines needed to support swiotlb for UniCore32.
+ *
+ * Copyright (C) 2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
+#include <linux/bootmem.h>
+
+#include <asm/dma.h>
+
+struct dma_map_ops swiotlb_dma_map_ops = {
+ .alloc_coherent = swiotlb_alloc_coherent,
+ .free_coherent = swiotlb_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .dma_supported = swiotlb_dma_supported,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(swiotlb_dma_map_ops);
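Drivers never call this ops table directly; the generic DMA API dispatches through it, bouncing via swiotlb buffers when needed. A hedged usage sketch, assuming a caller-supplied device and buffer:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    static int sketch_dma_tx(struct device *dev, void *buf, size_t len)
    {
            /* dispatches to swiotlb_map_page() via the ops table above */
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -EIO;
            /* ... start the device on 'handle', wait for completion ... */
            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
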
diff --git a/arch/unicore32/mm/extable.c b/arch/unicore32/mm/extable.c
new file mode 100644
index 000000000000..6564180eb285
--- /dev/null
+++ b/arch/unicore32/mm/extable.c
@@ -0,0 +1,24 @@
+/*
+ * linux/arch/unicore32/mm/extable.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup)
+ regs->UCreg_pc = fixup->fixup;
+
+ return fixup != NULL;
+}
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
new file mode 100644
index 000000000000..283aa4b50b7a
--- /dev/null
+++ b/arch/unicore32/mm/fault.c
@@ -0,0 +1,479 @@
+/*
+ * linux/arch/unicore32/mm/fault.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/page-flags.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF (1 << 31)
+
+static inline int fsr_fs(unsigned int fsr)
+{
+	/* fold the 7-bit status xyabcde down to abcde + xy */
+ return (fsr & 31) + ((fsr & (3 << 5)) >> 5);
+}
+
+/*
+ * This is useful to dump out the page tables associated with
+ * 'addr' in mm 'mm'.
+ */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+
+ if (!mm)
+ mm = &init_mm;
+
+ printk(KERN_ALERT "pgd = %p\n", mm->pgd);
+ pgd = pgd_offset(mm, addr);
+ printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
+
+ do {
+ pmd_t *pmd;
+ pte_t *pte;
+
+ if (pgd_none(*pgd))
+ break;
+
+ if (pgd_bad(*pgd)) {
+ printk("(bad)");
+ break;
+ }
+
+ pmd = pmd_offset((pud_t *) pgd, addr);
+ if (PTRS_PER_PMD != 1)
+ printk(", *pmd=%08lx", pmd_val(*pmd));
+
+ if (pmd_none(*pmd))
+ break;
+
+ if (pmd_bad(*pmd)) {
+ printk("(bad)");
+ break;
+ }
+
+ /* We must not map this if we have highmem enabled */
+ if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
+ break;
+
+ pte = pte_offset_map(pmd, addr);
+ printk(", *pte=%08lx", pte_val(*pte));
+ pte_unmap(pte);
+ } while (0);
+
+ printk("\n");
+}
+
+/*
+ * Oops. The kernel tried to access some page that wasn't present.
+ */
+static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
+ unsigned int fsr, struct pt_regs *regs)
+{
+ /*
+ * Are we prepared to handle this kernel fault?
+ */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * No handler, we'll have to terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+ printk(KERN_ALERT
+ "Unable to handle kernel %s at virtual address %08lx\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+
+ show_pte(mm, addr);
+ die("Oops", regs, fsr);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+}
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * User mode accesses just cause a SIGSEGV
+ */
+static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
+ unsigned int fsr, unsigned int sig, int code,
+ struct pt_regs *regs)
+{
+ struct siginfo si;
+
+ tsk->thread.address = addr;
+ tsk->thread.error_code = fsr;
+ tsk->thread.trap_no = 14;
+ si.si_signo = sig;
+ si.si_errno = 0;
+ si.si_code = code;
+ si.si_addr = (void __user *)addr;
+ force_sig_info(sig, &si, tsk);
+}
+
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->active_mm;
+
+ /*
+ * If we are in kernel mode at this point, we
+ * have no context to handle this fault with.
+ */
+ if (user_mode(regs))
+ __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
+ else
+ __do_kernel_fault(mm, addr, fsr, regs);
+}
+
+#define VM_FAULT_BADMAP 0x010000
+#define VM_FAULT_BADACCESS 0x020000
+
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+ unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+ if (!(fsr ^ 0x12)) /* write? */
+ mask = VM_WRITE;
+ if (fsr & FSR_LNX_PF)
+ mask = VM_EXEC;
+
+ return vma->vm_flags & mask ? false : true;
+}
+
+static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
+ struct task_struct *tsk)
+{
+ struct vm_area_struct *vma;
+ int fault;
+
+ vma = find_vma(mm, addr);
+ fault = VM_FAULT_BADMAP;
+ if (unlikely(!vma))
+ goto out;
+ if (unlikely(vma->vm_start > addr))
+ goto check_stack;
+
+ /*
+ * Ok, we have a good vm_area for this
+ * memory access, so we can handle it.
+ */
+good_area:
+ if (access_error(fsr, vma)) {
+ fault = VM_FAULT_BADACCESS;
+ goto out;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+ * sure we exit gracefully rather than endlessly redo the fault.
+ */
+ fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
+ (!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
+ if (unlikely(fault & VM_FAULT_ERROR))
+ return fault;
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+ return fault;
+
+check_stack:
+ if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
+ goto good_area;
+out:
+ return fault;
+}
+
+static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ int fault, sig, code;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto no_context;
+
+ /*
+ * As per x86, we may deadlock here. However, since the kernel only
+ * validly references user space from well defined areas of the code,
+ * we can bug out early if this is from code which shouldn't.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!user_mode(regs)
+ && !search_exception_tables(regs->UCreg_pc))
+ goto no_context;
+ down_read(&mm->mmap_sem);
+ } else {
+ /*
+ * The above down_read_trylock() might have succeeded in
+ * which case, we'll have missed the might_sleep() from
+ * down_read()
+ */
+ might_sleep();
+#ifdef CONFIG_DEBUG_VM
+ if (!user_mode(regs) &&
+ !search_exception_tables(regs->UCreg_pc))
+ goto no_context;
+#endif
+ }
+
+ fault = __do_pf(mm, addr, fsr, tsk);
+ up_read(&mm->mmap_sem);
+
+ /*
+ * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
+ */
+ if (likely(!(fault &
+ (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+ return 0;
+
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return to
+ * userspace (which will retry the fault, or kill us if we
+ * got oom-killed)
+ */
+ pagefault_out_of_memory();
+ return 0;
+ }
+
+ /*
+ * If we are in kernel mode at this point, we
+ * have no context to handle this fault with.
+ */
+ if (!user_mode(regs))
+ goto no_context;
+
+ if (fault & VM_FAULT_SIGBUS) {
+ /*
+ * We had some memory, but were unable to
+ * successfully fix up this page fault.
+ */
+ sig = SIGBUS;
+ code = BUS_ADRERR;
+ } else {
+ /*
+ * Something tried to access memory that
+ * isn't in our memory map..
+ */
+ sig = SIGSEGV;
+ code = fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR;
+ }
+
+ __do_user_fault(tsk, addr, fsr, sig, code, regs);
+ return 0;
+
+no_context:
+ __do_kernel_fault(mm, addr, fsr, regs);
+ return 0;
+}
+
+/*
+ * First Level Translation Fault Handler
+ *
+ * We enter here because the first level page table doesn't contain
+ * a valid entry for the address.
+ *
+ * If the address is in kernel space (>= TASK_SIZE), then we are
+ * probably faulting in the vmalloc() area.
+ *
+ * If the init_task's first level page table contains the relevant
+ * entry, we copy it to this task.  If not, we send the process
+ * a signal, fixup the exception, or oops the kernel.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may be in an
+ * interrupt or a critical region, and should only copy the information
+ * from the master page table, nothing more.
+ */
+static int do_ifault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned int index;
+ pgd_t *pgd, *pgd_k;
+ pmd_t *pmd, *pmd_k;
+
+ if (addr < TASK_SIZE)
+ return do_pf(addr, fsr, regs);
+
+ if (user_mode(regs))
+ goto bad_area;
+
+ index = pgd_index(addr);
+
+ pgd = cpu_get_pgd() + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (pgd_none(*pgd_k))
+ goto bad_area;
+
+ pmd_k = pmd_offset((pud_t *) pgd_k, addr);
+ pmd = pmd_offset((pud_t *) pgd, addr);
+
+ if (pmd_none(*pmd_k))
+ goto bad_area;
+
+ set_pmd(pmd, *pmd_k);
+ flush_pmd_entry(pmd);
+ return 0;
+
+bad_area:
+ do_bad_area(addr, fsr, regs);
+ return 0;
+}
+
+/*
+ * This abort handler always returns "fault".
+ */
+static int do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ return 1;
+}
+
+static int do_good(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ unsigned int res1, res2;
+
+ printk("dabt exception but no error!\n");
+
+ __asm__ __volatile__(
+ "mff %0,f0\n"
+ "mff %1,f1\n"
+ : "=r"(res1), "=r"(res2)
+ :
+ : "memory");
+
+ printk(KERN_EMERG "r0 :%08x r1 :%08x\n", res1, res2);
+ panic("shut up\n");
+ return 0;
+}
+
+static struct fsr_info {
+ int (*fn) (unsigned long addr, unsigned int fsr, struct pt_regs *regs);
+ int sig;
+ int code;
+ const char *name;
+} fsr_info[] = {
+ /*
+ * The following are the standard Unicore-I and UniCore-II aborts.
+ */
+ { do_good, SIGBUS, 0, "no error" },
+ { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
+ { do_bad, SIGBUS, BUS_OBJERR, "external exception" },
+ { do_bad, SIGBUS, 0, "burst operation" },
+ { do_bad, SIGBUS, 0, "unknown 00100" },
+ { do_ifault, SIGSEGV, SEGV_MAPERR, "2nd level pt non-exist"},
+ { do_bad, SIGBUS, 0, "2nd lvl large pt non-exist" },
+ { do_bad, SIGBUS, 0, "invalid pte" },
+ { do_pf, SIGSEGV, SEGV_MAPERR, "page miss" },
+ { do_bad, SIGBUS, 0, "middle page miss" },
+ { do_bad, SIGBUS, 0, "large page miss" },
+ { do_pf, SIGSEGV, SEGV_MAPERR, "super page (section) miss" },
+ { do_bad, SIGBUS, 0, "unknown 01100" },
+ { do_bad, SIGBUS, 0, "unknown 01101" },
+ { do_bad, SIGBUS, 0, "unknown 01110" },
+ { do_bad, SIGBUS, 0, "unknown 01111" },
+ { do_bad, SIGBUS, 0, "addr: up 3G or IO" },
+ { do_pf, SIGSEGV, SEGV_ACCERR, "read unreadable addr" },
+ { do_pf, SIGSEGV, SEGV_ACCERR, "write unwriteable addr"},
+ { do_pf, SIGSEGV, SEGV_ACCERR, "exec unexecutable addr"},
+ { do_bad, SIGBUS, 0, "unknown 10100" },
+ { do_bad, SIGBUS, 0, "unknown 10101" },
+ { do_bad, SIGBUS, 0, "unknown 10110" },
+ { do_bad, SIGBUS, 0, "unknown 10111" },
+ { do_bad, SIGBUS, 0, "unknown 11000" },
+ { do_bad, SIGBUS, 0, "unknown 11001" },
+ { do_bad, SIGBUS, 0, "unknown 11010" },
+ { do_bad, SIGBUS, 0, "unknown 11011" },
+ { do_bad, SIGBUS, 0, "unknown 11100" },
+ { do_bad, SIGBUS, 0, "unknown 11101" },
+ { do_bad, SIGBUS, 0, "unknown 11110" },
+ { do_bad, SIGBUS, 0, "unknown 11111" }
+};
+
+void __init hook_fault_code(int nr,
+ int (*fn) (unsigned long, unsigned int, struct pt_regs *),
+ int sig, int code, const char *name)
+{
+ if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
+ BUG();
+
+ fsr_info[nr].fn = fn;
+ fsr_info[nr].sig = sig;
+ fsr_info[nr].code = code;
+ fsr_info[nr].name = name;
+}
+
+/*
+ * Dispatch a data abort to the relevant handler.
+ */
+asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
+{
+ const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+ struct siginfo info;
+
+ if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
+ return;
+
+ printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
+ inf->name, fsr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ uc32_notify_die("", regs, &info, fsr, 0);
+}
+
+asmlinkage void do_PrefetchAbort(unsigned long addr,
+ unsigned int ifsr, struct pt_regs *regs)
+{
+ const struct fsr_info *inf = fsr_info + fsr_fs(ifsr);
+ struct siginfo info;
+
+ if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+ return;
+
+ printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+ inf->name, ifsr, addr);
+
+ info.si_signo = inf->sig;
+ info.si_errno = 0;
+ info.si_code = inf->code;
+ info.si_addr = (void __user *)addr;
+ uc32_notify_die("", regs, &info, ifsr, 0);
+}
diff --git a/arch/unicore32/mm/flush.c b/arch/unicore32/mm/flush.c
new file mode 100644
index 000000000000..93478cc8b26d
--- /dev/null
+++ b/arch/unicore32/mm/flush.c
@@ -0,0 +1,98 @@
+/*
+ * linux/arch/unicore32/mm/flush.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system.h>
+#include <asm/tlbflush.h>
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr,
+ unsigned long pfn)
+{
+}
+
+static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *kaddr, unsigned long len)
+{
+ /* VIPT non-aliasing D-cache */
+ if (vma->vm_flags & VM_EXEC) {
+ unsigned long addr = (unsigned long)kaddr;
+
+ __cpuc_coherent_kern_range(addr, addr + len);
+ }
+}
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * process's address space.  Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+ memcpy(dst, src, len);
+ flush_ptrace_access(vma, page, uaddr, dst, len);
+}
+
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
+{
+ /*
+	 * Write back any data associated with the kernel mapping of this
+	 * page.  This ensures that data in the physical page is mutually
+	 * coherent with the kernel's mapping.
+ */
+ __cpuc_flush_kern_dcache_area(page_address(page), PAGE_SIZE);
+}
+
+/*
+ * Ensure cache coherency between kernel mapping and userspace mapping
+ * of this page.
+ */
+void flush_dcache_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
+ if (page == ZERO_PAGE(0))
+ return;
+
+ mapping = page_mapping(page);
+
+ if (mapping && !mapping_mapped(mapping))
+ clear_bit(PG_dcache_clean, &page->flags);
+ else {
+ __flush_dcache_page(mapping, page);
+ if (mapping)
+ __flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
+ }
+}
+EXPORT_SYMBOL(flush_dcache_page);
diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
new file mode 100644
index 000000000000..3dbe3709b69d
--- /dev/null
+++ b/arch/unicore32/mm/init.c
@@ -0,0 +1,517 @@
+/*
+ * linux/arch/unicore32/mm/init.c
+ *
+ * Copyright (C) 2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+#include <mach/map.h>
+
+#include "mm.h"
+
+static unsigned long phys_initrd_start __initdata = 0x01000000;
+static unsigned long phys_initrd_size __initdata = SZ_8M;
+
+static int __init early_initrd(char *p)
+{
+ unsigned long start, size;
+ char *endp;
+
+ start = memparse(p, &endp);
+ if (*endp == ',') {
+ size = memparse(endp + 1, NULL);
+
+ phys_initrd_start = start;
+ phys_initrd_size = size;
+ }
+ return 0;
+}
+early_param("initrd", early_initrd);
+
+/*
+ * This keeps memory configuration data used by a couple of memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map. It is populated by uc32_add_memory().
+ */
+struct meminfo meminfo;
+
+void show_mem(void)
+{
+ int free = 0, total = 0, reserved = 0;
+ int shared = 0, cached = 0, slab = 0, i;
+ struct meminfo *mi = &meminfo;
+
+ printk(KERN_DEFAULT "Mem-info:\n");
+ show_free_areas();
+
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
+
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
+
+ do {
+ total++;
+ if (PageReserved(page))
+ reserved++;
+ else if (PageSwapCache(page))
+ cached++;
+ else if (PageSlab(page))
+ slab++;
+ else if (!page_count(page))
+ free++;
+ else
+ shared += page_count(page) - 1;
+ page++;
+ } while (page < end);
+ }
+
+ printk(KERN_DEFAULT "%d pages of RAM\n", total);
+ printk(KERN_DEFAULT "%d free pages\n", free);
+ printk(KERN_DEFAULT "%d reserved pages\n", reserved);
+ printk(KERN_DEFAULT "%d slab pages\n", slab);
+ printk(KERN_DEFAULT "%d pages shared\n", shared);
+ printk(KERN_DEFAULT "%d pages swap cached\n", cached);
+}
+
+static void __init find_limits(unsigned long *min, unsigned long *max_low,
+ unsigned long *max_high)
+{
+ struct meminfo *mi = &meminfo;
+ int i;
+
+ *min = -1UL;
+ *max_low = *max_high = 0;
+
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+ unsigned long start, end;
+
+ start = bank_pfn_start(bank);
+ end = bank_pfn_end(bank);
+
+ if (*min > start)
+ *min = start;
+ if (*max_high < end)
+ *max_high = end;
+ if (bank->highmem)
+ continue;
+ if (*max_low < end)
+ *max_low = end;
+ }
+}
+
+static void __init uc32_bootmem_init(unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+ struct memblock_region *reg;
+ unsigned int boot_pages;
+ phys_addr_t bitmap;
+ pg_data_t *pgdat;
+
+ /*
+ * Allocate the bootmem bitmap page. This must be in a region
+ * of memory which has already been mapped.
+ */
+ boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+ bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+ __pfn_to_phys(end_pfn));
+
+ /*
+ * Initialise the bootmem allocator, handing the
+ * memory banks over to bootmem.
+ */
+ node_set_online(0);
+ pgdat = NODE_DATA(0);
+ init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
+
+ /* Free the lowmem regions from memblock into bootmem. */
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
+ }
+
+ /* Reserve the lowmem memblock reserved regions in bootmem. */
+ for_each_memblock(reserved, reg) {
+ unsigned long start = memblock_region_reserved_base_pfn(reg);
+ unsigned long end = memblock_region_reserved_end_pfn(reg);
+
+ if (end >= end_pfn)
+ end = end_pfn;
+ if (start >= end)
+ break;
+
+ reserve_bootmem(__pfn_to_phys(start),
+ (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
+ }
+}
+
+static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
+{
+ unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+ struct memblock_region *reg;
+
+ /*
+ * initialise the zones.
+ */
+ memset(zone_size, 0, sizeof(zone_size));
+
+ /*
+ * The memory size has already been determined. If we need
+ * to do anything fancy with the allocation of this memory
+ * to the zones, now is the time to do it.
+ */
+ zone_size[0] = max_low - min;
+
+ /*
+ * Calculate the size of the holes.
+ * holes = node_size - sum(bank_sizes)
+ */
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ if (start < max_low) {
+ unsigned long low_end = min(end, max_low);
+ zhole_size[0] -= low_end - start;
+ }
+ }
+
+ /*
+ * Adjust the sizes according to any special requirements for
+ * this machine type.
+ */
+ arch_adjust_zones(zone_size, zhole_size);
+
+ free_area_init_node(0, zone_size, min, zhole_size);
+}
+
+int pfn_valid(unsigned long pfn)
+{
+ return memblock_is_memory(pfn << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(pfn_valid);
+
+static void uc32_memory_present(void)
+{
+}
+
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
+void __init uc32_memblock_init(struct meminfo *mi)
+{
+ int i;
+
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
+ meminfo_cmp, NULL);
+
+ memblock_init();
+ for (i = 0; i < mi->nr_banks; i++)
+ memblock_add(mi->bank[i].start, mi->bank[i].size);
+
+ /* Register the kernel text, kernel data and initrd with memblock. */
+ memblock_reserve(__pa(_text), _end - _text);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (phys_initrd_size) {
+ memblock_reserve(phys_initrd_start, phys_initrd_size);
+
+ /* Now convert initrd to virtual addresses */
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ }
+#endif
+
+ uc32_mm_memblock_reserve();
+
+ memblock_analyze();
+ memblock_dump_all();
+}
+
+void __init bootmem_init(void)
+{
+ unsigned long min, max_low, max_high;
+
+ max_low = max_high = 0;
+
+ find_limits(&min, &max_low, &max_high);
+
+ uc32_bootmem_init(min, max_low);
+
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(1);
+#endif
+ /*
+ * Sparsemem tries to allocate bootmem in memory_present(),
+ * so must be done after the fixed reservations
+ */
+ uc32_memory_present();
+
+ /*
+ * sparse_init() needs the bootmem allocator up and running.
+ */
+ sparse_init();
+
+ /*
+ * Now free the memory - free_area_init_node needs
+ * the sparse mem_map arrays initialized by sparse_init()
+ * for memmap_init_zone(), otherwise all PFNs are invalid.
+ */
+ uc32_bootmem_free(min, max_low, max_high);
+
+ high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
+
+ /*
+ * This doesn't seem to be used by the Linux memory manager any
+ * more, but is used by ll_rw_block. If we can get rid of it, we
+ * can also drop some of the stuff above.
+ *
+ * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
+ * the system, not the maximum PFN.
+ */
+ max_low_pfn = max_low - PHYS_PFN_OFFSET;
+ max_pfn = max_high - PHYS_PFN_OFFSET;
+}
+
+static inline int free_area(unsigned long pfn, unsigned long end, char *s)
+{
+ unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
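+ /* (end - pfn) is in pages; shifting by (PAGE_SHIFT - 10) converts it to KiB */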
+
+ for (; pfn < end; pfn++) {
+ struct page *page = pfn_to_page(pfn);
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ pages++;
+ }
+
+ if (size && s)
+ printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+
+ return pages;
+}
+
+static inline void
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+ struct page *start_pg, *end_pg;
+ unsigned long pg, pgend;
+
+ /*
+ * Convert start_pfn/end_pfn to a struct page pointer.
+ */
+ start_pg = pfn_to_page(start_pfn - 1) + 1;
+ end_pg = pfn_to_page(end_pfn);
+
+ /*
+ * Convert to physical addresses, and
+ * round start upwards and end downwards.
+ */
+ pg = PAGE_ALIGN(__pa(start_pg));
+ pgend = __pa(end_pg) & PAGE_MASK;
+
+ /*
+ * If there are free pages between these,
+ * free the section of the memmap array.
+ */
+ if (pg < pgend)
+ free_bootmem(pg, pgend - pg);
+}
+
+/*
+ * The mem_map array can get very big. Free the unused area of the memory map.
+ */
+static void __init free_unused_memmap(struct meminfo *mi)
+{
+ unsigned long bank_start, prev_bank_end = 0;
+ unsigned int i;
+
+ /*
+ * This relies on each bank being in address order.
+ * The banks are sorted previously in bootmem_init().
+ */
+ for_each_bank(i, mi) {
+ struct membank *bank = &mi->bank[i];
+
+ bank_start = bank_pfn_start(bank);
+
+ /*
+ * If we had a previous bank, and there is a space
+ * between the current bank and the previous, free it.
+ */
+ if (prev_bank_end && prev_bank_end < bank_start)
+ free_memmap(prev_bank_end, bank_start);
+
+ /*
+ * Align up here since the VM subsystem insists that the
+ * memmap entries are valid from the bank end aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+ }
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free. This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+ unsigned long reserved_pages, free_pages;
+ struct memblock_region *reg;
+ int i;
+
+ max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
+
+ /* this will put all unused low memory onto the freelists */
+ free_unused_memmap(&meminfo);
+
+ totalram_pages += free_all_bootmem();
+
+ reserved_pages = free_pages = 0;
+
+ for_each_bank(i, &meminfo) {
+ struct membank *bank = &meminfo.bank[i];
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
+
+ page = pfn_to_page(pfn1);
+ end = pfn_to_page(pfn2 - 1) + 1;
+
+ do {
+ if (PageReserved(page))
+ reserved_pages++;
+ else if (!page_count(page))
+ free_pages++;
+ page++;
+ } while (page < end);
+ }
+
+ /*
+ * Since our memory may not be contiguous, calculate the
+ * real number of pages we have in this system
+ */
+ printk(KERN_INFO "Memory:");
+ num_physpages = 0;
+ for_each_memblock(memory, reg) {
+ unsigned long pages = memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+ num_physpages += pages;
+ printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
+ }
+ printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+
+ printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ free_pages << (PAGE_SHIFT-10),
+ reserved_pages << (PAGE_SHIFT-10),
+ totalhigh_pages << (PAGE_SHIFT-10));
+
+ printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n",
+
+ VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
+ DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
+ VMALLOC_START, VMALLOC_END,
+ DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
+ PAGE_OFFSET, (unsigned long)high_memory,
+ DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
+ MODULES_VADDR, MODULES_END,
+ DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),
+
+ __init_begin, __init_end,
+ DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
+ _stext, _etext,
+ DIV_ROUND_UP((_etext - _stext), SZ_1K),
+ _sdata, _edata,
+ DIV_ROUND_UP((_edata - _sdata), SZ_1K));
+
+ BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+ BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+ if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+ /*
+ * On a machine this small we won't get
+ * anywhere without overcommit, so turn
+ * it on by default.
+ */
+ sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
+ }
+}
+
+void free_initmem(void)
+{
+ totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+ __phys_to_pfn(__pa(__init_end)),
+ "init");
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int keep_initrd;
+
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+ if (!keep_initrd)
+ totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+ __phys_to_pfn(__pa(end)),
+ "initrd");
+}
+
+static int __init keepinitrd_setup(char *__unused)
+{
+ keep_initrd = 1;
+ return 1;
+}
+
+__setup("keepinitrd", keepinitrd_setup);
+#endif
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
new file mode 100644
index 000000000000..b7a605597b08
--- /dev/null
+++ b/arch/unicore32/mm/ioremap.c
@@ -0,0 +1,261 @@
+/*
+ * linux/arch/unicore32/mm/ioremap.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * This allows a driver to remap an arbitrary region of bus memory into
+ * virtual space. One should *only* use readl, writel, memcpy_toio and
+ * so on with such remapped areas.
+ *
+ * Because UniCore only has a 32-bit address space we can't address the
+ * whole of the (physical) PCI space at once. PCI huge-mode addressing
+ * allows us to circumvent this restriction by splitting PCI space into
+ * two 2GB chunks and mapping only one at a time into processor memory.
+ * We use MMU protection domains to trap any attempt to access the bank
+ * that is not currently mapped. (This isn't fully implemented yet.)
+ */
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+#include <mach/map.h>
+#include "mm.h"
+
+/*
+ * Used by ioremap() and iounmap() code to mark (super)section-mapped
+ * I/O regions in vm_struct->flags field.
+ */
+#define VM_UNICORE_SECTION_MAPPING 0x80000000
+
+int ioremap_page(unsigned long virt, unsigned long phys,
+ const struct mem_type *mtype)
+{
+ return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
+ __pgprot(mtype->prot_pte));
+}
+EXPORT_SYMBOL(ioremap_page);
+
+/*
+ * Section support is unsafe on SMP - If you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
+ * which requires the new ioremap'd region to be referenced, the CPU will
+ * reference the _old_ region.
+ *
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back down to a 4MB boundary, or we will overflow in the
+ * loop below.
+ */
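+/*
+ * Illustrative case: an iounmap() of a 4MB section mapping sees
+ * tmp->size == SZ_4M + PAGE_SIZE (guard page included), so
+ * size & ~(SZ_4M - 1) brings 'end' back to virt + SZ_4M and the
+ * pgd walk below stops exactly at the mapping's last section.
+ */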
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+ unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
+ pgd_t *pgd;
+
+ flush_cache_vunmap(addr, end);
+ pgd = pgd_offset_k(addr);
+ do {
+ pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);
+
+ pmd = *pmdp;
+ if (!pmd_none(pmd)) {
+ /*
+ * Clear the PMD from the page table, and
+ * increment the kvm sequence so others
+ * notice this change.
+ *
+ * Note: this is still racy on SMP machines.
+ */
+ pmd_clear(pmdp);
+
+ /*
+ * Free the page table, if there was one.
+ */
+ if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+ pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
+ }
+
+ addr += PGDIR_SIZE;
+ pgd++;
+ } while (addr < end);
+
+ flush_tlb_kernel_range(virt, end);
+}
+
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+ size_t size, const struct mem_type *type)
+{
+ unsigned long addr = virt, end = virt + size;
+ pgd_t *pgd;
+
+ /*
+ * Remove and free any PTE-based mapping, and
+ * sync the current kernel mapping.
+ */
+ unmap_area_sections(virt, size);
+
+ pgd = pgd_offset_k(addr);
+ do {
+ pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
+
+ set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
+ pfn += SZ_4M >> PAGE_SHIFT;
+ flush_pmd_entry(pmd);
+
+ addr += PGDIR_SIZE;
+ pgd++;
+ } while (addr < end);
+
+ return 0;
+}
+
+void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn,
+ unsigned long offset, size_t size, unsigned int mtype, void *caller)
+{
+ const struct mem_type *type;
+ int err;
+ unsigned long addr;
+ struct vm_struct *area;
+
+ /*
+ * High mappings must be section aligned
+ */
+ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK))
+ return NULL;
+
+ /*
+ * Don't allow RAM to be mapped
+ */
+ if (pfn_valid(pfn)) {
+ printk(KERN_WARNING "BUG: Your driver calls ioremap() on\n"
+ "system memory. This leads to architecturally\n"
+ "unpredictable behaviour, and ioremap() will fail in\n"
+ "the next kernel release. Please fix your driver.\n");
+ WARN_ON(1);
+ }
+
+ type = get_mem_type(mtype);
+ if (!type)
+ return NULL;
+
+ /*
+ * Page align the mapping size, taking account of any offset.
+ */
+ size = PAGE_ALIGN(offset + size);
+
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
+ if (!area)
+ return NULL;
+ addr = (unsigned long)area->addr;
+
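+ /*
+ * Use a 4MB section mapping only when the physical base, the
+ * size and the virtual address are all section aligned (no bits
+ * set under ~PMD_MASK); otherwise fall back to page mappings.
+ */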
+ if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+ area->flags |= VM_UNICORE_SECTION_MAPPING;
+ err = remap_area_sections(addr, pfn, size, type);
+ } else
+ err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+ __pgprot(type->prot_pte));
+
+ if (err) {
+ vunmap((void *)addr);
+ return NULL;
+ }
+
+ flush_cache_vmap(addr, addr + size);
+ return (void __iomem *) (offset + addr);
+}
+
+void __iomem *__uc32_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ unsigned long last_addr;
+ unsigned long offset = phys_addr & ~PAGE_MASK;
+ unsigned long pfn = __phys_to_pfn(phys_addr);
+
+ /*
+ * Don't allow wraparound or zero size
+ */
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
+ return NULL;
+
+ return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__uc32_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
+{
+ return __uc32_ioremap_pfn_caller(pfn, offset, size, mtype,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap_pfn);
+
+void __iomem *
+__uc32_ioremap(unsigned long phys_addr, size_t size)
+{
+ return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap);
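+
+/*
+ * Hypothetical usage sketch (UART_PHYS_BASE and UART_CTRL are made-up
+ * placeholders, not real PKUnity addresses):
+ *
+ *	void __iomem *regs = __uc32_ioremap(UART_PHYS_BASE, SZ_4K);
+ *	if (regs) {
+ *		writel(0x1, regs + UART_CTRL);
+ *		__uc32_iounmap(regs);
+ *	}
+ */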
+
+void __iomem *
+__uc32_ioremap_cached(unsigned long phys_addr, size_t size)
+{
+ return __uc32_ioremap_caller(phys_addr, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__uc32_ioremap_cached);
+
+void __uc32_iounmap(volatile void __iomem *io_addr)
+{
+ void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+ struct vm_struct **p, *tmp;
+
+ /*
+ * If this is a section-based mapping we need to handle it
+ * specially as the VM subsystem does not know how to handle
+ * such a beast. We need the lock here because we need to clear
+ * all the mappings before the area can be reclaimed
+ * by someone else.
+ */
+ write_lock(&vmlist_lock);
+ for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+ if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+ if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
+ unmap_area_sections((unsigned long)tmp->addr,
+ tmp->size);
+ }
+ break;
+ }
+ }
+ write_unlock(&vmlist_lock);
+
+ vunmap(addr);
+}
+EXPORT_SYMBOL(__uc32_iounmap);
diff --git a/arch/unicore32/mm/mm.h b/arch/unicore32/mm/mm.h
new file mode 100644
index 000000000000..3296bca0f1f7
--- /dev/null
+++ b/arch/unicore32/mm/mm.h
@@ -0,0 +1,39 @@
+/*
+ * linux/arch/unicore32/mm/mm.h
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/* the upper-most page table pointer */
+extern pmd_t *top_pmd;
+extern int sysctl_overcommit_memory;
+
+#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
+
+static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
+{
+ return pmd_offset((pud_t *)pgd, virt);
+}
+
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+ return pmd_off(pgd_offset_k(virt), virt);
+}
+
+struct mem_type {
+ unsigned int prot_pte;
+ unsigned int prot_l1;
+ unsigned int prot_sect;
+};
+
+const struct mem_type *get_mem_type(unsigned int type);
+
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+void __init bootmem_init(void);
+void uc32_mm_memblock_reserve(void);
diff --git a/arch/unicore32/mm/mmu.c b/arch/unicore32/mm/mmu.c
new file mode 100644
index 000000000000..7bf3d588631f
--- /dev/null
+++ b/arch/unicore32/mm/mmu.c
@@ -0,0 +1,533 @@
+/*
+ * linux/arch/unicore32/mm/mmu.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/memblock.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/io.h>
+
+#include <asm/cputype.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
+
+#include <mach/map.h>
+
+#include "mm.h"
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+/*
+ * The pmd table for the upper-most set of pages.
+ */
+pmd_t *top_pmd;
+
+pgprot_t pgprot_user;
+EXPORT_SYMBOL(pgprot_user);
+
+pgprot_t pgprot_kernel;
+EXPORT_SYMBOL(pgprot_kernel);
+
+static int __init noalign_setup(char *__unused)
+{
+ cr_alignment &= ~CR_A;
+ cr_no_alignment &= ~CR_A;
+ set_cr(cr_alignment);
+ return 1;
+}
+__setup("noalign", noalign_setup);
+
+void adjust_cr(unsigned long mask, unsigned long set)
+{
+ unsigned long flags;
+
+ mask &= ~CR_A;
+
+ set &= mask;
+
+ local_irq_save(flags);
+
+ cr_no_alignment = (cr_no_alignment & ~mask) | set;
+ cr_alignment = (cr_alignment & ~mask) | set;
+
+ set_cr((get_cr() & ~mask) | set);
+
+ local_irq_restore(flags);
+}
+
+struct map_desc {
+ unsigned long virtual;
+ unsigned long pfn;
+ unsigned long length;
+ unsigned int type;
+};
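+
+/*
+ * Illustrative example (addresses are placeholders, not real PKUnity
+ * ones): a one-page device mapping would be described as
+ *
+ *	struct map_desc map = {
+ *		.virtual = 0xf0000000,
+ *		.pfn = __phys_to_pfn(0xee000000),
+ *		.length = PAGE_SIZE,
+ *		.type = MT_DEVICE,
+ *	};
+ *	create_mapping(&map);
+ */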
+
+#define PROT_PTE_DEVICE (PTE_PRESENT | PTE_YOUNG | \
+ PTE_DIRTY | PTE_READ | PTE_WRITE)
+#define PROT_SECT_DEVICE (PMD_TYPE_SECT | PMD_PRESENT | \
+ PMD_SECT_READ | PMD_SECT_WRITE)
+
+static struct mem_type mem_types[] = {
+ [MT_DEVICE] = { /* Strongly ordered */
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ .prot_sect = PROT_SECT_DEVICE,
+ },
+ /*
+ * MT_KUSER: pte for vecpage -- cacheable,
+ * and sect for unigfx mmap -- noncacheable
+ */
+ [MT_KUSER] = {
+ .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
+ PTE_CACHEABLE | PTE_READ | PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ .prot_sect = PROT_SECT_DEVICE,
+ },
+ [MT_HIGH_VECTORS] = {
+ .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
+ PTE_CACHEABLE | PTE_READ | PTE_WRITE |
+ PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ },
+ [MT_MEMORY] = {
+ .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY |
+ PTE_WRITE | PTE_EXEC,
+ .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT,
+ .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
+ PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC,
+ },
+ [MT_ROM] = {
+ .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE |
+ PMD_SECT_READ,
+ },
+};
+
+const struct mem_type *get_mem_type(unsigned int type)
+{
+ return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
+}
+EXPORT_SYMBOL(get_mem_type);
+
+/*
+ * Adjust the PMD section entries according to the CPU in use.
+ */
+static void __init build_mem_type_table(void)
+{
+ pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE);
+ pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG |
+ PTE_DIRTY | PTE_READ | PTE_WRITE |
+ PTE_EXEC | PTE_CACHEABLE);
+}
+
+#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
+
+static void __init *early_alloc(unsigned long sz)
+{
+ void *ptr = __va(memblock_alloc(sz, sz));
+ memset(ptr, 0, sz);
+ return ptr;
+}
+
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
+ unsigned long prot)
+{
+ if (pmd_none(*pmd)) {
+ pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+ __pmd_populate(pmd, __pa(pte) | prot);
+ }
+ BUG_ON(pmd_bad(*pmd));
+ return pte_offset_kernel(pmd, addr);
+}
+
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+ unsigned long end, unsigned long pfn,
+ const struct mem_type *type)
+{
+ pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
+ do {
+ set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte)));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
+ unsigned long end, unsigned long phys,
+ const struct mem_type *type)
+{
+ pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);
+
+ /*
+ * Try a section mapping - end, addr and phys must all be aligned
+ * to a section boundary.
+ */
+ if (((addr | end | phys) & ~SECTION_MASK) == 0) {
+ pmd_t *p = pmd;
+
+ do {
+ set_pmd(pmd, __pmd(phys | type->prot_sect));
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
+
+ flush_pmd_entry(p);
+ } else {
+ /*
+ * No need to loop; PTEs aren't interested in the
+ * individual L1 entries.
+ */
+ alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
+ }
+}
+
+/*
+ * Create the page directory entries and any necessary
+ * page tables for the mapping specified by `md'. We
+ * are able to cope here with varying sizes and address
+ * offsets, and we take full advantage of sections.
+ */
+static void __init create_mapping(struct map_desc *md)
+{
+ unsigned long phys, addr, length, end;
+ const struct mem_type *type;
+ pgd_t *pgd;
+
+ if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
+ printk(KERN_WARNING "BUG: not creating mapping for "
+ "0x%08llx at 0x%08lx in user region\n",
+ __pfn_to_phys((u64)md->pfn), md->virtual);
+ return;
+ }
+
+ if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+ md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
+ "overlaps vmalloc space\n",
+ __pfn_to_phys((u64)md->pfn), md->virtual);
+ }
+
+ type = &mem_types[md->type];
+
+ addr = md->virtual & PAGE_MASK;
+ phys = (unsigned long)__pfn_to_phys(md->pfn);
+ length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
+
+ if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
+ printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
+ "be mapped using pages, ignoring.\n",
+ __pfn_to_phys(md->pfn), addr);
+ return;
+ }
+
+ pgd = pgd_offset_k(addr);
+ end = addr + length;
+ do {
+ unsigned long next = pgd_addr_end(addr, end);
+
+ alloc_init_section(pgd, addr, next, phys, type);
+
+ phys += next - addr;
+ addr = next;
+ } while (pgd++, addr != end);
+}
+
+static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128MB.
+ */
+static int __init early_vmalloc(char *arg)
+{
+ unsigned long vmalloc_reserve = memparse(arg, NULL);
+
+ if (vmalloc_reserve < SZ_16M) {
+ vmalloc_reserve = SZ_16M;
+ printk(KERN_WARNING
+ "vmalloc area too small, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
+
+ if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
+ vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
+ printk(KERN_WARNING
+ "vmalloc area is too big, limiting to %luMB\n",
+ vmalloc_reserve >> 20);
+ }
+
+ vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
+ return 0;
+}
+early_param("vmalloc", early_vmalloc);
+
+static phys_addr_t lowmem_limit __initdata = SZ_1G;
+
+static void __init sanity_check_meminfo(void)
+{
+ int i, j;
+
+ lowmem_limit = __pa(vmalloc_min - 1) + 1;
+ memblock_set_current_limit(lowmem_limit);
+
+ for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+ struct membank *bank = &meminfo.bank[j];
+ *bank = meminfo.bank[i];
+ j++;
+ }
+ meminfo.nr_banks = j;
+}
+
+static inline void prepare_page_table(void)
+{
+ unsigned long addr;
+ phys_addr_t end;
+
+ /*
+ * Clear out all the mappings below the kernel image.
+ */
+ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ /*
+ * Find the end of the first block of lowmem.
+ */
+ end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+ if (end >= lowmem_limit)
+ end = lowmem_limit;
+
+ /*
+ * Clear out all the kernel space mappings, except for the first
+ * memory bank, up to the end of the vmalloc region.
+ */
+ for (addr = __phys_to_virt(end);
+ addr < VMALLOC_END; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+}
+
+/*
+ * Reserve the special regions of memory
+ */
+void __init uc32_mm_memblock_reserve(void)
+{
+ /*
+ * Reserve the page tables. These are already in use,
+ * and can only be in node 0.
+ */
+ memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
+
+#ifdef CONFIG_PUV3_UNIGFX
+ /*
+ * These should likewise go elsewhere. They pre-reserve the
+ * screen/video memory region in the 48MB-64MB range of main
+ * system memory.
+ */
+ memblock_reserve(PKUNITY_UNIGFX_MMAP_BASE, PKUNITY_UNIGFX_MMAP_SIZE);
+ memblock_reserve(PKUNITY_UVC_MMAP_BASE, PKUNITY_UVC_MMAP_SIZE);
+#endif
+}
+
+/*
+ * Set up the device mappings. Since we clear out the page tables for all
+ * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * function it calls: you can't use any function or debugging method
+ * which may touch any device, otherwise the kernel _will_ crash.
+ */
+static void __init devicemaps_init(void)
+{
+ struct map_desc map;
+ unsigned long addr;
+ void *vectors;
+
+ /*
+ * Allocate the vector page early.
+ */
+ vectors = early_alloc(PAGE_SIZE);
+
+ for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ /*
+ * Create a mapping for UniGFX VRAM
+ */
+#ifdef CONFIG_PUV3_UNIGFX
+ map.pfn = __phys_to_pfn(PKUNITY_UNIGFX_MMAP_BASE);
+ map.virtual = KUSER_UNIGFX_BASE;
+ map.length = PKUNITY_UNIGFX_MMAP_SIZE;
+ map.type = MT_KUSER;
+ create_mapping(&map);
+#endif
+
+ /*
+ * Create a mapping for the machine vectors at the high-vectors
+ * location (0xffff0000). If we aren't using high-vectors, also
+ * create a mapping at the low-vectors virtual address.
+ */
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.virtual = VECTORS_BASE;
+ map.length = PAGE_SIZE;
+ map.type = MT_HIGH_VECTORS;
+ create_mapping(&map);
+
+ /*
+ * Create a mapping for the kuser page at the special
+ * location (0xbfff0000) to the same vectors location.
+ */
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.virtual = KUSER_VECPAGE_BASE;
+ map.length = PAGE_SIZE;
+ map.type = MT_KUSER;
+ create_mapping(&map);
+
+ /*
+ * Finally flush the caches and tlb to ensure that we're in a
+ * consistent state wrt the writebuffer. This also ensures that
+ * any write-allocated cache lines in the vector page are written
+ * back. After this point, we can start to touch devices again.
+ */
+ local_flush_tlb_all();
+ flush_cache_all();
+}
+
+static void __init map_lowmem(void)
+{
+ struct memblock_region *reg;
+
+ /* Map all the lowmem memory banks. */
+ for_each_memblock(memory, reg) {
+ phys_addr_t start = reg->base;
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
+
+ if (end > lowmem_limit)
+ end = lowmem_limit;
+ if (start >= end)
+ break;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY;
+
+ create_mapping(&map);
+ }
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(void)
+{
+ void *zero_page;
+
+ build_mem_type_table();
+ sanity_check_meminfo();
+ prepare_page_table();
+ map_lowmem();
+ devicemaps_init();
+
+ top_pmd = pmd_off_k(0xffff0000);
+
+ /* allocate the zero page. */
+ zero_page = early_alloc(PAGE_SIZE);
+
+ bootmem_init();
+
+ empty_zero_page = virt_to_page(zero_page);
+ __flush_dcache_page(NULL, empty_zero_page);
+}
+
+/*
+ * In order to soft-boot, we need to insert a 1:1 mapping in place of
+ * the user-mode pages. This will then ensure that we have predictable
+ * results when turning the MMU off.
+ */
+void setup_mm_for_reboot(char mode)
+{
+ unsigned long base_pmdval;
+ pgd_t *pgd;
+ int i;
+
+ /*
+ * We need access to the user-mode page tables here. For kernel
+ * threads we don't have any user-mode mappings, so we use the
+ * context that we "borrowed".
+ */
+ pgd = current->active_mm->pgd;
+
+ base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT;
+
+ for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
+ unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
+ pmd_t *pmd;
+
+ pmd = pmd_off(pgd, i << PGDIR_SHIFT);
+ set_pmd(pmd, __pmd(pmdval));
+ flush_pmd_entry(pmd);
+ }
+
+ local_flush_tlb_all();
+}
+
+/*
+ * Take care of architecture specific things when placing a new PTE into
+ * a page table, or changing an existing PTE. Basically, there are two
+ * things that we need to take care of:
+ *
+ * 1. If PG_dcache_clean is not set for the page, we need to ensure
+ * that any cache entries for the kernel's virtual memory
+ * range are written back to the page.
+ * 2. If we have multiple shared mappings of the same space in
+ * an object, we need to deal with the cache aliasing issues.
+ *
+ * Note that the pte lock will be held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ unsigned long pfn = pte_pfn(*ptep);
+ struct address_space *mapping;
+ struct page *page;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
+ page = pfn_to_page(pfn);
+ if (page == ZERO_PAGE(0))
+ return;
+
+ mapping = page_mapping(page);
+ if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ __flush_dcache_page(mapping, page);
+ if (mapping)
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache_all();
+}
diff --git a/arch/unicore32/mm/pgd.c b/arch/unicore32/mm/pgd.c
new file mode 100644
index 000000000000..08b8d4295e70
--- /dev/null
+++ b/arch/unicore32/mm/pgd.c
@@ -0,0 +1,102 @@
+/*
+ * linux/arch/unicore32/mm/pgd.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
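+
+/*
+ * PGD slots below FIRST_KERNEL_PGD_NR map user space and start out
+ * cleared; slots from it upwards carry the kernel and IO mappings
+ * copied from init_mm in get_pgd_slow() below.
+ */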
+
+/*
+ * We need to get a 4K page for the level 1 page table.
+ */
+pgd_t *get_pgd_slow(struct mm_struct *mm)
+{
+ pgd_t *new_pgd, *init_pgd;
+ pmd_t *new_pmd, *init_pmd;
+ pte_t *new_pte, *init_pte;
+
+ new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
+ if (!new_pgd)
+ goto no_pgd;
+
+ memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+
+ /*
+ * Copy over the kernel and IO PGD entries
+ */
+ init_pgd = pgd_offset_k(0);
+ memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+ (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+ clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
+
+ if (!vectors_high()) {
+ /*
+ * On UniCore, the first page must always be allocated since it
+ * contains the machine vectors.
+ */
+ new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
+ if (!new_pmd)
+ goto no_pmd;
+
+ new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
+ if (!new_pte)
+ goto no_pte;
+
+ init_pmd = pmd_offset((pud_t *)init_pgd, 0);
+ init_pte = pte_offset_map(init_pmd, 0);
+ set_pte(new_pte, *init_pte);
+ pte_unmap(init_pte);
+ pte_unmap(new_pte);
+ }
+
+ return new_pgd;
+
+no_pte:
+ pmd_free(mm, new_pmd);
+no_pmd:
+ free_pages((unsigned long)new_pgd, 0);
+no_pgd:
+ return NULL;
+}
+
+void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
+{
+ pmd_t *pmd;
+ pgtable_t pte;
+
+ if (!pgd)
+ return;
+
+ /* pgd is always present and good */
+ pmd = pmd_off(pgd, 0);
+ if (pmd_none(*pmd))
+ goto free;
+ if (pmd_bad(*pmd)) {
+ pmd_ERROR(*pmd);
+ pmd_clear(pmd);
+ goto free;
+ }
+
+ pte = pmd_pgtable(*pmd);
+ pmd_clear(pmd);
+ pte_free(mm, pte);
+ pmd_free(mm, pmd);
+free:
+ free_pages((unsigned long) pgd, 0);
+}
diff --git a/arch/unicore32/mm/proc-macros.S b/arch/unicore32/mm/proc-macros.S
new file mode 100644
index 000000000000..51560d68c894
--- /dev/null
+++ b/arch/unicore32/mm/proc-macros.S
@@ -0,0 +1,145 @@
+/*
+ * linux/arch/unicore32/mm/proc-macros.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * We need constants.h for:
+ * VMA_VM_MM
+ * VMA_VM_FLAGS
+ * VM_EXEC
+ */
+#include <generated/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+
+/*
+ * the cache line sizes of the I and D cache are the same
+ */
+#define CACHE_LINESIZE 32
+
+/*
+ * This is the maximum size of an area which will be invalidated
+ * using the single invalidate entry instructions. Anything larger
+ * than this, and we go for the whole cache.
+ *
+ * This value should be chosen so that we always take the
+ * cheapest alternative.
+ */
+#ifdef CONFIG_CPU_UCV2
+#define MAX_AREA_SIZE 0x800 /* 64 cache lines */
+#endif
+
+/*
+ * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
+ */
+ .macro vma_vm_mm, rd, rn
+ ldw \rd, [\rn+], #VMA_VM_MM
+ .endm
+
+/*
+ * vma_vm_flags - get vma->vm_flags
+ */
+ .macro vma_vm_flags, rd, rn
+ ldw \rd, [\rn+], #VMA_VM_FLAGS
+ .endm
+
+ .macro tsk_mm, rd, rn
+ ldw \rd, [\rn+], #TI_TASK
+ ldw \rd, [\rd+], #TSK_ACTIVE_MM
+ .endm
+
+/*
+ * act_mm - get current->active_mm
+ */
+ .macro act_mm, rd
+ andn \rd, sp, #8128
+ andn \rd, \rd, #63
+ ldw \rd, [\rd+], #TI_TASK
+ ldw \rd, [\rd+], #TSK_ACTIVE_MM
+ .endm
+
+/*
+ * mmid - get context id from mm pointer (mm->context.id)
+ */
+ .macro mmid, rd, rn
+ ldw \rd, [\rn+], #MM_CONTEXT_ID
+ .endm
+
+/*
+ * asid - mask the ASID (low 8 bits) out of the context ID
+ */
+ .macro asid, rd, rn
+ and \rd, \rn, #255
+ .endm
+
+ .macro crval, clear, mmuset, ucset
+ .word \clear
+ .word \mmuset
+ .endm
+
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+/*
+ * va2pa va, pa, tbl, msk, off, err
+ * This macro translates a virtual address to its physical address.
+ *
+ * va: virtual address
+ * pa: physical address, result is stored in this register
+ * tbl, msk, off: temp registers, will be destroyed
+ * err: jump to the error label if the physical address does not exist
+ * NOTE: all regs must be different
+ */
+ .macro va2pa, va, pa, tbl, msk, off, err=990f
+ movc \pa, p0.c2, #0
+ mov \off, \va >> #22 @ off <- index of 1st page table
+ adr \tbl, 910f @ tbl <- table of 1st page table
+900: @ ---- handle 1, 2 page table
+ add \pa, \pa, #PAGE_OFFSET @ pa <- virt addr of page table
+ ldw \pa, [\pa+], \off << #2 @ pa <- the content of pt
+ cand.a \pa, #4 @ test exist bit
+ beq \err @ if not exist
+ and \off, \pa, #3 @ off <- the last 2 bits
+ add \tbl, \tbl, \off << #3 @ cmove table pointer
+ ldw \msk, [\tbl+], #0 @ get the mask
+ ldw pc, [\tbl+], #4
+930: @ ---- handle 2nd page table
+ and \pa, \pa, \msk @ pa <- phys addr of 2nd pt
+ mov \off, \va << #10
+ cntlo \tbl, \msk @ use tbl as temp reg
+ mov \off, \off >> \tbl
+ mov \off, \off >> #2 @ off <- index of 2nd pt
+ adr \tbl, 920f @ tbl <- table of 2nd pt
+ b 900b
+910: @ 1st level page table
+ .word 0xfffff000, 930b @ second level page table
+ .word 0xfffffc00, 930b @ second level large page table
+ .word 0x00000000, \err @ invalid
+ .word 0xffc00000, 980f @ super page
+
+920: @ 2nd level page table
+ .word 0xfffff000, 980f @ page
+ .word 0xffffc000, 980f @ middle page
+ .word 0xffff0000, 980f @ large page
+ .word 0x00000000, \err @ invalid
+980:
+ andn \tbl, \va, \msk
+ and \pa, \pa, \msk
+ or \pa, \pa, \tbl
+990:
+ .endm
+#endif
+
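+/*
+ * dcacheline_flush addr, t1, t2
+ * Evict the D-cache line holding 'addr' by loading from four
+ * kernel-text addresses, spaced 4KB apart from _stext, that share its
+ * page offset and thus (presumably, given a 4-way cache with 4KB ways)
+ * its cache set. t1 and t2 are scratch registers.
+ */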
+ .macro dcacheline_flush, addr, t1, t2
+ mov \t1, \addr << #20
+ ldw \t2, =_stext @ _stext must ALIGN(4096)
+ add \t2, \t2, \t1 >> #20
+ ldw \t1, [\t2+], #0x0000
+ ldw \t1, [\t2+], #0x1000
+ ldw \t1, [\t2+], #0x2000
+ ldw \t1, [\t2+], #0x3000
+ .endm
diff --git a/arch/unicore32/mm/proc-syms.c b/arch/unicore32/mm/proc-syms.c
new file mode 100644
index 000000000000..f30071e3665d
--- /dev/null
+++ b/arch/unicore32/mm/proc-syms.c
@@ -0,0 +1,23 @@
+/*
+ * linux/arch/unicore32/mm/proc-syms.c
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/page.h>
+
+EXPORT_SYMBOL(cpu_dcache_clean_area);
+EXPORT_SYMBOL(cpu_set_pte);
+
+EXPORT_SYMBOL(__cpuc_dma_flush_range);
+EXPORT_SYMBOL(__cpuc_dma_clean_range);
diff --git a/arch/unicore32/mm/proc-ucv2.S b/arch/unicore32/mm/proc-ucv2.S
new file mode 100644
index 000000000000..9d296092e362
--- /dev/null
+++ b/arch/unicore32/mm/proc-ucv2.S
@@ -0,0 +1,134 @@
+/*
+ * linux/arch/unicore32/mm/proc-ucv2.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/hwcap.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+
+#include "proc-macros.S"
+
+ENTRY(cpu_proc_fin)
+ stm.w (lr), [sp-]
+ mov ip, #PSR_R_BIT | PSR_I_BIT | PRIV_MODE
+ mov.a asr, ip
+ b.l __cpuc_flush_kern_all
+ ldm.w (pc), [sp]+
+
+/*
+ * cpu_reset(loc)
+ *
+ * Perform a soft reset of the system. Put the CPU into the
+ * same state as it would be if it had been reset, and branch
+ * to what would be the reset vector.
+ *
+ * - loc - location to jump to for soft reset
+ */
+ .align 5
+ENTRY(cpu_reset)
+ mov ip, #0
+ movc p0.c5, ip, #28 @ Cache invalidate all
+ nop8
+
+ movc p0.c6, ip, #6 @ TLB invalidate all
+ nop8
+
+ movc ip, p0.c1, #0 @ ctrl register
+ or ip, ip, #0x2000 @ vector base address
+ andn ip, ip, #0x000f @ ............idam
+ movc p0.c1, ip, #0 @ disable caches and mmu
+ nop
+ mov pc, r0 @ jump to loc
+ nop8
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (e.g. wait for interrupt).
+ *
+ * IRQs are already disabled.
+ */
+ENTRY(cpu_do_idle)
+ mov r0, #0 @ PCI address
+ .rept 8
+ ldw r1, [r0]
+ .endr
+ mov pc, lr
+
+ENTRY(cpu_dcache_clean_area)
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ csub.a r1, #MAX_AREA_SIZE
+ bsg 101f
+ mov r9, #PAGE_SZ
+ sub r9, r9, #1 @ PAGE_MASK
+1: va2pa r0, r10, r11, r12, r13 @ r10 is PA
+ b 3f
+2: cand.a r0, r9
+ beq 1b
+3: movc p0.c5, r10, #11 @ clean D entry
+ nop8
+ add r0, r0, #CACHE_LINESIZE
+ add r10, r10, #CACHE_LINESIZE
+ sub.a r1, r1, #CACHE_LINESIZE
+ bua 2b
+ mov pc, lr
+#endif
+101: mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+
+ mov pc, lr
+
+/*
+ * cpu_do_switch_mm(pgd_phys)
+ *
+ * Set the translation table base pointer to be pgd_phys
+ *
+ * - pgd_phys - physical address of new pgd
+ *
+ * It is assumed that:
+ * - we are not using split page tables
+ */
+ .align 5
+ENTRY(cpu_do_switch_mm)
+ movc p0.c2, r0, #0 @ update page table ptr
+ nop8
+
+ movc p0.c6, ip, #6 @ TLB invalidate all
+ nop8
+
+ mov pc, lr
+
+/*
+ * cpu_set_pte(ptep, pte)
+ *
+ * Set a level 2 translation table entry.
+ *
+ * - ptep - pointer to level 2 translation table entry
+ * - pte - PTE value to store
+ */
+ .align 5
+ENTRY(cpu_set_pte)
+ stw r1, [r0]
+#ifndef CONFIG_CPU_DCACHE_LINE_DISABLE
+ sub r2, r0, #PAGE_OFFSET
+ movc p0.c5, r2, #11 @ Dcache clean line
+ nop8
+#else
+ mov ip, #0
+ movc p0.c5, ip, #10 @ Dcache clean all
+ nop8
+ @dcacheline_flush r0, r2, ip
+#endif
+ mov pc, lr
+
diff --git a/arch/unicore32/mm/tlb-ucv2.S b/arch/unicore32/mm/tlb-ucv2.S
new file mode 100644
index 000000000000..061d455f9a15
--- /dev/null
+++ b/arch/unicore32/mm/tlb-ucv2.S
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/unicore32/mm/tlb-ucv2.S
+ *
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Copyright (C) 2001-2010 GUAN Xue-tao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include "proc-macros.S"
+
+/*
+ * __cpu_flush_user_tlb_range(start, end, vma)
+ *
+ * Invalidate a range of TLB entries in the specified address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - vma - vma_struct describing address range
+ */
+ENTRY(__cpu_flush_user_tlb_range)
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ mov r0, r0 >> #PAGE_SHIFT @ align address
+ mov r0, r0 << #PAGE_SHIFT
+ vma_vm_flags r2, r2 @ get vma->vm_flags
+1:
+ movc p0.c6, r0, #3
+ nop8
+
+ cand.a r2, #VM_EXEC @ Executable area ?
+ beq 2f
+
+ movc p0.c6, r0, #5
+ nop8
+2:
+ add r0, r0, #PAGE_SZ
+ csub.a r0, r1
+ beb 1b
+#else
+ movc p0.c6, r0, #2
+ nop8
+
+ cand.a r2, #VM_EXEC @ Executable area ?
+ beq 2f
+
+ movc p0.c6, r0, #4
+ nop8
+2:
+#endif
+ mov pc, lr
+
+/*
+ * __cpu_flush_kern_tlb_range(start,end)
+ *
+ * Invalidate a range of kernel TLB entries
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ */
+ENTRY(__cpu_flush_kern_tlb_range)
+#ifndef CONFIG_CPU_TLB_SINGLE_ENTRY_DISABLE
+ mov r0, r0 >> #PAGE_SHIFT @ align address
+ mov r0, r0 << #PAGE_SHIFT
+1:
+ movc p0.c6, r0, #3
+ nop8
+
+ movc p0.c6, r0, #5
+ nop8
+
+ add r0, r0, #PAGE_SZ
+ csub.a r0, r1
+ beb 1b
+#else
+ movc p0.c6, r0, #2
+ nop8
+
+ movc p0.c6, r0, #4
+ nop8
+#endif
+ mov pc, lr
+
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 65634190ffd6..6801959a8b2a 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -15,7 +15,7 @@ static u32 *flush_words;
const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
- { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_MISC) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4a5a42b842ad..755a31e0f5b0 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -315,8 +315,6 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
input.count = 4;
input.pointer = in_params;
- input.count = 4;
- input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = OSC_UUID;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c567dec854f6..1ae4133e6bd6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -630,8 +630,7 @@ static void print_basics(struct powernow_k8_data *data)
data->powernow_table[j].frequency/1000);
} else {
printk(KERN_INFO PFX
- " %d : fid 0x%x (%d MHz), vid 0x%x\n",
- j,
+ "fid 0x%x (%d MHz), vid 0x%x\n",
data->powernow_table[j].index & 0xff,
data->powernow_table[j].frequency/1000,
data->powernow_table[j].index >> 8);
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 8c4085a95ef1..e37b407a0ee8 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -50,7 +50,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
name = "ioapic-level";
}
- irq = xen_map_pirq_gsi(map_irq.pirq, gsi, shareable, name);
+ irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
@@ -237,6 +237,7 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
{
int rc;
int share = 1;
+ int pirq;
u8 gsi;
rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
@@ -246,13 +247,21 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
return rc;
}
+ rc = xen_allocate_pirq_gsi(gsi);
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
+ gsi, rc);
+ return rc;
+ }
+ pirq = rc;
+
if (gsi < NR_IRQS_LEGACY)
share = 0;
- rc = xen_allocate_pirq(gsi, share, "pcifront");
+ rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
- gsi, rc);
+ dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
+ gsi, pirq, rc);
return rc;
}
@@ -309,7 +318,7 @@ int __init pci_xen_hvm_init(void)
#ifdef CONFIG_XEN_DOM0
static int xen_register_pirq(u32 gsi, int triggering)
{
- int rc, irq;
+ int rc, pirq, irq = -1;
struct physdev_map_pirq map_irq;
int shareable = 0;
char *name;
@@ -325,17 +334,20 @@ static int xen_register_pirq(u32 gsi, int triggering)
name = "ioapic-level";
}
- irq = xen_allocate_pirq(gsi, shareable, name);
-
- printk(KERN_DEBUG "xen: --> irq=%d\n", irq);
+ pirq = xen_allocate_pirq_gsi(gsi);
+ if (pirq < 0)
+ goto out;
+ irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
if (irq < 0)
goto out;
+ printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d\n", pirq, irq);
+
map_irq.domid = DOMID_SELF;
map_irq.type = MAP_PIRQ_TYPE_GSI;
map_irq.index = gsi;
- map_irq.pirq = irq;
+ map_irq.pirq = pirq;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
if (rc) {
@@ -422,13 +434,18 @@ static int __init pci_xen_initial_domain(void)
void __init xen_setup_pirqs(void)
{
- int irq;
+ int pirq, irq;
pci_xen_initial_domain();
if (0 == nr_ioapics) {
- for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
- xen_allocate_pirq(irq, 0, "xt-pic");
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+ pirq = xen_allocate_pirq_gsi(irq);
+ if (WARN(pirq < 0,
+ "Could not allocate PIRQ for legacy interrupt\n"))
+ break;
+ irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
+ }
return;
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3f6f3347aa17..5695fa66d565 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -79,8 +79,7 @@
/*
* Protects atomic reservation decrease/increase against concurrent increases.
- * Also protects non-atomic updates of current_pages and driver_pages, and
- * balloon lists.
+ * Also protects non-atomic updates of current_pages and balloon lists.
*/
DEFINE_SPINLOCK(xen_reservation_lock);
diff --git a/block/blk-core.c b/block/blk-core.c
index 518dd423a5fe..a63336d49f30 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2045,9 +2045,26 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
if (error && req->cmd_type == REQ_TYPE_FS &&
!(req->cmd_flags & REQ_QUIET)) {
- printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
- req->rq_disk ? req->rq_disk->disk_name : "?",
- (unsigned long long)blk_rq_pos(req));
+ char *error_type;
+
+ switch (error) {
+ case -ENOLINK:
+ error_type = "recoverable transport";
+ break;
+ case -EREMOTEIO:
+ error_type = "critical target";
+ break;
+ case -EBADE:
+ error_type = "critical nexus";
+ break;
+ case -EIO:
+ default:
+ error_type = "I/O";
+ break;
+ }
+ printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
+ error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
+ (unsigned long long)blk_rq_pos(req));
}
blk_account_io_completion(req, nr_bytes);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5cb4d09919d6..0f17ad8585d7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
goto out;
if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(cpu_policy, pmsg);
+ ret = cpufreq_driver->suspend(cpu_policy);
if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 94284c8473b1..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -116,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -162,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
};
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
@@ -193,33 +183,6 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
const char *buf, size_t count)
@@ -231,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -248,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -262,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold) {
- mutex_unlock(&dbs_mutex);
+ input <= dbs_tuners_ins.down_threshold)
return -EINVAL;
- }
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -282,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold) {
- mutex_unlock(&dbs_mutex);
+ input >= dbs_tuners_ins.up_threshold)
return -EINVAL;
- }
dbs_tuners_ins.down_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -311,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
return count;
- }
+
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
@@ -327,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -347,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
/* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :) */
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.freq_step = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -362,7 +302,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
@@ -378,49 +317,6 @@ static struct attribute_group dbs_attr_group = {
.name = "conservative",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &sampling_down_factor_old.attr,
- &up_threshold_old.attr,
- &down_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &freq_step_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -596,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -664,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
dbs_enable--;
mutex_destroy(&this_dbs_info->timer_mutex);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 58aa85ea5ec6..891360edecdd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -235,21 +234,12 @@ static void ondemand_powersave_bias_init(void)
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
@@ -266,32 +256,6 @@ show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
-/*** delete after deprecation time ***/
-
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(up_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(powersave_bias);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -300,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -317,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.io_is_busy = !!input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -336,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -353,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
@@ -362,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->rate_mult = 1;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -382,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
return count;
}
dbs_tuners_ins.ignore_nice = input;
@@ -399,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -417,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.powersave_bias = input;
ondemand_powersave_bias_init();
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -433,7 +375,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
@@ -449,43 +390,6 @@ static struct attribute_group dbs_attr_group = {
.name = "ondemand",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(up_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(powersave_bias);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(powersave_bias);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &up_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &powersave_bias_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "ondemand",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -642,12 +546,7 @@ static void do_dbs_timer(struct work_struct *work)
unsigned int cpu = dbs_info->cpu;
int sample_type = dbs_info->sample_type;
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+ int delay;
mutex_lock(&dbs_info->timer_mutex);
@@ -660,10 +559,20 @@ static void do_dbs_timer(struct work_struct *work)
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
+ } else {
+ /* We want all CPUs to do sampling nearly on
+ * the same jiffy
+ */
+ delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
+
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
}
} else {
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = dbs_info->freq_lo_jiffies;
}
schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
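The restructured timer above computes the delay only once the sample type is known; notably, the powersave-bias path now rearms with freq_lo_jiffies instead of the stale value that used to be computed up front. A standalone sketch of the jiffy-alignment arithmetic kept in the normal-sampling branch — HZ, the rates, and the simplified usecs_to_jiffies() are illustrative assumptions, not kernel values:

        /* user-space illustration of the delay alignment */
        #include <stdio.h>

        #define HZ 250  /* example tick rate */

        static unsigned int usecs_to_jiffies_approx(unsigned int us)
        {
                return (us * HZ + 999999) / 1000000; /* round up, like the kernel helper */
        }

        int main(void)
        {
                unsigned int sampling_rate = 10000;     /* 10 ms, rate_mult == 1 */
                unsigned long jiffies = 12347;          /* pretend current time */
                unsigned int delay;

                delay = usecs_to_jiffies_approx(sampling_rate);
                /* align the expiry to a multiple of 'delay' so all CPUs
                 * sample on (nearly) the same jiffy */
                delay -= jiffies % delay;
                printf("next sample in %u jiffies\n", delay);   /* -> 1 */
                return 0;
        }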
@@ -727,12 +636,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
dbs_enable++;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
@@ -785,7 +688,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..0be30e978c85 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;
/*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
- [0] = 32,
- [1] = 64,
- [2] = 128,
- [3] = 256,
- [4] = 512,
- [5] = 1024,
- [6] = 2048,
-};
-
-static int ddr2_dbam_revD[] = {
- [0] = 32,
- [1] = 64,
- [2 ... 3] = 128,
- [4] = 256,
- [5] = 512,
- [6] = 256,
- [7] = 512,
- [8 ... 9] = 1024,
- [10] = 2048,
-};
-
-static int ddr2_dbam[] = { [0] = 128,
- [1] = 256,
- [2 ... 4] = 512,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-static int ddr3_dbam[] = { [0] = -1,
- [1] = 256,
- [2] = 512,
- [3 ... 4] = -1,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
* or higher value'.
*
*FIXME: Produce a better mapping/linearisation.
*/
-
-
struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
@@ -107,6 +60,79 @@ struct scrubrate {
{ 0x00, 0UL}, /* scrubbing off */
};
+static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error reading F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func)
+{
+ int err = 0;
+
+ err = pci_write_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error writing to F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+/*
+ *
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ * DCT0 -> F2x040..
+ * DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ if (addr >= 0x100)
+ return -EINVAL;
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ u32 reg = 0;
+ u8 dct = 0;
+
+ if (addr >= 0x140 && addr <= 0x1a0) {
+ dct = 1;
+ addr -= 0x100;
+ }
+
+ amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+ reg &= 0xfffffffe;
+ reg |= dct;
+ amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
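These three helpers are selected per family through the ops table added later in this patch (.read_dct_pci_cfg). A minimal sketch of how call sites stay family-agnostic; the wrapper name and the __func__ plumbing are assumptions modeled on the amd64_read_dct_pci_cfg() calls visible elsewhere in this diff, not quoted from the header:

        /* sketch: route a DCT config-space read through the per-family hook */
        #define amd64_read_dct_pci_cfg(pvt, addr, val)                  \
                ((pvt)->ops->read_dct_pci_cfg((pvt), (addr), (val), __func__))

On F15h this is also where the DctCfgSel selection above stays hidden from the rest of the driver.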
/*
* Memory scrubber control interface. For K8, memory scrubbing is handled by
* hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+ pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
if (scrubval)
return scrubrates[i].bandwidth;
@@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
+ u32 min_scrubrate = 0x5;
+
+ if (boot_cpu_data.x86 == 0xf)
+ min_scrubrate = 0x0;
- return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
+ return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
u32 scrubval = 0;
int i, retval = -EINVAL;
- amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
scrubval = scrubval & 0x001F;
@@ -192,63 +222,14 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
return retval;
}
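The "matching-or-higher" selection mentioned in the scrubrates[] comment amounts to a linear scan of the table, which is ordered from highest to lowest bandwidth. A hedged sketch of that search (the loop shape is inferred from the surrounding context; the real function also skips entries below min_rate):

        /* pick the first entry whose bandwidth does not exceed the request;
         * the terminating { 0x00, 0UL } entry (scrubbing off) is the fallback */
        for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++)
                if (scrubrates[i].bandwidth <= new_bw)
                        break;
        scrubval = scrubrates[i].scrubval;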
-/* Map from a CSROW entry to the mask entry that operates on it */
-static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
-{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
- return csrow;
- else
- return csrow >> 1;
-}
-
-/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
-static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsb0[csrow];
- else
- return pvt->dcsb1[csrow];
-}
-
-/*
- * Return the 'mask' address of the i'th CS entry. This function is needed
- * because the number of DCSM registers on Rev E and prior vs Rev F and later is
- * different.
- */
-static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
- else
- return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
-}
-
-
/*
- * In *base and *limit, pass back the full 40-bit base and limit physical
- * addresses for the node given by node_id. This information is obtained from
- * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
- * base and limit addresses are of type SysAddr, as defined at the start of
- * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
- * in the address range they represent.
+ * returns true if the SysAddr given by sys_addr matches the
+ * DRAM base/limit associated with node_id
*/
-static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
- u64 *base, u64 *limit)
+static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
+ unsigned nid)
{
- *base = pvt->dram_base[node_id];
- *limit = pvt->dram_limit[node_id];
-}
-
-/*
- * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
- * with node_id
- */
-static int amd64_base_limit_match(struct amd64_pvt *pvt,
- u64 sys_addr, int node_id)
-{
- u64 base, limit, addr;
-
- amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+ u64 addr;
/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
* all ones if the most significant implemented address bit is 1.
@@ -258,7 +239,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
*/
addr = sys_addr & 0x000000ffffffffffull;
- return (addr >= base) && (addr <= limit);
+ return ((addr >= get_dram_base(pvt, nid)) &&
+ (addr <= get_dram_limit(pvt, nid)));
}
/*
@@ -271,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- int node_id;
+ unsigned node_id;
u32 intlv_en, bits;
/*
@@ -285,10 +267,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
* registers. Therefore we arbitrarily choose to read it from the
* register for node 0.
*/
- intlv_en = pvt->dram_IntlvEn[0];
+ intlv_en = dram_intlv_en(pvt, 0);
if (intlv_en == 0) {
- for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+ for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
if (amd64_base_limit_match(pvt, sys_addr, node_id))
goto found;
}
@@ -305,10 +287,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
bits = (((u32) sys_addr) >> 12) & intlv_en;
for (node_id = 0; ; ) {
- if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+ if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
break; /* intlv_sel field matches */
- if (++node_id >= DRAM_REG_COUNT)
+ if (++node_id >= DRAM_RANGES)
goto err_no_match;
}
@@ -321,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
}
found:
- return edac_mc_find(node_id);
+ return edac_mc_find((int)node_id);
err_no_match:
debugf2("sys_addr 0x%lx doesn't match any node\n",
@@ -331,37 +313,50 @@ err_no_match:
}
/*
- * Extract the DRAM CS base address from selected csrow register.
+ * compute the CS base address of the @csrow on the DRAM controller @dct.
+ * For details see F2x[5C:40] in the processor's BKDG
*/
-static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
+ u64 *base, u64 *mask)
{
- return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
- pvt->dcs_shift;
-}
+ u64 csbase, csmask, base_bits, mask_bits;
+ u8 addr_shift;
-/*
- * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
- */
-static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
-{
- u64 dcsm_bits, other_bits;
- u64 mask;
-
- /* Extract bits from DRAM CS Mask. */
- dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow];
+ base_bits = GENMASK(21, 31) | GENMASK(9, 15);
+ mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ addr_shift = 4;
+ } else {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow >> 1];
+ addr_shift = 8;
- other_bits = pvt->dcsm_mask;
- other_bits = ~(other_bits << pvt->dcs_shift);
+ if (boot_cpu_data.x86 == 0x15)
+ base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+ else
+ base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+ }
- /*
- * The extracted bits from DCSM belong in the spaces represented by
- * the cleared bits in other_bits.
- */
- mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+ *base = (csbase & base_bits) << addr_shift;
- return mask;
+ *mask = ~0ULL;
+ /* poke holes for the csmask */
+ *mask &= ~(mask_bits << addr_shift);
+ /* OR them in */
+ *mask |= (csmask & mask_bits) << addr_shift;
}
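A quick placement check for the two arms — note that this driver's GENMASK(lo, hi) takes the low bit first (an assumption consistent with its usage here, unlike the later kernel-wide macro):

        /* RevE, addr_shift = 4:
         *   reg bits [31:21] and [15:9]  ->  addr bits [35:25] and [19:13]
         * RevF and later, addr_shift = 8:
         *   reg bits [28:19] and [13:5]  ->  addr bits [36:27] and [21:13]
         */

which matches the bit ranges spelled out in the long comment this patch deletes further down.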
+#define for_each_chip_select(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].b_cnt; i++)
+
+#define chip_select_base(i, dct, pvt) \
+ pvt->csels[dct].csbases[i]
+
+#define for_each_chip_select_mask(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+
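A minimal usage sketch for the iterators; csrow_enabled() is assumed from the header, consistent with its use later in this hunk:

        /* walk DCT0's chip selects and report the enabled ones */
        int cs;

        for_each_chip_select(cs, 0, pvt) {
                if (!csrow_enabled(cs, 0, pvt))
                        continue;
                debugf1("csrow %d base: 0x%x\n",
                        cs, chip_select_base(cs, 0, pvt));
        }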
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -374,19 +369,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
pvt = mci->pvt_info;
- /*
- * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
- * base/mask register pair, test the condition shown near the start of
- * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
- */
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
- /* This DRAM chip select is disabled on this node */
- if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+ for_each_chip_select(csrow, 0, pvt) {
+ if (!csrow_enabled(csrow, 0, pvt))
continue;
- base = base_from_dct_base(pvt, csrow);
- mask = ~mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
+
+ mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -396,7 +385,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
return csrow;
}
}
-
debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
(unsigned long)input_addr, pvt->mc_node_id);
@@ -404,19 +392,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
}
/*
- * Return the base value defined by the DRAM Base register for the node
- * represented by mci. This function returns the full 40-bit value despite the
- * fact that the register only stores bits 39-24 of the value. See section
- * 3.4.4.1 (BKDG #26094, K8, revA-E)
- */
-static inline u64 get_dram_base(struct mem_ctl_info *mci)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
-
- return pvt->dram_base[pvt->mc_node_id];
-}
-
-/*
* Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
* for the node represented by mci. Info is passed back in *hole_base,
* *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
@@ -445,14 +420,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 1;
}
- /* only valid for Fam10h */
- if (boot_cpu_data.x86 == 0x10 &&
- (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+ /* valid for Fam10h and above */
+ if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
- if ((pvt->dhar & DHAR_VALID) == 0) {
+ if (!dhar_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
pvt->mc_node_id);
return 1;
@@ -476,15 +450,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
* addresses in the hole so that they start at 0x100000000.
*/
- base = dhar_base(pvt->dhar);
+ base = dhar_base(pvt);
*hole_base = base;
*hole_size = (0x1ull << 32) - base;
if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt->dhar);
+ *hole_offset = f10_dhar_offset(pvt);
else
- *hole_offset = k8_dhar_offset(pvt->dhar);
+ *hole_offset = k8_dhar_offset(pvt);
debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
pvt->mc_node_id, (unsigned long)*hole_base,
@@ -525,10 +499,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
*/
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
int ret = 0;
- dram_base = get_dram_base(mci);
+ dram_base = get_dram_base(pvt, pvt->mc_node_id);
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
&hole_size);
@@ -556,7 +531,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+ dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
"DramAddr 0x%lx\n", (unsigned long)sys_addr,
@@ -592,9 +567,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
* concerning translating a DramAddr to an InputAddr.
*/
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
- input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
- (dram_addr & 0xfff);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ (dram_addr & 0xfff);
debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
intlv_shift, (unsigned long)dram_addr,
@@ -628,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
struct amd64_pvt *pvt;
- int node_id, intlv_shift;
+ unsigned node_id, intlv_shift;
u64 bits, dram_addr;
u32 intlv_sel;
@@ -642,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
*/
pvt = mci->pvt_info;
node_id = pvt->mc_node_id;
- BUG_ON((node_id < 0) || (node_id > 7));
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+ BUG_ON(node_id > 7);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
debugf1(" InputAddr 0x%lx translates to DramAddr of "
"same value\n", (unsigned long)input_addr);
@@ -653,10 +628,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
return input_addr;
}
- bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
- (input_addr & 0xfff);
+ bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
+ (input_addr & 0xfff);
- intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+ intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
@@ -673,7 +648,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+ u64 hole_base, hole_offset, hole_size, base, sys_addr;
int ret = 0;
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
@@ -691,7 +666,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
}
}
- amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+ base = get_dram_base(pvt, pvt->mc_node_id);
sys_addr = dram_addr + base;
/*
@@ -736,13 +711,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
u64 base, mask;
pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+ BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
- base = base_from_dct_base(pvt, csrow);
- mask = mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
*input_addr_min = base & ~mask;
- *input_addr_max = base | mask | pvt->dcs_mask_notused;
+ *input_addr_max = base | mask;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
@@ -775,18 +749,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
-static u16 extract_syndrome(struct err_regs *err)
-{
- return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
-}
-
/*
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
- int bit;
+ u8 bit;
enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
@@ -799,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
return edac_cap;
}
-
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
@@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
debugf1(" PAR/ERR parity: %s\n",
(dclr & BIT(8)) ? "enabled" : "disabled");
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ if (boot_cpu_data.x86 == 0x10)
+ debugf1(" DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
(dclr & BIT(12)) ? "yes" : "no",
@@ -824,16 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
}
/* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
{
debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
amd64_dump_dramcfg_low(pvt->dclr0, 0);
@@ -841,130 +810,84 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
"offset: 0x%08x\n",
- pvt->dhar,
- dhar_base(pvt->dhar),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
- : f10_dhar_offset(pvt->dhar));
+ pvt->dhar, dhar_base(pvt),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
- debugf1(" DramHoleValid: %s\n",
- (pvt->dhar & DHAR_VALID) ? "yes" : "no");
+ debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_debug_display_dimm_sizes(0, pvt);
+ amd64_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (boot_cpu_data.x86 == 0xf)
return;
- amd64_debug_display_dimm_sizes(1, pvt);
+ amd64_debug_display_dimm_sizes(pvt, 1);
- amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
+ amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
amd64_dump_dramcfg_low(pvt->dclr1, 1);
}
-/* Read in both of DBAM registers */
-static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
-{
- amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
- if (boot_cpu_data.x86 >= 0x10)
- amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
-}
-
/*
- * NOTE: CPU Revision Dependent code: Rev E and Rev F
- *
- * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
- * set the shift factor for the DCSB and DCSM values.
- *
- * ->dcs_mask_notused, RevE:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of section
- * 3.5.4 (p. 84).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
- * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
- * gaps.
- *
- * ->dcs_mask_notused, RevF and later:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of NPT section
- * 4.5.4 (p. 87).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [36:27] and [21:13].
- *
- * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
- * which are all bits in the above-mentioned gaps.
+ * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
-static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+static void prep_chip_selects(struct amd64_pvt *pvt)
{
-
if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
- pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_E_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 8;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
} else {
- pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 4;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
/*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs, reg;
+ int cs;
- amd64_set_dct_base_and_mask(pvt);
+ prep_chip_selects(pvt);
- for (cs = 0; cs < pvt->cs_count; cs++) {
- reg = K8_DCSB0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+ for_each_chip_select(cs, 0, pvt) {
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
+ u32 *base0 = &pvt->csels[0].csbases[cs];
+ u32 *base1 = &pvt->csels[1].csbases[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's base */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSB1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsb1[cs]))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb1[cs], reg);
- } else {
- pvt->dcsb1[cs] = 0;
- }
+ cs, *base0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+ debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
- for (cs = 0; cs < pvt->num_dcsm; cs++) {
- reg = K8_DCSM0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+ for_each_chip_select_mask(cs, 0, pvt) {
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
+ u32 *mask0 = &pvt->csels[0].csmasks[cs];
+ u32 *mask1 = &pvt->csels[1].csmasks[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's mask */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSM1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsm1[cs]))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm1[cs], reg);
- } else {
- pvt->dcsm1[cs] = 0;
- }
+ cs, *mask0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+ debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
@@ -972,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
- if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+ /* F15h supports only DDR3 */
+ if (boot_cpu_data.x86 >= 0x15)
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+ else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
if (pvt->dchr0 & DDR3_MODE)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
else
@@ -986,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
return type;
}
-/*
- * Read the DRAM Configuration Low register. It differs between CG, D & E revs
- * and the later RevF memory controllers (DDR vs DDR2)
- *
- * Return:
- * number of memory channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
+/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
- int flag, err = 0;
-
- err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- if (err)
- return err;
+ int flag;
if (pvt->ext_model >= K8_REV_F)
/* RevF (NPT) and later */
- flag = pvt->dclr0 & F10_WIDTH_128;
+ flag = pvt->dclr0 & WIDTH_128;
else
/* RevE and earlier */
flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1016,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
return (flag) ? 2 : 1;
}
-/* extract the ERROR ADDRESS for the K8 CPUs */
-static u64 k8_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
+static u64 get_error_address(struct mce *m)
{
- return (((u64) (info->nbeah & 0xff)) << 32) +
- (info->nbeal & ~0x03);
+ u8 start_bit = 1;
+ u8 end_bit = 47;
+
+ if (boot_cpu_data.x86 == 0xf) {
+ start_bit = 3;
+ end_bit = 39;
+ }
+
+ return m->addr & GENMASK(start_bit, end_bit);
}
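A worked example of the masking, again with the driver-local GENMASK(lo, hi) convention assumed (bits lo..hi inclusive):

        /* K8 keeps ErrAddr bits [39:3]:
         *   m->addr          = 0x0123456789abcdef
         *   GENMASK(3, 39)   = 0x000000fffffffff8
         *   returned address = 0x0000006789abcde8
         * F10h and later keep bits [47:1] instead.
         */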
-/*
- * Read the Base and Limit registers for K8 based Memory controllers; extract
- * fields from the 'raw' reg into separate data fields
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
- */
-static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
- u32 low;
- u32 off = dram << 3; /* 8 bytes between DRAM entries */
+ int off = range << 3;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
- /* Extract parts into separate data entries */
- pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
- pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
- pvt->dram_rw_en[dram] = (low & 0x3);
+ if (boot_cpu_data.x86 == 0xf)
+ return;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
+ if (!dram_rw(pvt, range))
+ return;
- /*
- * Extract parts into separate data entries. Limit is the HIGHEST memory
- * location of the region, so lower 24 bits need to be all ones
- */
- pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
- pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
- pvt->dram_DstNode[dram] = (low & 0x7);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
}
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info, u64 sys_addr)
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct mem_ctl_info *src_mci;
+ struct amd64_pvt *pvt = mci->pvt_info;
int channel, csrow;
u32 page, offset;
- u16 syndrome;
-
- syndrome = extract_syndrome(err_info);
/* CHIPKILL enabled */
- if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
+ if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
if (channel < 0) {
/*
@@ -1113,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
}
}
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
- if (pvt->ext_model >= K8_REV_F)
- dbam_map = ddr2_dbam;
- else if (pvt->ext_model >= K8_REV_D)
- dbam_map = ddr2_dbam_revD;
+ if (i <= 2)
+ shift = i;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam_revCG;
+ shift = (i + 1) >> 1;
- return dbam_map[cs_mode];
+ return 128 << (shift + !!dct_width);
+}
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
+{
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ if (pvt->ext_model >= K8_REV_F) {
+ WARN_ON(cs_mode > 11);
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+ }
+ else if (pvt->ext_model >= K8_REV_D) {
+ WARN_ON(cs_mode > 10);
+
+ if (cs_mode == 3 || cs_mode == 8)
+ return 32 << (cs_mode - 1);
+ else
+ return 32 << cs_mode;
+ }
+ else {
+ WARN_ON(cs_mode > 6);
+ return 32 << cs_mode;
+ }
}
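The closed form in ddr2_cs_size() reproduces the ddr2_dbam[] table deleted at the top of this file. A throwaway user-space check — the expected values are copied from that deleted table for a 64-bit DCT; nothing here is kernel API:

        #include <assert.h>
        #include <stdbool.h>

        static int ddr2_cs_size(unsigned i, bool dct_width)
        {
                unsigned shift = 0;

                if (i <= 2)
                        shift = i;
                else if (!(i & 0x1))
                        shift = i >> 1;
                else
                        shift = (i + 1) >> 1;

                return 128 << (shift + !!dct_width);
        }

        int main(void)
        {
                /* expected: the deleted ddr2_dbam[] table */
                int exp[] = { 128, 256, 512, 512, 512, 1024,
                              1024, 2048, 2048, 4096, 4096, 8192 };
                unsigned i;

                for (i = 0; i < 12; i++)
                        assert(ddr2_cs_size(i, false) == exp[i]);
                return 0;
        }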
/*
@@ -1135,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
* Pass back:
* contents of the DCL0_LOW register
*/
-static int f10_early_channel_count(struct amd64_pvt *pvt)
+static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
- int dbams[] = { DBAM0, DBAM1 };
int i, j, channels = 0;
- u32 dbam;
- /* If we are in 128 bit mode, then we are using 2 channels */
- if (pvt->dclr0 & F10_WIDTH_128) {
- channels = 2;
- return channels;
- }
+ /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
+ if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+ return 2;
/*
* Need to check if in unganged mode: In such, there are 2 channels,
@@ -1162,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
* is more than just one DIMM present in unganged mode. Need to check
* both controllers since DIMMs can be placed in either one.
*/
- for (i = 0; i < ARRAY_SIZE(dbams); i++) {
- if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
- goto err_reg;
+ for (i = 0; i < 2; i++) {
+ u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
for (j = 0; j < 4; j++) {
if (DBAM_DIMM(j, dbam) > 0) {
@@ -1180,216 +1104,191 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
amd64_info("MCT channel count: %d\n", channels);
return channels;
-
-err_reg:
- return -1;
-
}
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
+ int cs_size = 0;
- if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
- dbam_map = ddr3_dbam;
+ if (i == 0 || i == 3 || i == 4)
+ cs_size = -1;
+ else if (i <= 2)
+ shift = i;
+ else if (i == 12)
+ shift = 7;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam;
+ shift = (i + 1) >> 1;
+
+ if (cs_size != -1)
+ cs_size = (128 * (1 << !!dct_width)) << shift;
- return dbam_map[cs_mode];
+ return cs_size;
}
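ddr3_cs_size() likewise agrees with the deleted ddr3_dbam[] table for cs_mode 0..11 and extends it with cs_mode 12 (shift 7), which F15h uses below. The mapping for a 64-bit DCT, written out:

        /* cs_mode :  0   1    2    3   4    5     6     7     8
         * size MB : -1  256  512  -1  -1  1024  1024  2048  2048
         *
         * cs_mode :   9    10    11     12
         * size MB : 4096  4096  8192  16384
         */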
-static u64 f10_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- return (((u64) (info->nbeah & 0xffff)) << 32) +
- (info->nbeal & ~0x01);
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ WARN_ON(cs_mode > 11);
+
+ if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+ return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
+ else
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
- * Read the Base and Limit registers for F10 based Memory controllers. Extract
- * fields from the 'raw' reg into separate data fields.
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ * F15h supports only 64bit DCT interfaces
*/
-static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
-
- low_offset = K8_DRAM_BASE_LOW + (dram << 3);
- high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
-
- /* read the 'raw' DRAM BASE Address register */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
-
- /* Extract parts into separate data entries */
- pvt->dram_rw_en[dram] = (low_base & 0x3);
-
- if (pvt->dram_rw_en[dram] == 0)
- return;
-
- pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
-
- pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
- (((u64)low_base & 0xFFFF0000) << 8);
-
- low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
- high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
-
- /* read the 'raw' LIMIT registers */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
-
- pvt->dram_DstNode[dram] = (low_limit & 0x7);
- pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+ WARN_ON(cs_mode > 12);
- /*
- * Extract address values and form a LIMIT address. Limit is the HIGHEST
- * memory location of the region, so low 24 bits need to be all ones.
- */
- pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
- (((u64) low_limit & 0xFFFF0000) << 8) |
- 0x00FFFFFF;
+ return ddr3_cs_size(cs_mode, false);
}
-static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
- if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
- &pvt->dram_ctl_select_low)) {
- debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
- "High range addresses at: 0x%x\n",
- pvt->dram_ctl_select_low,
- dct_sel_baseaddr(pvt));
+ if (boot_cpu_data.x86 == 0xf)
+ return;
+
+ if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+ debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCT mode: %s, All DCTs on: %s\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
- (dct_dram_enabled(pvt) ? "yes" : "no"));
+ debugf0(" DCTs operate in %s mode.\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
debugf0(" Address range split per DCT: %s\n",
(dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" DCT data interleave for ECC: %s, "
+ debugf0(" data interleave for ECC: %s, "
"DRAM cleared since last warm reset: %s\n",
(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
(dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" DCT channel interleave: %s, "
- "DCT interleave bits selector: 0x%x\n",
+ debugf0(" channel interleave: %s, "
+ "interleave bits selector: 0x%x\n",
(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
dct_sel_interleave_addr(pvt));
}
- amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
- &pvt->dram_ctl_select_high);
+ amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
- * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
-static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
- int hi_range_sel, u32 intlv_en)
+static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ bool hi_range_sel, u8 intlv_en)
{
- u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+ u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
if (dct_ganging_enabled(pvt))
- cs = 0;
- else if (hi_range_sel)
- cs = dct_sel_high;
- else if (dct_interleave_enabled(pvt)) {
- /*
- * see F2x110[DctSelIntLvAddr] - channel interleave mode
- */
- if (dct_sel_interleave_addr(pvt) == 0)
- cs = sys_addr >> 6 & 1;
- else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
- temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+ return 0;
- if (dct_sel_interleave_addr(pvt) & 1)
- cs = (sys_addr >> 9 & 1) ^ temp;
- else
- cs = (sys_addr >> 6 & 1) ^ temp;
- } else if (intlv_en & 4)
- cs = sys_addr >> 15 & 1;
- else if (intlv_en & 2)
- cs = sys_addr >> 14 & 1;
- else if (intlv_en & 1)
- cs = sys_addr >> 13 & 1;
- else
- cs = sys_addr >> 12 & 1;
- } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
- cs = ~dct_sel_high & 1;
- else
- cs = 0;
+ if (hi_range_sel)
+ return dct_sel_high;
- return cs;
-}
+ /*
+ * see F2x110[DctSelIntLvAddr] - channel interleave mode
+ */
+ if (dct_interleave_enabled(pvt)) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
-static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
-{
- if (intlv_en == 1)
- return 1;
- else if (intlv_en == 3)
- return 2;
- else if (intlv_en == 7)
- return 3;
+ /* return DCT select function: 0=DCT0, 1=DCT1 */
+ if (!intlv_addr)
+ return sys_addr >> 6 & 1;
+
+ if (intlv_addr & 0x2) {
+ u8 shift = intlv_addr & 0x1 ? 9 : 6;
+ u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+ return ((sys_addr >> shift) & 1) ^ temp;
+ }
+
+ return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
+ }
+
+ if (dct_high_range_enabled(pvt))
+ return ~dct_sel_high & 1;
return 0;
}
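Concretely, with channel interleaving enabled and DctSelIntLvAddr == 0, the DCT is picked by address bit 6, so consecutive 64-byte lines alternate between DCT0 and DCT1. An illustration with an invented address:

        u64 sys_addr = 0x12345678;              /* bit 6 (0x40) is set */
        u8  channel  = (sys_addr >> 6) & 1;     /* -> DCT1 */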
-/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
-static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
- u32 dct_sel_base_addr,
- u64 dct_sel_base_off,
- u32 hole_valid, u32 hole_off,
- u64 dram_base)
+/* Convert the sys_addr to the normalized DCT address */
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, bool hi_rng,
+ u32 dct_sel_base_addr)
{
u64 chan_off;
+ u64 dram_base = get_dram_base(pvt, range);
+ u64 hole_off = f10_dhar_offset(pvt);
+ u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
- if (hi_range_sel) {
- if (!(dct_sel_base_addr & 0xFFFF0000) &&
- hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ if (hi_rng) {
+ /*
+ * if
+ * base address of high range is below 4Gb
+ * (bits [47:27] at [31:11])
+ * DRAM address space on this DCT is hoisted above 4Gb &&
+ * sys_addr > 4Gb
+ *
+ * remove hole offset from sys_addr
+ * else
+ * remove high range offset from sys_addr
+ */
+ if ((!(dct_sel_base_addr >> 16) ||
+ dct_sel_base_addr < dhar_base(pvt)) &&
+ dhar_valid(pvt) &&
+ (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
chan_off = dct_sel_base_off;
} else {
- if (hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ /*
+ * if
+ * we have a valid hole &&
+ * sys_addr > 4Gb
+ *
+ * remove hole
+ * else
+ * remove dram base to normalize to DCT address
+ */
+ if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
- chan_off = dram_base & 0xFFFFF8000000ULL;
+ chan_off = dram_base;
}
- return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
- (chan_off & 0x0000FFFFFF800000ULL);
+ return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47));
}
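In short: the normalized DCT address is sys_addr minus either the DHAR hole offset (access above 4GB with hoisting in effect) or the DRAM/high-range base. A worked example, all numbers invented:

        /*   sys_addr  = 0x0000000120000000   (above 4 GB)
         *   hole_off  = 0x00000000c0000000   (f10_dhar_offset())
         *   chan_addr = sys_addr - hole_off
         *             = 0x0000000060000000
         */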
-/* Hack for the time being - Can we get this from BIOS?? */
-#define CH0SPARE_RANK 0
-#define CH1SPARE_RANK 1
-
/*
* checks if the csrow passed in is marked as SPARED, if so returns the new
* spare row
*/
-static inline int f10_process_possible_spare(int csrow,
- u32 cs, struct amd64_pvt *pvt)
-{
- u32 swap_done;
- u32 bad_dram_cs;
-
- /* Depending on channel, isolate respective SPARING info */
- if (cs) {
- swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH1SPARE_RANK;
- } else {
- swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH0SPARE_RANK;
+static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
+{
+ int tmp_cs;
+
+ if (online_spare_swap_done(pvt, dct) &&
+ csrow == online_spare_bad_dramcs(pvt, dct)) {
+
+ for_each_chip_select(tmp_cs, dct, pvt) {
+ if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
+ csrow = tmp_cs;
+ break;
+ }
+ }
}
return csrow;
}
@@ -1402,11 +1301,11 @@ static inline int f10_process_possible_spare(int csrow,
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u32 cs_base, cs_mask;
+ u64 cs_base, cs_mask;
int cs_found = -EINVAL;
int csrow;
@@ -1416,39 +1315,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
pvt = mci->pvt_info;
- debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
-
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+ debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
- cs_base = amd64_get_dct_base(pvt, cs, csrow);
- if (!(cs_base & K8_DCSB_CS_ENABLE))
+ for_each_chip_select(csrow, dct, pvt) {
+ if (!csrow_enabled(csrow, dct, pvt))
continue;
- /*
- * We have an ENABLED CSROW, Isolate just the MASK bits of the
- * target: [28:19] and [13:5], which map to [36:27] and [21:13]
- * of the actual address.
- */
- cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
-
- /*
- * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
- * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
- */
- cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+ get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
- csrow, cs_base, cs_mask);
+ debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
- cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+ cs_mask = ~cs_mask;
- debugf1(" Final CSMask=0x%x\n", cs_mask);
- debugf1(" (InputAddr & ~CSMask)=0x%x "
- "(CSBase & ~CSMask)=0x%x\n",
- (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+ debugf1(" (InputAddr & ~CSMask)=0x%llx "
+ "(CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
- if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
- cs_found = f10_process_possible_spare(csrow, cs, pvt);
+ if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+ cs_found = f10_process_possible_spare(pvt, dct, csrow);
debugf1(" MATCH csrow=%d\n", cs_found);
break;
@@ -1457,38 +1342,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
return cs_found;
}
-/* For a given @dram_range, check if @sys_addr falls within it. */
-static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
- u64 sys_addr, int *nid, int *chan_sel)
+/*
+ * See F2x10C. Non-interleaved graphics framebuffer memory below the 16G boundary is
+ * swapped with a region located at the bottom of memory so that the GPU can use
+ * the interleaved region and thus two channels.
+ */
+static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
- int node_id, cs_found = -EINVAL, high_range = 0;
- u32 intlv_en, intlv_sel, intlv_shift, hole_off;
- u32 hole_valid, tmp, dct_sel_base, channel;
- u64 dram_base, chan_addr, dct_sel_base_off;
+ u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
- dram_base = pvt->dram_base[dram_range];
- intlv_en = pvt->dram_IntlvEn[dram_range];
+ if (boot_cpu_data.x86 == 0x10) {
+ /* only revC3 and revE have that feature */
+ if (boot_cpu_data.x86_model < 4 ||
+ (boot_cpu_data.x86_model < 0xa &&
+ boot_cpu_data.x86_mask < 3))
+ return sys_addr;
+ }
- node_id = pvt->dram_DstNode[dram_range];
- intlv_sel = pvt->dram_IntlvSel[dram_range];
+ amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
- debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
- dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+ if (!(swap_reg & 0x1))
+ return sys_addr;
- /*
- * This assumes that one node's DHAR is the same as all the other
- * nodes' DHAR.
- */
- hole_off = (pvt->dhar & 0x0000FF80);
- hole_valid = (pvt->dhar & 0x1);
- dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+ swap_base = (swap_reg >> 3) & 0x7f;
+ swap_limit = (swap_reg >> 11) & 0x7f;
+ rgn_size = (swap_reg >> 20) & 0x7f;
+ tmp_addr = sys_addr >> 27;
- debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
- hole_off, hole_valid, intlv_sel);
+ if (!(sys_addr >> 34) &&
+ (((tmp_addr >= swap_base) &&
+ (tmp_addr <= swap_limit)) ||
+ (tmp_addr < rgn_size)))
+ return sys_addr ^ (u64)swap_base << 27;
+
+ return sys_addr;
+}
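The swap register packs its fields in 128MB units. A decode of an invented register value, following the shifts above:

        /*   swap_reg   = 0x00201809
         *   enabled    :  swap_reg & 1           -> 1
         *   swap_base  : (swap_reg >> 3)  & 0x7f -> 1  (0x08000000)
         *   swap_limit : (swap_reg >> 11) & 0x7f -> 3  (up to 0x20000000)
         *   rgn_size   : (swap_reg >> 20) & 0x7f -> 2  (256MB at bottom)
         *
         * sys_addr = 0x08001000 has tmp_addr = 1, inside [base, limit],
         * so it is returned as sys_addr ^ (1ULL << 27) = 0x00001000.
         */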
+
+/* For a given @dram_range, check if @sys_addr falls within it. */
+static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, int *nid, int *chan_sel)
+{
+ int cs_found = -EINVAL;
+ u64 chan_addr;
+ u32 dct_sel_base;
+ u8 channel;
+ bool high_range = false;
+
+ u8 node_id = dram_dst_node(pvt, range);
+ u8 intlv_en = dram_intlv_en(pvt, range);
+ u32 intlv_sel = dram_intlv_sel(pvt, range);
+
+ debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
+
+ if (dhar_valid(pvt) &&
+ dhar_base(pvt) <= sys_addr &&
+ sys_addr < BIT_64(32)) {
+ amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+ sys_addr);
+ return -EINVAL;
+ }
if (intlv_en &&
- (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+ (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
+ amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
+ intlv_en, intlv_sel);
return -EINVAL;
+ }
+
+ sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
dct_sel_base = dct_sel_baseaddr(pvt);
@@ -1499,38 +1421,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
if (dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt) &&
((sys_addr >> 27) >= (dct_sel_base >> 11)))
- high_range = 1;
-
- channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+ high_range = true;
- chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
- dct_sel_base_off, hole_valid,
- hole_off, dram_base);
+ channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
- intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+ chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
+ high_range, dct_sel_base);
- /* remove Node ID (in case of memory interleaving) */
- tmp = chan_addr & 0xFC0;
+ /* Remove node interleaving, see F1x120 */
+ if (intlv_en)
+ chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
+ (chan_addr & 0xfff);
- chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
-
- /* remove channel interleave and hash */
+ /* remove channel interleave */
if (dct_interleave_enabled(pvt) &&
!dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt)) {
- if (dct_sel_interleave_addr(pvt) != 1)
- chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
- else {
- tmp = chan_addr & 0xFC0;
- chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
- | tmp;
- }
+
+ if (dct_sel_interleave_addr(pvt) != 1) {
+ if (dct_sel_interleave_addr(pvt) == 0x3)
+ /* hash 9 */
+ chan_addr = ((chan_addr >> 10) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ /* A[6] or hash 6 */
+ chan_addr = ((chan_addr >> 7) << 6) |
+ (chan_addr & 0x3f);
+ } else
+ /* A[12] */
+ chan_addr = ((chan_addr >> 13) << 12) |
+ (chan_addr & 0xfff);
}
- debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
- chan_addr, (u32)(chan_addr >> 8));
+ debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
- cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+ cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
if (cs_found >= 0) {
*nid = node_id;
@@ -1539,23 +1464,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
return cs_found;
}
-static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
int *node, int *chan_sel)
{
- int dram_range, cs_found = -EINVAL;
- u64 dram_base, dram_limit;
+ int cs_found = -EINVAL;
+ unsigned range;
- for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+ for (range = 0; range < DRAM_RANGES; range++) {
- if (!pvt->dram_rw_en[dram_range])
+ if (!dram_rw(pvt, range))
continue;
- dram_base = pvt->dram_base[dram_range];
- dram_limit = pvt->dram_limit[dram_range];
+ if ((get_dram_base(pvt, range) <= sys_addr) &&
+ (get_dram_limit(pvt, range) >= sys_addr)) {
- if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
-
- cs_found = f10_match_to_this_node(pvt, dram_range,
+ cs_found = f1x_match_to_this_node(pvt, range,
sys_addr, node,
chan_sel);
if (cs_found >= 0)
@@ -1572,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
* The @sys_addr is usually an error address received from the hardware
* (MCX_ADDR).
*/
-static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info,
- u64 sys_addr)
+static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 page, offset;
int nid, csrow, chan = 0;
- u16 syndrome;
- csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+ csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
@@ -1590,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
error_address_to_page_and_offset(sys_addr, &page, &offset);
- syndrome = extract_syndrome(err_info);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
* this point.
*/
- if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
+ if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
if (chan >= 0)
@@ -1614,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
/*
* debug routine to display the memory sizes of all logical DIMMs and its
- * CSROWs as well
+ * CSROWs
*/
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, factor = 0;
- u32 dbam;
- u32 *dcsb;
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
if (boot_cpu_data.x86 == 0xf) {
- if (pvt->dclr0 & F10_WIDTH_128)
+ if (pvt->dclr0 & WIDTH_128)
factor = 1;
/* K8 families < revF not supported yet */
@@ -1634,7 +1553,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
}
dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
+ : pvt->csels[0].csbases;
debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
@@ -1644,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
- if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
- size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
size1 = 0;
- if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0 << factor,
@@ -1664,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = {
.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
.ops = {
.early_channel_count = k8_early_channel_count,
- .get_error_address = k8_get_error_address,
- .read_dram_base_limit = k8_read_dram_base_limit,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
+ .read_dct_pci_cfg = k8_read_dct_pci_cfg,
}
},
[F10_CPUS] = {
@@ -1675,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = {
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
.ops = {
- .early_channel_count = f10_early_channel_count,
- .get_error_address = f10_get_error_address,
- .read_dram_base_limit = f10_read_dram_base_limit,
- .read_dram_ctl_register = f10_read_dram_ctl_register,
- .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
+ [F15_CPUS] = {
+ .ctl_name = "F15h",
+ .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f15_dbam_to_chip_select,
+ .read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
};
@@ -1770,15 +1700,15 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
- int v_dim)
+static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+ unsigned v_dim)
{
unsigned int i, err_sym;
for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
u16 s = syndrome;
- int v_idx = err_sym * v_dim;
- int v_end = (err_sym + 1) * v_dim;
+ unsigned v_idx = err_sym * v_dim;
+ unsigned v_end = (err_sym + 1) * v_dim;
/* walk over all 16 bits of the syndrome */
for (i = 1; i < (1U << 16); i <<= 1) {
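/*
 * Illustrative userspace sketch (not part of this patch, and not the
 * driver's exact elimination loop): conceptually, each error symbol
 * owns v_dim basis vectors, and a syndrome is attributed to the symbol
 * whose basis XOR-combines to reproduce it. Brute force over subsets
 * is enough for the small v_dim values used here.
 */
#include <stddef.h>
#include <stdint.h>

static int sketch_decode_syndrome(uint16_t syndrome, const uint16_t *vectors,
				  size_t num_vecs, unsigned v_dim)
{
	for (size_t sym = 0; sym < num_vecs / v_dim; sym++) {
		const uint16_t *basis = &vectors[sym * v_dim];

		for (uint32_t subset = 1; subset < (1U << v_dim); subset++) {
			uint16_t s = 0;

			for (unsigned b = 0; b < v_dim; b++)
				if (subset & (1U << b))
					s ^= basis[b];
			if (s == syndrome)
				return (int)sym;	/* matching error symbol */
		}
	}
	return -1;	/* no symbol's basis spans this syndrome */
}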
@@ -1850,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
struct amd64_pvt *pvt = mci->pvt_info;
int err_sym = -1;
- if (pvt->syn_type == 8)
+ if (pvt->ecc_sym_sz == 8)
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors),
- pvt->syn_type);
- else if (pvt->syn_type == 4)
+ pvt->ecc_sym_sz);
+ else if (pvt->ecc_sym_sz == 4)
err_sym = decode_syndrome(syndrome, x4_vectors,
ARRAY_SIZE(x4_vectors),
- pvt->syn_type);
+ pvt->ecc_sym_sz);
else {
- amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
+ amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
return err_sym;
}
- return map_err_sym_to_channel(err_sym, pvt->syn_type);
+ return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
/*
* Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
* ADDRESS and process.
*/
-static void amd64_handle_ce(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 sys_addr;
+ u16 syndrome;
/* Ensure that the Error Address is VALID */
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
+ syndrome = extract_syndrome(m->status);
amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
- pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
+ pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
}
/* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
{
- struct amd64_pvt *pvt = mci->pvt_info;
struct mem_ctl_info *log_mci, *src_mci = NULL;
int csrow;
u64 sys_addr;
@@ -1902,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
log_mci = mci;
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
/*
* Find out which node the error address belongs to. This may be
@@ -1936,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct err_regs *info)
+ struct mce *m)
{
- u16 ec = EC(info->nbsl);
- u8 xec = XEC(info->nbsl, 0x1f);
- int ecc_type = (info->nbsh >> 13) & 0x3;
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, 0x1f);
+ u8 ecc_type = (m->status >> 45) & 0x3;
/* Bail early out if this was an 'observed' error */
- if (PP(ec) == K8_NBSL_PP_OBS)
+ if (PP(ec) == NBSL_PP_OBS)
return;
/* Do only ECC errors */
@@ -1951,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
return;
if (ecc_type == 2)
- amd64_handle_ce(mci, info);
+ amd64_handle_ce(mci, m);
else if (ecc_type == 1)
- amd64_handle_ue(mci, info);
+ amd64_handle_ue(mci, m);
}
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
struct mem_ctl_info *mci = mcis[node_id];
- struct err_regs regs;
-
- regs.nbsl = (u32) m->status;
- regs.nbsh = (u32)(m->status >> 32);
- regs.nbeal = (u32) m->addr;
- regs.nbeah = (u32)(m->addr >> 32);
- regs.nbcfg = nbcfg;
-
- __amd64_decode_bus_error(mci, &regs);
-
- /*
- * Check the UE bit of the NB status high register, if set generate some
- * logs. If NOT a GART error, then process the event as a NO-INFO event.
- * If it was a GART error, skip that process.
- *
- * FIXME: this should go somewhere else, if at all.
- */
- if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
- edac_mc_handle_ue_no_info(mci, "UE bit is set");
+ __amd64_decode_bus_error(mci, m);
}
/*
@@ -2027,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u64 msr_val;
u32 tmp;
- int dram;
+ unsigned range;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -2046,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt)
} else
debugf0(" TOP_MEM2 disabled.\n");
- amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
+ amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
- if (pvt->ops->read_dram_ctl_register)
- pvt->ops->read_dram_ctl_register(pvt);
+ read_dram_ctl_register(pvt);
- for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
- /*
- * Call CPU specific READ function to get the DRAM Base and
- * Limit values from the DCT.
- */
- pvt->ops->read_dram_base_limit(pvt, dram);
+ for (range = 0; range < DRAM_RANGES; range++) {
+ u8 rw;
- /*
- * Only print out debug info on rows with both R and W Enabled.
- * Normal processing, compiler should optimize this whole 'if'
- * debug output block away.
- */
- if (pvt->dram_rw_en[dram] != 0) {
- debugf1(" DRAM-BASE[%d]: 0x%016llx "
- "DRAM-LIMIT: 0x%016llx\n",
- dram,
- pvt->dram_base[dram],
- pvt->dram_limit[dram]);
-
- debugf1(" IntlvEn=%s %s %s "
- "IntlvSel=%d DstNode=%d\n",
- pvt->dram_IntlvEn[dram] ?
- "Enabled" : "Disabled",
- (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
- (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
- pvt->dram_IntlvSel[dram],
- pvt->dram_DstNode[dram]);
- }
+ /* read settings for this DRAM range */
+ read_dram_base_limit_regs(pvt, range);
+
+ rw = dram_rw(pvt, range);
+ if (!rw)
+ continue;
+
+ debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
+
+ debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
- amd64_read_dct_base_mask(pvt);
+ read_dct_base_mask(pvt);
- amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
- amd64_read_dbam_reg(pvt);
+ amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
+ amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
+ amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
+ amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
- if (boot_cpu_data.x86 >= 0x10) {
- if (!dct_ganging_enabled(pvt)) {
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
- }
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ if (!dct_ganging_enabled(pvt)) {
+ amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
+ amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
}
- if (boot_cpu_data.x86 == 0x10 &&
- boot_cpu_data.x86_model > 7 &&
- /* F3x180[EccSymbolSize]=1 => x8 symbols */
- tmp & BIT(25))
- pvt->syn_type = 8;
- else
- pvt->syn_type = 4;
+ pvt->ecc_sym_sz = 4;
- amd64_dump_misc_regs(pvt);
+ if (c->x86 >= 0x10) {
+ amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+
+ /* F10h, revD and later can do x8 ECC too */
+ if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+ pvt->ecc_sym_sz = 8;
+ }
+ dump_misc_regs(pvt);
}
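/*
 * Illustrative sketch (not part of this patch): the ECC symbol size
 * selection above, restated as a pure function -- family, model and
 * the F3x180 value are parameters here instead of reads from
 * boot_cpu_data and PCI config space.
 */
#include <stdint.h>

static unsigned sketch_ecc_sym_sz(unsigned family, unsigned model,
				  uint32_t ext_nb_mca_cfg)
{
	unsigned sym_sz = 4;	/* x4 syndromes by default */

	/* F10h revD and later, and newer families, can do x8 symbols */
	if (family >= 0x10 && (family > 0x10 || model > 7) &&
	    (ext_nb_mca_cfg & (1U << 25)))	/* F3x180[EccSymbolSize] */
		sym_sz = 8;

	return sym_sz;
}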
/*
* NOTE: CPU Revision Dependent code
*
* Input:
- * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
@@ -2144,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
@@ -2157,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
*/
cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
- nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+ nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
/*
* If dual channel then double the memory size of single channel.
@@ -2180,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr;
+ u64 input_addr_min, input_addr_max, sys_addr, base, mask;
u32 val;
int i, empty = 1;
- amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
- pvt->ctl_error_info.nbcfg = val;
debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
pvt->mc_node_id, val,
- !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- for (i = 0; i < pvt->cs_count; i++) {
+ for_each_chip_select(i, 0, pvt) {
csrow = &mci->csrows[i];
- if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+ if (!csrow_enabled(i, 0, pvt)) {
debugf1("----CSROW %d EMPTY for node %d\n", i,
pvt->mc_node_id);
continue;
@@ -2206,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci)
i, pvt->mc_node_id);
empty = 0;
- csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+ csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
- csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+
+ get_cs_base_and_mask(pvt, i, 0, &base, &mask);
+ csrow->page_mask = ~mask;
/* 8 bytes of resolution */
csrow->mtype = amd64_determine_memory_type(pvt, i);
@@ -2231,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci)
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
- if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE)
csrow->edac_mode =
- (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+ (pvt->nbcfg & NBCFG_CHIPKILL) ?
EDAC_S4ECD4ED : EDAC_SECDED;
else
csrow->edac_mode = EDAC_NONE;
@@ -2243,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
{
int cpu;
@@ -2253,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2270,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
for_each_cpu(cpu, mask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
- nbe = reg->l & K8_MSR_MCGCTL_NBE;
+ nbe = reg->l & MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, reg->q,
@@ -2305,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
struct msr *reg = per_cpu_ptr(msrs, cpu);
if (on) {
- if (reg->l & K8_MSR_MCGCTL_NBE)
+ if (reg->l & MSR_MCGCTL_NBE)
s->flags.nb_mce_enable = 1;
- reg->l |= K8_MSR_MCGCTL_NBE;
+ reg->l |= MSR_MCGCTL_NBE;
} else {
/*
* Turn off NB MCE reporting only when it was off before
*/
if (!s->flags.nb_mce_enable)
- reg->l &= ~K8_MSR_MCGCTL_NBE;
+ reg->l &= ~MSR_MCGCTL_NBE;
}
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
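/*
 * Illustrative userspace sketch (not part of this patch) of the
 * save/enable/restore dance above for the NBE bit, done on a plain
 * integer instead of a live MSR.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_NBE	(1u << 4)	/* MSR_MCGCTL_NBE */

int main(void)
{
	uint32_t mcgctl = 0;			/* NBE initially off */
	int was_enabled = !!(mcgctl & SKETCH_NBE);

	mcgctl |= SKETCH_NBE;			/* turn reporting on */
	if (!was_enabled)			/* tear-down: only clear what */
		mcgctl &= ~SKETCH_NBE;		/* we turned on ourselves */
	assert(mcgctl == 0);
	return 0;
}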
@@ -2328,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
bool ret = true;
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
if (toggle_ecc_err_reporting(s, nid, ON)) {
amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
return false;
}
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
- /* turn on UECCEn and CECCEn bits */
s->old_nbctl = value & mask;
s->nbctl_valid = true;
value |= mask;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
s->flags.nb_ecc_prev = 0;
/* Attempt to turn on DRAM ECC Enable */
- value |= K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ value |= NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("Hardware rejected DRAM ECC enable,"
"check memory DIMM configuration.\n");
ret = false;
@@ -2372,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
@@ -2382,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
+
if (!s->nbctl_valid)
return;
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
value &= ~mask;
value |= s->old_nbctl;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
if (!s->flags.nb_ecc_prev) {
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
- value &= ~K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
+ value &= ~NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
}
/* restore the NB Enable MCGCTL bit */
@@ -2423,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
u8 ecc_en = 0;
bool nb_mce_en = false;
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
@@ -2463,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
+ struct amd64_family_type *fam)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->nbcap & K8_NBCAP_SECDED)
+ if (pvt->nbcap & NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+ if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
mci->edac_cap = amd64_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
- mci->ctl_name = pvt->ctl_name;
+ mci->ctl_name = fam->ctl_name;
mci->dev_name = pci_name(pvt->F2);
mci->ctl_page_to_phys = NULL;
@@ -2500,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
case 0xf:
fam_type = &amd64_family_types[K8_CPUS];
pvt->ops = &amd64_family_types[K8_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
break;
+
case 0x10:
fam_type = &amd64_family_types[F10_CPUS];
pvt->ops = &amd64_family_types[F10_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+ break;
+
+ case 0x15:
+ fam_type = &amd64_family_types[F15_CPUS];
+ pvt->ops = &amd64_family_types[F15_CPUS].ops;
break;
default:
@@ -2517,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+ amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
(fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
@@ -2564,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
+ mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
if (!mci)
goto err_siblings;
mci->pvt_info = pvt;
mci->dev = &pvt->F2->dev;
- setup_mci_misc_attrs(mci);
+ setup_mci_misc_attrs(mci, fam_type);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
@@ -2714,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+
{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2754,7 +2668,7 @@ static int __init amd64_edac_init(void)
{
int err = -ENODEV;
- edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+ printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
opstate_init();
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f65..11be36a311eb 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -144,7 +144,7 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION "v3.3.0"
+#define EDAC_AMD64_VERSION "3.4.0"
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
@@ -153,85 +153,64 @@
#define K8_REV_F 4
/* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT 8
-#define DRAM_REG_COUNT 8
+#define NUM_CHIPSELECTS 8
+#define DRAM_RANGES 8
#define ON true
#define OFF false
/*
+ * Create a contiguous bitmask starting at bit position @lo and ending at
+ * position @hi. For example,
+ *
+ * GENMASK(21, 39) gives us the 64-bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
+
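/*
 * Illustrative userspace check (not part of this patch) of the
 * GENMASK() example above; the macro is duplicated locally so the
 * snippet stands alone.
 */
#include <assert.h>

#define SKETCH_GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	/* bits 21..39 set, everything else clear */
	assert(SKETCH_GENMASK(21, 39) == 0x000000ffffe00000ULL);
	return 0;
}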
+/*
* PCI-defined configuration space registers
*/
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
/*
* Function 1 - Address Map
*/
-#define K8_DRAM_BASE_LOW 0x40
-#define K8_DRAM_LIMIT_LOW 0x44
-#define K8_DHAR 0xf0
-
-#define DHAR_VALID BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+#define DRAM_BASE_LO 0x40
+#define DRAM_LIMIT_LO 0x44
-#define DHAR_BASE_MASK 0xff000000
-#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))
+#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3))
+#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
+#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
-#define K8_DHAR_OFFSET_MASK 0x0000ff00
-#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+#define DHAR 0xf0
+#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
+#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
+#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
+#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
-#define F10_DHAR_OFFSET_MASK 0x0000ff80
/* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16)
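/*
 * Illustrative userspace check (not part of this patch) of the DHAR
 * field extraction above; the register value is chosen arbitrarily
 * (base 0xc0, offset 0x2b, both valid bits set).
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t dhar = 0xc0002b03;

	assert((dhar & 0xff000000) == 0xc0000000);			/* dhar_base      */
	assert(((dhar & 0x0000ff00) << 16) == 0x2b000000);		/* k8_dhar_offset */
	assert(((uint64_t)(dhar & 0x0000ff80) << 16) == 0x2b000000);	/* f10_dhar_offset */
	return 0;
}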
+#define DCT_CFG_SEL 0x10C
-/* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH 0x140
-#define F10_DRAM_LIMIT_HIGH 0x144
+#define DRAM_BASE_HI 0x140
+#define DRAM_LIMIT_HI 0x144
/*
* Function 2 - DRAM controller
*/
-#define K8_DCSB0 0x40
-#define F10_DCSB1 0x140
+#define DCSB0 0x40
+#define DCSB1 0x140
+#define DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_NPT_SPARE BIT(1)
-#define K8_DCSB_NPT_TESTFAIL BIT(2)
+#define DCSM0 0x60
+#define DCSM1 0x160
-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT 4
-
-#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT 8
-
-/*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
- */
-#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT 8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0 0x60
-#define F10_DCSM1 0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
-
-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
-
-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
#define DBAM0 0x80
#define DBAM1 0x180
@@ -241,148 +220,84 @@
#define DBAM_MAX_VALUE 11
-
-#define F10_DCLR_0 0x90
-#define F10_DCLR_1 0x190
+#define DCLR0 0x90
+#define DCLR1 0x190
#define REVE_WIDTH_128 BIT(16)
-#define F10_WIDTH_128 BIT(11)
+#define WIDTH_128 BIT(11)
+#define DCHR0 0x94
+#define DCHR1 0x194
+#define DDR3_MODE BIT(8)
-#define F10_DCHR_0 0x94
-#define F10_DCHR_1 0x194
+#define DCT_SEL_LO 0x110
+#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
+#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
+#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
-#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
-#define DDR3_MODE BIT(8)
-#define F10_DCHR_MblMode BIT(6)
+#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
+#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
+#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10))
-#define F10_DCTL_SEL_LOW 0x110
-#define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10))
+#define SWAP_INTLV_REG 0x10c
-#define F10_DCTL_SEL_HIGH 0x114
+#define DCT_SEL_HI 0x114
/*
* Function 3 - Misc Control
*/
-#define K8_NBCTL 0x40
-
-/* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn BIT(0)
-
-/* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn BIT(1)
+#define NBCTL 0x40
-#define K8_NBCFG 0x44
-#define K8_NBCFG_CHIPKILL BIT(23)
-#define K8_NBCFG_ECC_ENABLE BIT(22)
+#define NBCFG 0x44
+#define NBCFG_CHIPKILL BIT(23)
+#define NBCFG_ECC_ENABLE BIT(22)
-#define K8_NBSL 0x48
-
-
-/* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES 0x0
+/* F3x48: NBSL */
#define F10_NBSL_EXT_ERR_ECC 0x8
+#define NBSL_PP_OBS 0x2
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
-
-#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
-
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK 0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
-
-/* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
-
-/* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC 0x0
-#define K8_NBSL_EXT_ERR_CRC 0x1
-#define K8_NBSL_EXT_ERR_SYNC 0x2
-#define K8_NBSL_EXT_ERR_MST 0x3
-#define K8_NBSL_EXT_ERR_TGT 0x4
-#define K8_NBSL_EXT_ERR_GART 0x5
-#define K8_NBSL_EXT_ERR_RMW 0x6
-#define K8_NBSL_EXT_ERR_WDT 0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
-
-/*
- * The following are for BUS type errors AFTER values have been normalized by
- * shifting right
- */
-#define K8_NBSL_PP_SRC 0x0
-#define K8_NBSL_PP_RES 0x1
-#define K8_NBSL_PP_OBS 0x2
-#define K8_NBSL_PP_GENERIC 0x3
-
-#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
-
-#define K8_NBEAL 0x50
-#define K8_NBEAH 0x54
-#define K8_SCRCTRL 0x58
-
-#define F10_NB_CFG_LOW 0x88
+#define SCRCTRL 0x58
#define F10_ONLINE_SPARE 0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
-#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
-#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
+#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)
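/*
 * Illustrative userspace check (not part of this patch) that the
 * channel-parameterized online-spare macros above match the fixed-bit
 * F10_ONLINE_SPARE_* definitions they replace.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t reg = (1u << 1) | (1u << 3) | (0x5u << 4) | (0x6u << 8);

	assert(((reg >> (1 + 2 * 0)) & 0x1) == 1);	/* SWAPDONE0   */
	assert(((reg >> (1 + 2 * 1)) & 0x1) == 1);	/* SWAPDONE1   */
	assert(((reg >> (4 + 4 * 0)) & 0x7) == 0x5);	/* BADDRAM_CS0 */
	assert(((reg >> (4 + 4 * 1)) & 0x7) == 0x6);	/* BADDRAM_CS1 */
	return 0;
}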
#define F10_NB_ARRAY_ADDR 0xB8
-
-#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+#define F10_NB_ARRAY_DRAM_ECC BIT(31)
/* Bits [2:1] are used to select a 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
#define F10_NB_ARRAY_DATA 0xBC
-
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(17) | bits)
-
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(16) | bits)
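/*
 * Illustrative userspace check (not part of this patch) of the
 * injection encodings above: the word index lands in bits [35:20]
 * (one-hot), bit 17 marks a write, bit 16 a read. BIT() is
 * duplicated locally and the values are arbitrary.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_BIT(n)	(1u << (n))

int main(void)
{
	unsigned word = 3, bits = 0xa5;
	uint32_t wr = SKETCH_BIT((word & 0xF) + 20) | SKETCH_BIT(17) | bits;
	uint32_t rd = SKETCH_BIT((word & 0xF) + 20) | SKETCH_BIT(16) | bits;

	assert(wr == (SKETCH_BIT(23) | SKETCH_BIT(17) | 0xa5));
	assert(rd == (SKETCH_BIT(23) | SKETCH_BIT(16) | 0xa5));
	return 0;
}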
-#define K8_NBCAP 0xE8
-#define K8_NBCAP_CORES (BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL BIT(4)
-#define K8_NBCAP_SECDED BIT(3)
-#define K8_NBCAP_DCT_DUAL BIT(0)
+#define NBCAP 0xE8
+#define NBCAP_CHIPKILL BIT(4)
+#define NBCAP_SECDED BIT(3)
+#define NBCAP_DCT_DUAL BIT(0)
#define EXT_NB_MCA_CFG 0x180
/* MSRs */
-#define K8_MSR_MCGCTL_NBE BIT(4)
-
-#define K8_MSR_MC4CTL 0x0410
-#define K8_MSR_MC4STAT 0x0411
-#define K8_MSR_MC4ADDR 0x0412
+#define MSR_MCGCTL_NBE BIT(4)
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_node_id(struct pci_dev *pdev)
+static inline u8 get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
-enum amd64_chipset_families {
+enum amd_families {
K8_CPUS = 0,
F10_CPUS,
+ F15_CPUS,
+ NUM_FAMILIES,
};
/* Error injection control structure */
@@ -392,13 +307,35 @@ struct error_injection {
u32 bit_map;
};
+/* low and high parts of PCI config space regs */
+struct reg_pair {
+ u32 lo, hi;
+};
+
+/*
+ * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
+ */
+struct dram_range {
+ struct reg_pair base;
+ struct reg_pair lim;
+};
+
+/* A DCT's collection of chip selects */
+struct chip_select {
+ u32 csbases[NUM_CHIPSELECTS];
+ u8 b_cnt;
+
+ u32 csmasks[NUM_CHIPSELECTS];
+ u8 m_cnt;
+};
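/*
 * Illustrative userspace walk (not part of this patch) over the new
 * chip_select layout, mirroring what csrow_enabled() tests per DCT;
 * the constants and values are local stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NUM_CS		8
#define SKETCH_CS_ENABLE	(1u << 0)	/* DCSB_CS_ENABLE */

struct sketch_chip_select {
	uint32_t csbases[SKETCH_NUM_CS];
	uint8_t b_cnt;
};

int main(void)
{
	struct sketch_chip_select cs = {
		.csbases = { 0x00000001, 0x00000000, 0x00100001 },
		.b_cnt = 3,
	};

	for (unsigned i = 0; i < cs.b_cnt; i++)
		if (cs.csbases[i] & SKETCH_CS_ENABLE)
			printf("csrow %u enabled, base reg 0x%08x\n",
			       i, cs.csbases[i]);
	return 0;
}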
+
struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
struct pci_dev *F1, *F2, *F3;
- int mc_node_id; /* MC index of this MC node */
+ unsigned mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
int channel_count;
@@ -414,60 +351,50 @@ struct amd64_pvt {
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
- u32 dcsb0[MAX_CS_COUNT];
- u32 dcsb1[MAX_CS_COUNT];
-
- /* DRAM CS Mask Registers F2x[1,0][6C:60] */
- u32 dcsm0[MAX_CS_COUNT];
- u32 dcsm1[MAX_CS_COUNT];
-
- /*
- * Decoded parts of DRAM BASE and LIMIT Registers
- * F1x[78,70,68,60,58,50,48,40]
- */
- u64 dram_base[DRAM_REG_COUNT];
- u64 dram_limit[DRAM_REG_COUNT];
- u8 dram_IntlvSel[DRAM_REG_COUNT];
- u8 dram_IntlvEn[DRAM_REG_COUNT];
- u8 dram_DstNode[DRAM_REG_COUNT];
- u8 dram_rw_en[DRAM_REG_COUNT];
-
- /*
- * The following fields are set at (load) run time, after CPU revision
- * has been determined, since the dct_base and dct_mask registers vary
- * based on revision
- */
- u32 dcsb_base; /* DCSB base bits */
- u32 dcsm_mask; /* DCSM mask bits */
- u32 cs_count; /* num chip selects (== num DCSB registers) */
- u32 num_dcsm; /* Number of DCSM registers */
- u32 dcs_mask_notused; /* DCSM notused mask bits */
- u32 dcs_shift; /* DCSB and DCSM shift value */
+ /* one for each DCT */
+ struct chip_select csels[2];
+
+ /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+ struct dram_range ranges[DRAM_RANGES];
u64 top_mem; /* top of memory below 4GB */
u64 top_mem2; /* top of memory above 4GB */
- u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
- u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
- u32 online_spare; /* On-Line spare Reg */
+ u32 dct_sel_lo; /* DRAM Controller Select Low */
+ u32 dct_sel_hi; /* DRAM Controller Select High */
+ u32 online_spare; /* On-Line spare Reg */
/* x4 or x8 syndromes in use */
- u8 syn_type;
-
- /* temp storage for when input is received from sysfs */
- struct err_regs ctl_error_info;
+ u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
+};
- /* DCT per-family scrubrate setting */
- u32 min_scrubrate;
+static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
- /* family name this instance is running on */
- const char *ctl_name;
+ if (boot_cpu_data.x86 == 0xf)
+ return addr;
-};
+ return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
+}
+
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
+
+ if (boot_cpu_data.x86 == 0xf)
+ return lim;
+
+ return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
+}
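/*
 * Illustrative userspace sketch (not part of this patch) of how the
 * lo/hi register halves above combine: lo[31:16] supplies address
 * bits [39:24], hi[7:0] supplies bits [47:40]. Values are arbitrary.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t sketch_dram_base(uint32_t lo, uint32_t hi)
{
	return (((uint64_t)lo & 0xffff0000) << 8) |
	       (((uint64_t)hi & 0x000000ff) << 40);
}

int main(void)
{
	/* lo[31:16] = 0x0040 -> 1 GiB; hi[7:0] = 0x01 -> +1 TiB */
	assert(sketch_dram_base(0x00400003, 0x01) == 0x010040000000ULL);
	return 0;
}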
+
+static inline u16 extract_syndrome(u64 status)
+{
+ return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
+}
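/*
 * Illustrative userspace check (not part of this patch) of the bit
 * split above: syndrome[7:0] lives in MCi_STATUS bits [54:47] and
 * syndrome[15:8] in bits [31:24].
 */
#include <assert.h>
#include <stdint.h>

static uint16_t sketch_extract_syndrome(uint64_t status)
{
	return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
}

int main(void)
{
	uint64_t status = (0xabULL << 47) | (0xcdULL << 24);

	assert(sketch_extract_syndrome(status) == 0xcdab);
	return 0;
}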
/*
* per-node ECC settings descriptor
@@ -482,14 +409,6 @@ struct ecc_settings {
} flags;
};
-extern const char *tt_msgs[4];
-extern const char *ll_msgs[4];
-extern const char *rrrr_msgs[16];
-extern const char *to_msgs[2];
-extern const char *pp_msgs[4];
-extern const char *ii_msgs[4];
-extern const char *htlink_msgs[8];
-
#ifdef CONFIG_EDAC_DEBUG
#define NUM_DBG_ATTRS 5
#else
@@ -511,14 +430,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
*/
struct low_ops {
int (*early_channel_count) (struct amd64_pvt *pvt);
-
- u64 (*get_error_address) (struct mem_ctl_info *mci,
- struct err_regs *info);
- void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
- void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
- struct err_regs *info, u64 SystemAddr);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
+ void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome);
+ int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+ int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
+ u32 *val, const char *func);
};
struct amd64_family_type {
@@ -527,28 +443,17 @@ struct amd64_family_type {
struct low_ops ops;
};
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 *val, const char *func)
-{
- int err = 0;
-
- err = pci_read_config_dword(pdev, offset, val);
- if (err)
- amd64_warn("%s: error reading F%dx%x.\n",
- func, PCI_FUNC(pdev->devfn), offset);
-
- return err;
-}
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func);
#define amd64_read_pci_cfg(pdev, offset, val) \
- amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+ __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
-/*
- * For future CPU versions, verify the following as new 'slow' rates appear and
- * modify the necessary skip values for the supported CPU.
- */
-#define K8_MIN_SCRUB_RATE_BITS 0x0
-#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define amd64_write_pci_cfg(pdev, offset, val) \
+ __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+ pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 688478de1cbd..303f10e03dda 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the READ request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
@@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the WRITE request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 39d97cfdf58c..73196f7b7229 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -785,10 +785,10 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
{
int err;
- debugf1("%s()\n", __func__);
+ debugf4("%s()\n", __func__);
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
@@ -818,7 +818,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
@@ -853,26 +853,26 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
* First remove all the attributes
*/
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
- debugf1("%s() seeking for group %s\n",
+ debugf4("%s() seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
list_for_each_entry(grp_kobj,
&mci->grp_kobj_list, list) {
- debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
+ debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
- debugf0("%s() group %s\n", __func__,
+ debugf4("%s() group %s\n", __func__,
sysfs_attrib->grp->name);
kobject_put(&grp_kobj->kobj);
}
}
- debugf1("%s() end of seeking for group %s\n",
+ debugf4("%s() end of seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
sysfs_remove_file(kobj, &sysfs_attrib->attr);
} else
@@ -979,7 +979,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
- debugf0("%s() unregister this mci kobj\n", __func__);
+ debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
@@ -989,18 +989,18 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
/* remove this mci instance's attributes */
if (mci->mc_driver_sysfs_attributes) {
- debugf0("%s() unregister mci private attributes\n", __func__);
+ debugf4("%s() unregister mci private attributes\n", __func__);
edac_remove_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes,
&mci->edac_mci_kobj, 0);
}
/* remove the symlink */
- debugf0("%s() remove_link\n", __func__);
+ debugf4("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
/* unregister this instance's kobject */
- debugf0("%s() remove_mci_instance\n", __func__);
+ debugf4("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index f6cf73d93359..795cfbc0bf50 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -594,6 +594,7 @@ static bool nb_noop_mce(u16 ec, u8 xec)
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u32 nbsh = (u32)(m->status >> 32);
@@ -602,9 +603,8 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);
/* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
- if ((boot_cpu_data.x86 == 0x10) &&
- (boot_cpu_data.x86_model > 7)) {
- if (nbsh & K8_NBSH_ERR_CPU_VAL)
+ if (c->x86 == 0x10 && c->x86_model > 7) {
+ if (nbsh & NBSH_ERR_CPU_VAL)
core = nbsh & nb_err_cpumask;
} else {
u8 assoc_cpus = nbsh & nb_err_cpumask;
@@ -646,7 +646,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
if (!fam_ops->nb_mce(ec, xec))
goto wrong_nb_mce;
- if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
+ if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
nb_bus_decoder(node_id, m, nbcfg);
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 45dda47173f2..795a3206acf5 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -31,19 +31,10 @@
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
-#define K8_NBSH 0x4C
-
-#define K8_NBSH_VALID_BIT BIT(31)
-#define K8_NBSH_OVERFLOW BIT(30)
-#define K8_NBSH_UC_ERR BIT(29)
-#define K8_NBSH_ERR_EN BIT(28)
-#define K8_NBSH_MISCV BIT(27)
-#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
-#define K8_NBSH_PCC BIT(25)
-#define K8_NBSH_ERR_CPU_VAL BIT(24)
-#define K8_NBSH_CECC BIT(14)
-#define K8_NBSH_UECC BIT(13)
-#define K8_NBSH_ERR_SCRUBER BIT(8)
+/*
+ * F3x4C bits (MCi_STATUS' high half)
+ */
+#define NBSH_ERR_CPU_VAL BIT(24)
enum tt_ids {
TT_INSTR = 0,
@@ -86,17 +77,6 @@ extern const char *to_msgs[];
extern const char *ii_msgs[];
/*
- * relevant NB regs
- */
-struct err_regs {
- u32 nbcfg;
- u32 nbsh;
- u32 nbsl;
- u32 nbeah;
- u32 nbeal;
-};
-
-/*
* per-family decoder ops
*/
struct amd_decoder_ops {
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0902d4460039..a6feb78c404c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -73,32 +73,17 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_I810
tristate "Intel I810"
- # BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP
- depends on DRM && AGP && AGP_INTEL && BKL
+ # !PREEMPT because of missing ioctl locking
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
for this driver to work.
-choice
- prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
- depends on DRM && AGP && AGP_INTEL
- optional
-
-config DRM_I830
- tristate "i830 driver"
- # BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
- depends on BKL
- help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM or 865G integrated graphics. If M is selected, the
- module will be called i830. AGP support is required for this driver
- to work. This driver is used by the older X releases X.org 6.7 and
- XFree86 4.3. If unsure, build this and i915 as modules and the X server
- will load the correct one.
-
config DRM_I915
- tristate "i915 driver"
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -115,12 +100,20 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
- module will be called i915. AGP support is required for this driver
- to work. This driver is used by the Intel driver in X.org 6.8 and
- XFree86 4.4 and above. If unsure, build this and i830 as modules and
- the X server will load the correct one.
+ Choose this option if you have a system that has "Intel Graphics
+ Media Accelerator" or "HD Graphics" integrated graphics,
+ including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+ G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+ Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+ If M is selected, the module will be called i915. AGP support
+ is required for this driver to work. This driver is used by
+ the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+ replaces the older i830 module that supported a subset of the
+ hardware in older X.org releases.
+
+ Note that the older i810/i815 chipsets require the use of the
+ i810 driver instead, and the Atom z5xx series has an entirely
+ different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
@@ -132,8 +125,6 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
-endchoice
-
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 997c43d04909..89cf05a72d1c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o
+ drm_trace_points.o drm_global.o drm_usb.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
@@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 654faa803dcb..4c95b5fd9df3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2694,3 +2694,36 @@ void drm_mode_config_reset(struct drm_device *dev)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 271835a71570..93a112d45c1a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -67,6 +67,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -150,7 +151,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -234,49 +238,6 @@ int drm_lastclose(struct drm_device * dev)
return 0;
}
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
- return drm_platform_init(driver);
- else
- return drm_pci_init(driver);
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a245d17165ae..9c595e3b9c20 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -230,24 +230,32 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
+ int ret, retries = 5;
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
+ /* The core i2c driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+ * are susceptible to errors on a heavily loaded machine and
+ * generate spurious NAKs and timeouts. Retrying the transfer
+ * of the individual block a few times seems to overcome this.
+ */
+ do {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, 2);
+ } while (ret != 2 && --retries);
- return -1;
+ return ret == 2 ? 0 : -1;
}
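/*
 * Illustrative sketch (not part of this patch) of the bounded retry
 * loop above, with a stand-in for i2c_transfer() that fails twice
 * before succeeding.
 */
#include <assert.h>
#include <stdbool.h>

static int calls;

static bool sketch_transfer_once(void)
{
	return ++calls >= 3;	/* succeed on the third attempt */
}

int main(void)
{
	int retries = 5;
	bool ok;

	do {
		ok = sketch_transfer_once();
	} while (!ok && --retries);

	assert(ok && retries == 3);	/* two retries consumed */
	return 0;
}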
static u8 *
@@ -449,12 +457,11 @@ static void edid_fixup_preferred(struct drm_connector *connector,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
+ struct drm_display_mode *mode = NULL;
int i;
- struct drm_display_mode *ptr, *mode;
- mode = NULL;
for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
@@ -885,7 +892,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
}
static bool
-mode_is_rb(struct drm_display_mode *mode)
+mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
@@ -894,7 +901,8 @@ mode_is_rb(struct drm_display_mode *mode)
}
static bool
-mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
@@ -910,7 +918,8 @@ mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
}
static bool
-mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
@@ -941,7 +950,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
}
static bool
-mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
@@ -1472,7 +1481,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
+ struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -1482,7 +1491,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 6eb7592e152f..5f2064489fd5 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -32,7 +32,7 @@
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
-static struct drm_display_mode drm_dmt_modes[] = {
+static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
@@ -266,7 +266,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode edid_est_modes[] = {
+static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f73ef4390db6..950720473967 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -627,6 +627,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
palette[regno] = value;
return 0;
}
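/*
 * Illustrative userspace check (not part of this patch) of the
 * transparency fix-up above: for an ARGB8888 visual (8 alpha bits at
 * offset 24) the palette entry gets its alpha forced to opaque.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned transp_length = 8, transp_offset = 24;
	uint32_t value = 0x00123456;	/* packed RGB, alpha still zero */
	uint32_t mask = ((1u << transp_length) - 1) << transp_offset;

	value |= mask;
	assert(value == 0xff123456);
	return 0;
}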
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ea1c4b019ebf..57ce27c9a747 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,7 +101,7 @@ drm_gem_init(struct drm_device *dev)
dev->mm_private = mm;
- if (drm_ht_create(&mm->offset_hash, 19)) {
+ if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
-static int
+int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
@@ -214,6 +214,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
return 0;
}
+EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* Create a handle for this object. This adds a handle reference
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index a93d7b4ddaa6..e3a75688f3cd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -39,27 +39,18 @@
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
- unsigned int i;
+ unsigned int size = 1 << order;
- ht->size = 1 << order;
ht->order = order;
- ht->fill = 0;
ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(size*sizeof(*ht->table));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
- for (i=0; i< ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
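/*
 * Illustrative sketch (not part of this patch): the page-size cutoff
 * above as a predicate -- tables that fit in one page use kcalloc(),
 * larger ones use vzalloc(). With 4 KiB pages and 8-byte list heads
 * the cutoff is order 9.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

static bool sketch_fits_in_page(unsigned order, size_t page_size,
				size_t head_size)
{
	return ((size_t)1 << order) <= page_size / head_size;
}

int main(void)
{
	assert(sketch_fits_in_page(9, 4096, 8));	/* kcalloc()            */
	assert(!sketch_fits_in_page(12, 4096, 8));	/* vzalloc(), GEM's order */
	return 0;
}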
@@ -180,7 +171,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init(list);
- ht->fill--;
return 0;
}
return -EINVAL;
@@ -189,7 +179,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
- ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
@@ -197,10 +186,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
+ if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
kfree(ht->table);
+ else
+ vfree(ht->table);
ht->table = NULL;
}
}
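/* Annotation (not part of the patch): the expression
 * (PAGE_SIZE / sizeof(*ht->table)) >> ht->order is non-zero exactly when
 * 1 << order buckets fit in one page, i.e. when drm_ht_create() used
 * kcalloc(). It recomputes the dropped use_vmalloc flag from ht->order
 * alone, so the matching one of kfree()/vfree() is chosen. */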
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index be9a9c07d152..ab1162da70f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -47,30 +47,19 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
-
+ const char *bus_name;
if (!master)
return 0;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->platform_device->name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s\n",
- dev->driver->platform_device->name);
- }
+ bus_name = dev->driver->bus->get_name(dev);
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ bus_name,
+ dev_name(dev->dev), master->unique);
} else {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- dev_name(dev->dev));
- }
+ seq_printf(m, "%s %s\n",
+ bus_name, dev_name(dev->dev));
}
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 47db4df37a69..7f6912a16761 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -96,7 +96,7 @@ int drm_setunique(struct drm_device *dev, void *data,
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
+ int ret;
if (master->unique_len || master->unique)
return -EBUSY;
@@ -104,50 +104,12 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(master->unique, u->unique, master->unique_len)) {
- ret = -EFAULT;
- goto err;
- }
-
- master->unique[master->unique_len] = '\0';
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
+ if (!dev->driver->bus->set_unique)
+ return -EINVAL;
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3) {
- ret = -EINVAL;
+ ret = dev->driver->bus->set_unique(dev, master, u);
+ if (ret)
goto err;
- }
-
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn))) {
- ret = -EINVAL;
- goto err;
- }
return 0;
@@ -159,74 +121,15 @@ err:
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
- int len, ret;
+ int ret;
if (master->unique != NULL)
drm_unset_busid(dev, master);
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- master->unique_len = 10 + strlen(dev->platformdev->name);
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
-
- if (len > master->unique_len) {
- DRM_ERROR("Unique buffer overflowed\n");
- ret = -EINVAL;
- goto err;
- }
-
- dev->devname =
- kmalloc(strlen(dev->platformdev->name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->platformdev->name,
- master->unique);
-
- } else {
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len) {
- DRM_ERROR("buffer overflow");
- ret = -EINVAL;
- goto err;
- } else
- master->unique_len = len;
-
- dev->devname =
- kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
- }
-
+ ret = dev->driver->bus->set_busid(dev, master);
+ if (ret)
+ goto err;
return 0;
-
err:
drm_unset_busid(dev, master);
return ret;
@@ -365,6 +268,25 @@ int drm_getstats(struct drm_device *dev, void *data,
}
/**
+ * Get device/driver capabilities
+ */
+int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
* Setversion ioctl.
*
* \param inode device inode.
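Annotation (not part of the patch): a minimal userspace sketch of the new
capability query. It assumes the DRM_IOCTL_GET_CAP and struct drm_get_cap
definitions added alongside this ioctl; the device path is illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
		printf("dumb buffers: %s\n", cap.value ? "yes" : "no");
	close(fd);
	return 0;
}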
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 28d1d3c24d65..a34ef97d3c81 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -74,23 +74,13 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ if (!dev->driver->bus->irq_by_busid)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
- p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-
- return 0;
+ return dev->driver->bus->irq_by_busid(dev, p);
}
/*
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c59515ba7e69..add1737dae0d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
else {
child =
list_entry(mm->unused_nodes.next,
- struct drm_mm_node, free_stack);
- list_del(&child->free_stack);
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
@@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
return ret;
}
++mm->num_unused;
- list_add_tail(&node->free_stack, &mm->unused_nodes);
+ list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
-
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
+ return hole_node->start + hole_node->size;
+}
- list_add_tail(&child->node_list, &mm->node_list);
- list_add_tail(&child->free_stack, &mm->free_stack);
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *next_node =
+ list_entry(hole_node->node_list.next, struct drm_mm_node,
+ node_list);
- return 0;
+ return next_node->start;
}
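/* Annotation (not part of the patch): with these two helpers, free space
 * is tracked implicitly. A hole is the gap between one node's end and
 * the next node's start on node_list, and exists only when hole_follows
 * is set on the earlier node:
 *
 *	|<--- node A --->|...... hole ......|<--- node B --->|
 *	                 ^                  ^
 *	                 A->start + A->size B->start
 *	                 = hole_node_start(A) = hole_node_end(A)
 */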
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
{
- struct drm_mm_node *child;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- INIT_LIST_HEAD(&child->free_stack);
+ if (alignment)
+ tmp = hole_start % alignment;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
+ if (!tmp) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ } else
+ wasted = alignment - tmp;
- list_add_tail(&child->node_list, &parent->node_list);
- INIT_LIST_HEAD(&child->free_stack);
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
- parent->size -= size;
- parent->start += size;
- return child;
-}
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
int atomic)
{
+ struct drm_mm_node *node;
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- if (alignment)
- tmp = node->start % alignment;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
+ return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm_node *hole_node;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ if (!hole_node)
+ return -ENOSPC;
- return node;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_insert_node);
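Annotation (not part of the patch): a minimal usage sketch for the new
embedded-node API. The struct and function names are illustrative; per the
comment above, the node must be zeroed before insertion.

struct my_buffer {
	struct drm_mm_node node;	/* embedded, no separate allocation */
	/* driver payload ... */
};

static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
			  unsigned long size)
{
	int ret;

	/* buf comes from kzalloc(), so buf->node is already cleared */
	ret = drm_mm_insert_node(mm, &buf->node, size, 0);
	if (ret)
		return ret;	/* -ENOSPC: no hole large enough */

	/* ... use buf->node.start as the object's offset ... */
	return 0;
}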
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- if (node->start < start)
- wasted += start - node->start;
+ if (hole_start < start)
+ wasted += start - hole_start;
if (alignment)
- tmp = ((node->start + wasted) % alignment);
+ tmp = (hole_start + wasted) % alignment;
if (tmp)
wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
+
+ if (!wasted) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
}
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > end);
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
} else {
- node = drm_mm_split_at_start(node, size, atomic);
+ node->hole_follows = 0;
}
+}
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
*/
-
-void drm_mm_put_block(struct drm_mm_node *cur)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
+ struct drm_mm_node *hole_node;
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->node_list;
- struct list_head *root_head = &mm->node_list;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
- int merged = 0;
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
- BUG_ON(cur->scanned_block || cur->scanned_prev_free
- || cur->scanned_next_free);
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, node_list);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, node_list);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->node_list);
- list_del(&next_node->free_stack);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->free_stack,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->free_stack, &mm->free_stack);
- } else {
- list_del(&cur->node_list);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->free_stack, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ BUG_ON(node->scanned_block || node->scanned_prev_free
+ || node->scanned_next_free);
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ BUG_ON(drm_mm_hole_node_start(node)
+ == drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ } else
+ BUG_ON(drm_mm_hole_node_start(node)
+ != drm_mm_hole_node_end(node));
+
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+
+ drm_mm_remove_node(node);
+
+ spin_lock(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ kfree(node);
+ spin_unlock(&mm->unused_lock);
+}
EXPORT_SYMBOL(drm_mm_put_block);
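/* Annotation (not part of the patch): the two teardown paths now differ
 * only in who owns the node's storage:
 *
 *	drm_mm_remove_node(node);	// embedded node, caller keeps storage
 *	drm_mm_put_block(node);		// node from drm_mm_get_block_*(),
 *					// recycled or kfree()d here
 */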
static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_hole(entry->start, entry->start + entry->size,
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ BUG_ON(!entry->hole_follows);
+ if (!check_free_hole(drm_mm_hole_node_start(entry),
+ drm_mm_hole_node_end(entry),
size, alignment))
continue;
@@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- unsigned long adj_start = entry->start < start ?
- start : entry->start;
- unsigned long adj_end = entry->start + entry->size > end ?
- end : entry->start + entry->size;
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+ start : drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+ end : drm_mm_hole_node_end(entry);
+ BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -376,6 +426,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
+/**
* Initialize lru scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
@@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
@@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
@@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct list_head *prev_free, *next_free;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
mm->scanned_blocks++;
- prev_free = next_free = NULL;
-
- BUG_ON(node->free);
+ BUG_ON(node->scanned_block);
node->scanned_block = 1;
- node->free = 1;
-
- if (node->node_list.prev != &mm->node_list) {
- prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
- node_list);
-
- if (prev_node->free) {
- list_del(&prev_node->node_list);
- node->start = prev_node->start;
- node->size += prev_node->size;
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- prev_node->scanned_prev_free = 1;
-
- prev_free = &prev_node->free_stack;
- }
- }
-
- if (node->node_list.next != &mm->node_list) {
- next_node = list_entry(node->node_list.next, struct drm_mm_node,
- node_list);
-
- if (next_node->free) {
- list_del(&next_node->node_list);
-
- node->size += next_node->size;
-
- next_node->scanned_next_free = 1;
-
- next_free = &next_node->free_stack;
- }
- }
-
- /* The free_stack list is not used for allocated objects, so these two
- * pointers can be abused (as long as no allocations in this memory
- * manager happens). */
- node->free_stack.prev = prev_free;
- node->free_stack.next = next_free;
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+ hole_start = drm_mm_hole_node_start(prev_node);
+ hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
- adj_start = node->start < mm->scan_start ?
- mm->scan_start : node->start;
- adj_end = node->start + node->size > mm->scan_end ?
- mm->scan_end : node->start + node->size;
+ adj_start = hole_start < mm->scan_start ?
+ mm->scan_start : hole_start;
+ adj_end = hole_end > mm->scan_end ?
+ mm->scan_end : hole_end;
} else {
- adj_start = node->start;
- adj_end = node->start + node->size;
+ adj_start = hole_start;
+ adj_end = hole_end;
}
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
- mm->scan_hit_start = node->start;
- mm->scan_hit_size = node->size;
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_size = hole_end;
return 1;
}
@@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
mm->scanned_blocks--;
BUG_ON(!node->scanned_block);
node->scanned_block = 0;
- node->free = 0;
-
- prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
- free_stack);
- next_node = list_entry(node->free_stack.next, struct drm_mm_node,
- free_stack);
- if (prev_node) {
- BUG_ON(!prev_node->scanned_prev_free);
- prev_node->scanned_prev_free = 0;
-
- list_add_tail(&prev_node->node_list, &node->node_list);
-
- node->start = prev_node->start + prev_node->size;
- node->size -= prev_node->size;
- }
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- if (next_node) {
- BUG_ON(!next_node->scanned_next_free);
- next_node->scanned_next_free = 0;
-
- list_add(&next_node->node_list, &node->node_list);
-
- node->size -= next_node->size;
- }
-
- INIT_LIST_HEAD(&node->free_stack);
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ INIT_LIST_HEAD(&node->node_list);
+ list_add(&node->node_list, &prev_node->node_list);
/* Only need to check for containment because start&size for the
* complete resulting free block (not just the desired part) is
@@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
int drm_mm_clean(struct drm_mm * mm)
{
- struct list_head *head = &mm->node_list;
+ struct list_head *head = &mm->head_node.node_list;
return (head->next->next == head);
}
@@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
- INIT_LIST_HEAD(&mm->node_list);
- INIT_LIST_HEAD(&mm->free_stack);
+ INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
- return drm_mm_create_tail_node(mm, start, size, 0);
+ /* Clever trick to avoid a special case in the free hole tracking. */
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+ return 0;
}
EXPORT_SYMBOL(drm_mm_init);
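/* Annotation (not part of the patch): worked example of the head_node
 * trick. For drm_mm_init(mm, 0x1000, 0x9000):
 *
 *	head_node.start = 0x1000 + 0x9000 = 0xA000
 *	head_node.size  = 0x1000 - 0xA000	(wraps modulo 2^BITS_PER_LONG)
 *
 * so drm_mm_hole_node_start(&mm->head_node) = 0xA000 + (0x1000 - 0xA000)
 * = 0x1000, the start of the range, and with the node list empty
 * drm_mm_hole_node_end() wraps around to head_node itself and yields
 * 0xA000, the end. The whole managed range is thus one ordinary hole,
 * with no special-cased sentinel code anywhere. */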
void drm_mm_takedown(struct drm_mm * mm)
{
- struct list_head *bnode = mm->free_stack.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
+ struct drm_mm_node *entry, *next;
- entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
- if (entry->node_list.next != &mm->node_list ||
- entry->free_stack.next != &mm->free_stack) {
+ if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->free_stack);
- list_del(&entry->node_list);
- kfree(entry);
-
spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
- list_del(&entry->free_stack);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
@@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ entry->size);
+ total_used += entry->size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+ }
}
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total = total_free + total_used;
+
+ printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+ entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+ }
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+ total = total_free + total_used;
+
+ seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 58e65f92c232..25bf87390f53 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -593,7 +593,7 @@ EXPORT_SYMBOL(drm_mode_height);
*
* Return @modes's hsync rate in kHz, rounded to the nearest int.
*/
-int drm_mode_hsync(struct drm_display_mode *mode)
+int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(drm_mode_hsync);
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
*/
-int drm_mode_vrefresh(struct drm_display_mode *mode)
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
@@ -725,7 +725,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f5bd9e590c80..e1aee4f6a7c6 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -125,6 +125,176 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
#ifdef CONFIG_PCI
+
+static int drm_get_pci_domain(struct drm_device *dev)
+{
+#ifndef __alpha__
+ /* For historical reasons, drm_get_pci_domain() is busticated
+ * on most archs and has to remain so for userspace interface
+ * < 1.4, except on alpha which was right from the beginning
+ */
+ if (dev->if_version < 0x10004)
+ return 0;
+#endif /* __alpha__ */
+
+ return pci_domain_nr(dev->pdev->bus);
+}
+
+static int drm_pci_get_irq(struct drm_device *dev)
+{
+ return dev->pdev->irq;
+}
+
+static const char *drm_pci_get_name(struct drm_device *dev)
+{
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ return pdriver->name;
+}
+
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+
+ if (len >= master->unique_len) {
+ DRM_ERROR("buffer overflow");
+ ret = -EINVAL;
+ goto err;
+ } else
+ master->unique_len = len;
+
+ dev->devname =
+ kmalloc(strlen(pdriver->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", pdriver->name,
+ master->unique);
+
+ return 0;
+err:
+ return ret;
+}
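/* Annotation (not part of the patch): the unique generated above looks
 * like "pci:0000:02:00.0" (domain:bus:slot.function), and dev->devname
 * becomes "<driver>@pci:0000:02:00.0"; userspace (e.g. libdrm's
 * drmOpenByBusid()) can match the busid it derives from sysfs against
 * this string. */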
+
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
+{
+ int domain, bus, slot, func, ret;
+ const char *bus_name;
+
+ master->unique_len = u->unique_len;
+ master->unique_size = u->unique_len + 1;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ master->unique[master->unique_len] = '\0';
+
+ bus_name = dev->driver->bus->get_name(dev);
+ dev->devname = kmalloc(strlen(bus_name) +
+ strlen(master->unique) + 2, GFP_KERNEL);
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", bus_name,
+ master->unique);
+
+ /* Return error if the busid submitted doesn't match the device's actual
+ * busid.
+ */
+ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ domain = bus >> 8;
+ bus &= 0xff;
+
+ if ((domain != drm_get_pci_domain(dev)) ||
+ (bus != dev->pdev->bus->number) ||
+ (slot != PCI_SLOT(dev->pdev->devfn)) ||
+ (func != PCI_FUNC(dev->pdev->devfn))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ return ret;
+}
+
+
+int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+{
+ if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+ (p->busnum & 0xff) != dev->pdev->bus->number ||
+ p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ return -EINVAL;
+
+ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+ return 0;
+}
+
+int drm_pci_agp_init(struct drm_device *dev)
+{
+ if (drm_core_has_AGP(dev)) {
+ if (drm_pci_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+ && (dev->agp == NULL)) {
+ DRM_ERROR("Cannot initialize the agpgart module.\n");
+ return -EINVAL;
+ }
+ if (drm_core_has_MTRR(dev)) {
+ if (dev->agp)
+ dev->agp->agp_mtrr =
+ mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+ }
+ }
+ return 0;
+}
+
+static struct drm_bus drm_pci_bus = {
+ .bus_type = DRIVER_BUS_PCI,
+ .get_irq = drm_pci_get_irq,
+ .get_name = drm_pci_get_name,
+ .set_busid = drm_pci_set_busid,
+ .set_unique = drm_pci_set_unique,
+ .agp_init = drm_pci_agp_init,
+};
+
/**
* Register.
*
@@ -219,7 +389,7 @@ err_g1:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
- * PCI device initialization. Called via drm_init at module load time,
+ * PCI device initialization. Called directly from modules at load time.
*
* \return zero on success or a negative number on failure.
*
@@ -229,18 +399,24 @@ EXPORT_SYMBOL(drm_get_pci_dev);
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.pci = pdriver;
+ driver->bus = &drm_pci_bus;
+
if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
+ return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
+ for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
+ pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
@@ -265,10 +441,27 @@ int drm_pci_init(struct drm_driver *driver)
#else
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
return -1;
}
#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 92d1d0fb7b75..7223f06d8e58 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -109,8 +109,60 @@ err_g1:
}
EXPORT_SYMBOL(drm_get_platform_dev);
+static int drm_platform_get_irq(struct drm_device *dev)
+{
+ return platform_get_irq(dev->platformdev, 0);
+}
+
+static const char *drm_platform_get_name(struct drm_device *dev)
+{
+ return dev->platformdev->name;
+}
+
+static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+
+ master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "platform:%s", dev->platformdev->name);
+
+ if (len > master->unique_len) {
+ DRM_ERROR("Unique buffer overflowed\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev->devname =
+ kmalloc(strlen(dev->platformdev->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+ master->unique);
+ return 0;
+err:
+ return ret;
+}
+
+static struct drm_bus drm_platform_bus = {
+ .bus_type = DRIVER_BUS_PLATFORM,
+ .get_irq = drm_platform_get_irq,
+ .get_name = drm_platform_get_name,
+ .set_busid = drm_platform_set_busid,
+};
+
/**
- * Platform device initialization. Called via drm_init at module load time,
+ * Platform device initialization. Called directly from modules.
*
* \return zero on success or a negative number on failure.
*
@@ -121,7 +173,24 @@ EXPORT_SYMBOL(drm_get_platform_dev);
* after the initialization for driver customization.
*/
-int drm_platform_init(struct drm_driver *driver)
+int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
- return drm_get_platform_dev(driver->platform_device, driver);
+ DRM_DEBUG("\n");
+
+ driver->kdriver.platform_device = platform_device;
+ driver->bus = &drm_platform_bus;
+ INIT_LIST_HEAD(&driver->device_list);
+ return drm_get_platform_dev(platform_device, driver);
+}
+EXPORT_SYMBOL(drm_platform_init);
+
+void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ DRM_INFO("Module unloaded\n");
}
+EXPORT_SYMBOL(drm_platform_exit);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d59edc18301f..001273d57f2d 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -269,25 +269,14 @@ int drm_fill_in_dev(struct drm_device *dev,
dev->driver = driver;
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
+ if (dev->driver->bus->agp_init) {
+ retcode = dev->driver->bus->agp_init(dev);
+ if (retcode)
goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
}
+
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@@ -425,7 +414,6 @@ int drm_put_minor(struct drm_minor **minor_p)
*
* Cleans up all DRM device, calling drm_lastclose().
*
- * \sa drm_init
*/
void drm_put_dev(struct drm_device *dev)
{
@@ -475,6 +463,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
+ list_del(&dev->driver_item);
if (dev->devname) {
kfree(dev->devname);
dev->devname = NULL;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 85da4c40694c..2eee8e016b38 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
+ int ret;
+
+ ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
+ if (ret)
+ return ret;
status = connector->funcs->detect(connector, true);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
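/* Annotation (not part of the patch): mutex_lock_interruptible() returns
 * -EINTR if a signal arrives while another thread holds the mode_config
 * mutex, so a sysfs read of "status" that is blocked behind a slow
 * detect() can now be interrupted instead of sleeping uninterruptibly. */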
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
new file mode 100644
index 000000000000..206d2300d873
--- /dev/null
+++ b/drivers/gpu/drm/drm_usb.c
@@ -0,0 +1,117 @@
+#include "drmP.h"
+#include <linux/usb.h>
+
+#ifdef CONFIG_USB
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ struct usb_device *usbdev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ usbdev = interface_to_usbdev(interface);
+ dev->usbdev = usbdev;
+ dev->dev = &usbdev->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_fill_in_dev(dev, NULL, driver);
+ if (ret) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g1;
+ }
+
+ usb_set_intfdata(interface, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g1;
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_g2;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, 0);
+ if (ret)
+ goto err_g3;
+ }
+
+ /* setup the grouping for the legacy output */
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g3;
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ mutex_unlock(&drm_global_mutex);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_g3:
+ drm_put_minor(&dev->primary);
+err_g2:
+ drm_put_minor(&dev->control);
+err_g1:
+ kfree(dev);
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_get_usb_dev);
+
+static int drm_usb_get_irq(struct drm_device *dev)
+{
+ return 0;
+}
+
+static const char *drm_usb_get_name(struct drm_device *dev)
+{
+ return "USB";
+}
+
+static int drm_usb_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return 0;
+}
+
+static struct drm_bus drm_usb_bus = {
+ .bus_type = DRIVER_BUS_USB,
+ .get_irq = drm_usb_get_irq,
+ .get_name = drm_usb_get_name,
+ .set_busid = drm_usb_set_busid,
+};
+
+int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
+{
+ int res;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.usb = udriver;
+ driver->bus = &drm_usb_bus;
+
+ res = usb_register(udriver);
+ return res;
+}
+EXPORT_SYMBOL(drm_usb_init);
+
+void drm_usb_exit(struct drm_driver *driver,
+ struct usb_driver *udriver)
+{
+ usb_deregister(udriver);
+}
+EXPORT_SYMBOL(drm_usb_exit);
+#endif
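Annotation (not part of the patch): a minimal sketch of how a USB DRM
driver would hook into the new helpers; every name below is illustrative,
not from this series.

static struct usb_device_id my_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* illustrative IDs */
	{ }
};

static struct usb_driver my_usb_driver = {
	.name = "my_drm_usb",
	.probe = my_probe,		/* would call drm_get_usb_dev() */
	.disconnect = my_disconnect,
	.id_table = my_id_table,
};

static struct drm_driver my_drm_driver = {
	.driver_features = DRIVER_MODESET,
	/* fops, load, name/desc/date, ... */
};

static int __init my_init(void)
{
	return drm_usb_init(&my_drm_driver, &my_usb_driver);
}

static void __exit my_exit(void)
{
	drm_usb_exit(&my_drm_driver, &my_usb_driver);
}

module_init(my_init);
module_exit(my_exit);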
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ff33e53bbbf8..8f371e8d630f 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -37,7 +37,6 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@@ -94,7 +93,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
- lock_kernel();
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
@@ -104,7 +102,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
- unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
@@ -116,7 +113,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
.llseek = noop_llseek,
@@ -1242,19 +1239,6 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
-/*
- * call the drm_ioctl under the big kernel lock because
- * to lock against the i810_mmap_buffers function.
- */
-long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 88bcd331e7c5..6f98d059f68a 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,18 +57,13 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -77,15 +72,24 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i810_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init i810_init(void)
{
+ if (num_possible_cpus() > 1) {
+ pr_err("drm/i810 does not support SMP\n");
+ return -EINVAL;
+ }
driver.num_ioctls = i810_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
deleted file mode 100644
index c642ee0b238c..000000000000
--- a/drivers/gpu/drm/i830/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-ccflags-y := -Iinclude/drm
-i830-y := i830_drv.o i830_dma.o i830_irq.o
-
-obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
deleted file mode 100644
index ca6f31ff0eec..000000000000
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ /dev/null
@@ -1,1560 +0,0 @@
-/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/smp_lock.h>
-#include <linux/pagemap.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-
-#define I830_BUF_FREE 2
-#define I830_BUF_CLIENT 1
-#define I830_BUF_HARDWARE 0
-
-#define I830_BUF_UNMAPPED 0
-#define I830_BUF_MAPPED 1
-
-static struct drm_buf *i830_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int used;
-
- /* Linear search might not be the best solution */
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
- I830_BUF_CLIENT);
- if (used == I830_BUF_FREE)
- return buf;
- }
- return NULL;
-}
-
-/* This should only be called if the buffer is not sent to the hardware
- * yet, the hardware updates in use for us once its on the ring buffer.
- */
-
-static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int used;
-
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
- if (used != I830_BUF_CLIENT) {
- DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- drm_i830_private_t *dev_priv;
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
-
- lock_kernel();
- dev = priv->minor->dev;
- dev_priv = dev->dev_private;
- buf = dev_priv->mmap_buffer;
- buf_priv = buf->dev_private;
-
- vma->vm_flags |= (VM_IO | VM_DONTCOPY);
- vma->vm_file = filp;
-
- buf_priv->currently_mapped = I830_BUF_MAPPED;
- unlock_kernel();
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static const struct file_operations i830_buffer_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = i830_mmap_buffers,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
-static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_private_t *dev_priv = dev->dev_private;
- const struct file_operations *old_fops;
- unsigned long virtual;
- int retcode = 0;
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- old_fops = file_priv->filp->f_op;
- file_priv->filp->f_op = &i830_buffer_fops;
- dev_priv->mmap_buffer = buf;
- virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
- MAP_SHARED, buf->bus_address);
- dev_priv->mmap_buffer = NULL;
- file_priv->filp->f_op = old_fops;
- if (IS_ERR((void *)virtual)) { /* ugh */
- /* Real error */
- DRM_ERROR("mmap error\n");
- retcode = PTR_ERR((void *)virtual);
- buf_priv->virtual = NULL;
- } else {
- buf_priv->virtual = (void __user *)virtual;
- }
- up_write(&current->mm->mmap_sem);
-
- return retcode;
-}
-
-static int i830_unmap_buffer(struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int retcode = 0;
-
- if (buf_priv->currently_mapped != I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- retcode = do_munmap(current->mm,
- (unsigned long)buf_priv->virtual,
- (size_t) buf->total);
- up_write(&current->mm->mmap_sem);
-
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
-
- return retcode;
-}
-
-static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
- struct drm_file *file_priv)
-{
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
- int retcode = 0;
-
- buf = i830_freelist_get(dev);
- if (!buf) {
- retcode = -ENOMEM;
- DRM_DEBUG("retcode=%d\n", retcode);
- return retcode;
- }
-
- retcode = i830_map_buffer(buf, file_priv);
- if (retcode) {
- i830_freelist_put(dev, buf);
- DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
- return retcode;
- }
- buf->file_priv = file_priv;
- buf_priv = buf->dev_private;
- d->granted = 1;
- d->request_idx = buf->idx;
- d->request_size = buf->total;
- d->virtual = buf_priv->virtual;
-
- return retcode;
-}
-
-static int i830_dma_cleanup(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- if (dev->dev_private) {
- int i;
- drm_i830_private_t *dev_priv =
- (drm_i830_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start)
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- if (dev_priv->hw_status_page) {
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- dev_priv->hw_status_page,
- dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I830_WRITE(0x02080, 0x1ffff000);
- }
-
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- if (buf_priv->kernel_virtual && buf->total)
- drm_core_ioremapfree(&buf_priv->map, dev);
- }
- }
- return 0;
-}
-
-int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
- int iters = 0;
- unsigned long end;
- unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-
- end = jiffies + (HZ * 3);
- while (ring->space < n) {
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = ring->head;
- }
-
- iters++;
- if (time_before(end, jiffies)) {
- DRM_ERROR("space: %d wanted %d\n", ring->space, n);
- DRM_ERROR("lockup\n");
- goto out_wait_ring;
- }
- udelay(1);
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
- }
-
-out_wait_ring:
- return iters;
-}
-
-static void i830_kernel_lost_context(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head == ring->tail)
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
-}
-
-static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int my_idx = 36;
- u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
- int i;
-
- if (dma->buf_count > 1019) {
- /* Not enough space in the status page for the freelist */
- return -EINVAL;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- buf_priv->in_use = hw_status++;
- buf_priv->my_use_idx = my_idx;
- my_idx += 4;
-
- *buf_priv->in_use = I830_BUF_FREE;
-
- buf_priv->map.offset = buf->bus_address;
- buf_priv->map.size = buf->total;
- buf_priv->map.type = _DRM_AGP;
- buf_priv->map.flags = 0;
- buf_priv->map.mtrr = 0;
-
- drm_core_ioremap(&buf_priv->map, dev);
- buf_priv->kernel_virtual = buf_priv->map.handle;
- }
- return 0;
-}
-
-static int i830_dma_initialize(struct drm_device *dev,
- drm_i830_private_t *dev_priv,
- drm_i830_init_t *init)
-{
- struct drm_map_list *r_list;
-
- memset(dev_priv, 0, sizeof(drm_i830_private_t));
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK) {
- dev_priv->sarea_map = r_list->map;
- break;
- }
- }
-
- if (!dev_priv->sarea_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find dma buffer map!\n");
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i830_sarea_t *)
- ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
-
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = _DRM_AGP;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->w = init->w;
- dev_priv->h = init->h;
- dev_priv->pitch = init->pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->front_offset = init->front_offset;
-
- dev_priv->front_di1 = init->front_offset | init->pitch_bits;
- dev_priv->back_di1 = init->back_offset | init->pitch_bits;
- dev_priv->zi1 = init->depth_offset | init->pitch_bits;
-
- DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
- DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
- DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
- DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
-
- dev_priv->cpp = init->cpp;
-	/* Separate pitch values are kept as placeholders for a future
-	 * private backbuffer/depthbuffer mechanism.
-	 */
-
- dev_priv->back_pitch = init->back_pitch;
- dev_priv->depth_pitch = init->depth_pitch;
- dev_priv->do_boxes = 0;
- dev_priv->use_mi_batchbuffer_start = 0;
-
- /* Program Hardware Status Page */
- dev_priv->hw_status_page =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE,
- &dev_priv->dma_status_page);
- if (!dev_priv->hw_status_page) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- I830_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- /* Now we need to init our freelist */
- if (i830_freelist_init(dev, dev_priv) != 0) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Not enough space in the status page for"
- " the freelist\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)dev_priv;
-
- return 0;
-}
-
-static int i830_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv;
- drm_i830_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case I830_INIT_DMA:
- dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i830_dma_initialize(dev, dev_priv, init);
- break;
- case I830_CLEANUP_DMA:
- retcode = i830_dma_cleanup(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define ST1_ENABLE (1<<16)
-#define ST1_MASK (0xffff)
-
-/* The most efficient way to verify state for the i830 is as it is
- * emitted.  Non-conformant dwords are dropped and logged.
- */
-static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);
-
- for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
- OUT_RING(code[I830_CTXREG_BLENDCOLR]);
- j += 2;
-
- for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
- OUT_RING(code[I830_CTXREG_MCSB1]);
- j += 2;
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
- (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
- (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
-
- BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
-
- OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */
- OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */
- OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */
- OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */
- OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */
- OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */
-
- for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
- } else
-		printk(KERN_ERR "rejected packet %x\n", code[0]);
-}
-
-static void i830EmitTexBlendVerified(struct drm_device *dev,
- unsigned int *code, unsigned int num)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (!num)
- return;
-
- BEGIN_LP_RING(num + 1);
-
- for (i = 0; i < num; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
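-/* Palette uploads are currently disabled: the early return below skips
- * the whole body (and see the note at the end about the missing
- * ADVANCE_LP_RING).
- */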
-static void i830EmitTexPalette(struct drm_device *dev,
- unsigned int *palette, int number, int is_shared)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
-
- return;
-
- BEGIN_LP_RING(258);
-
- if (is_shared == 1) {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
- MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
- } else {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
- }
- for (i = 0; i < 256; i++)
- OUT_RING(palette[i]);
- OUT_RING(0);
- /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
- */
-}
-
-/* Need to do some additional checking when setting the dest buffer.
- */
-static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
-
- tmp = code[I830_DESTREG_CBUFADDR];
- if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
- if (((int)outring) & 8) {
- OUT_RING(0);
- OUT_RING(0);
- }
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_COLOR_BACK |
- BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
- BUF_3D_USE_FENCE);
- OUT_RING(tmp);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
- BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
- OUT_RING(dev_priv->zi1);
- OUT_RING(0);
- } else {
-		DRM_ERROR("bad di1 %x (allowed %x or %x)\n",
- tmp, dev_priv->front_di1, dev_priv->back_di1);
- }
-
-	/* invariant:
- */
-
- OUT_RING(GFX_OP_DESTBUFFER_VARS);
- OUT_RING(code[I830_DESTREG_DV1]);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(code[I830_DESTREG_DR1]);
- OUT_RING(code[I830_DESTREG_DR2]);
- OUT_RING(code[I830_DESTREG_DR3]);
- OUT_RING(code[I830_DESTREG_DR4]);
-
- /* Need to verify this */
- tmp = code[I830_DESTREG_SENABLE];
- if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
- OUT_RING(tmp);
- } else {
- DRM_ERROR("bad scissor enable\n");
- OUT_RING(0);
- }
-
- OUT_RING(GFX_OP_SCISSOR_RECT);
- OUT_RING(code[I830_DESTREG_SR1]);
- OUT_RING(code[I830_DESTREG_SR2]);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- BEGIN_LP_RING(2);
- OUT_RING(GFX_OP_STIPPLE);
- OUT_RING(code[1]);
- ADVANCE_LP_RING();
-}
-
-static void i830EmitState(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("%s %x\n", __func__, dirty);
-
- if (dirty & I830_UPLOAD_BUFFERS) {
- i830EmitDestVerified(dev, sarea_priv->BufferState);
- sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
- }
-
- if (dirty & I830_UPLOAD_CTX) {
- i830EmitContextVerified(dev, sarea_priv->ContextState);
- sarea_priv->dirty &= ~I830_UPLOAD_CTX;
- }
-
- if (dirty & I830_UPLOAD_TEX0) {
- i830EmitTexVerified(dev, sarea_priv->TexState[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
- }
-
- if (dirty & I830_UPLOAD_TEX1) {
- i830EmitTexVerified(dev, sarea_priv->TexState[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND0) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
- sarea_priv->TexBlendStateWordsUsed[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND1) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
- sarea_priv->TexBlendStateWordsUsed[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
- }
-
- if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
- } else {
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
- }
-
- /* 1.3:
- */
-#if 0
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
-			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(3);
- }
-#endif
- }
-
- /* 1.3:
- */
- if (dirty & I830_UPLOAD_STIPPLE) {
- i830EmitStippleVerified(dev, sarea_priv->StippleState);
- sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
- }
-
- if (dirty & I830_UPLOAD_TEX2) {
- i830EmitTexVerified(dev, sarea_priv->TexState2);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
- }
-
- if (dirty & I830_UPLOAD_TEX3) {
- i830EmitTexVerified(dev, sarea_priv->TexState3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND2) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState2,
- sarea_priv->TexBlendStateWordsUsed2);
-
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND3) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState3,
- sarea_priv->TexBlendStateWordsUsed3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
- }
-}
-
-/* ================================================================
- * Performance monitoring functions
- */
-
-static void i830_fill_box(struct drm_device *dev,
- int x, int y, int w, int h, int r, int g, int b)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- u32 color;
- unsigned int BR13, CMD;
- RING_LOCALS;
-
- BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
- CMD = XY_COLOR_BLT_CMD;
- x += dev_priv->sarea_priv->boxes[0].x1;
- y += dev_priv->sarea_priv->boxes[0].y1;
-
- if (dev_priv->cpp == 4) {
- BR13 |= (1 << 25);
- CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
- color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
- } else {
- color = (((r & 0xf8) << 8) |
- ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
- }
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((y << 16) | x);
- OUT_RING(((y + h) << 16) | (x + w));
-
- if (dev_priv->current_page == 1)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING(color);
- ADVANCE_LP_RING();
-}
-
-static void i830_cp_performance_boxes(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- /* Purple box for page flipping
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
- i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
-
- /* Red box if we have to wait for idle at any point
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
- i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
-
- /* Blue box: lost context?
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
- i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
-
- /* Yellow box for texture swaps
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
- i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
-
- /* Green box if hardware never idles (as far as we can tell)
- */
- if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
- i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
-
- /* Draw bars indicating number of buffers allocated
- * (not a great measure, easily confused)
- */
- if (dev_priv->dma_used) {
- int bar = dev_priv->dma_used / 10240;
- if (bar > 100)
- bar = 100;
- if (bar < 1)
- bar = 1;
- i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
- dev_priv->dma_used = 0;
- }
-
- dev_priv->sarea_priv->perf_boxes = 0;
-}
-
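-/* Clear the requested buffers with XY_COLOR_BLT rectangles.  If page
- * flipping has swapped front and back, the I830_FRONT/I830_BACK flags
- * are exchanged first so the blits hit the right physical buffers.
- */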
-static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval,
- unsigned int clear_depthmask)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int BR13, CMD, D_CMD;
- RING_LOCALS;
-
- if (dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(I830_FRONT | I830_BACK);
- if (tmp & I830_FRONT)
- flags |= I830_BACK;
- if (tmp & I830_BACK)
- flags |= I830_FRONT;
- }
-
- i830_kernel_lost_context(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- case 4:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
- CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
- XY_COLOR_BLT_WRITE_RGB);
- D_CMD = XY_COLOR_BLT_CMD;
- if (clear_depthmask & 0x00ffffff)
- D_CMD |= XY_COLOR_BLT_WRITE_RGB;
- if (clear_depthmask & 0xff000000)
- D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
- break;
- default:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- if (flags & I830_FRONT) {
- DRM_DEBUG("clear front\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->front_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_BACK) {
- DRM_DEBUG("clear back\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->back_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_DEPTH) {
- DRM_DEBUG("clear depth\n");
- BEGIN_LP_RING(6);
- OUT_RING(D_CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->depth_offset);
- OUT_RING(clear_zval);
- ADVANCE_LP_RING();
- }
- }
-}
-
-static void i830_dma_dispatch_swap(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int CMD, BR13;
- RING_LOCALS;
-
- DRM_DEBUG("swapbuffers\n");
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes)
- i830_cp_performance_boxes(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- case 4:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
- CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- break;
- default:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
- pbox->x1, pbox->y1, pbox->x2, pbox->y2);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING(BR13 & 0xffff);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset);
- else
- OUT_RING(dev_priv->front_offset);
-
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_dispatch_flip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes) {
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
- i830_cp_performance_boxes(dev);
- }
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
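-/* Emit any dirty state, fix up the client's buffer with the primitive
- * header and batch terminator where needed, then replay the batch once
- * per cliprect.
- */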
-static void i830_dma_dispatch_vertex(struct drm_device *dev,
- struct drm_buf *buf, int discard, int used)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- struct drm_clip_rect *box = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int i = 0, u;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- if (discard) {
- u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
- if (u != I830_BUF_CLIENT)
-			DRM_DEBUG("discarded buffer was not owned by the client\n");
- }
-
- if (used > 4 * 1023)
- used = 0;
-
- if (sarea_priv->dirty)
- i830EmitState(dev);
-
- DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
- address, used, nbox);
-
- dev_priv->counter++;
- DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i830_dma_dispatch\n");
- DRM_DEBUG("start : %lx\n", start);
- DRM_DEBUG("used : %d\n", used);
- DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
- u32 *vp = buf_priv->kernel_virtual;
-
- vp[0] = (GFX_OP_PRIMITIVE |
- sarea_priv->vertex_prim | ((used / 4) - 2));
-
- if (dev_priv->use_mi_batchbuffer_start) {
- vp[used / 4] = MI_BATCH_BUFFER_END;
- used += 4;
- }
-
- if (used & 4) {
- vp[used / 4] = 0;
- used += 4;
- }
-
- i830_unmap_buffer(buf);
- }
-
- if (used) {
- do {
- if (i < nbox) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR1]);
- OUT_RING(box[i].x1 | (box[i].y1 << 16));
- OUT_RING(box[i].x2 | (box[i].y2 << 16));
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR4]);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (dev_priv->use_mi_batchbuffer_start) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(start | MI_BATCH_NON_SECURE);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(start | MI_BATCH_NON_SECURE);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- } while (++i < nbox);
- }
-
- if (discard) {
- dev_priv->counter++;
-
- (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I830_BUF_FREE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_quiescent(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-}
-
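-/* Drain the ring, then hand any buffers the hardware has finished with
- * back to the free list.
- */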
-static int i830_flush_queue(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- int i, ret = 0;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
- I830_BUF_FREE);
-
- if (used == I830_BUF_HARDWARE)
- DRM_DEBUG("reclaimed from HARDWARE\n");
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("still on client\n");
- }
-
- return ret;
-}
-
-/* Must be called with the lock held */
-static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- if (!dev->dev_private)
- return;
- if (!dma->buflist)
- return;
-
- i830_flush_queue(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv) {
- int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_FREE);
-
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("reclaimed from client\n");
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- }
- }
-}
-
-static int i830_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_flush_queue(dev);
- return 0;
-}
-
-static int i830_dma_vertex(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
- drm_i830_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
- vertex->idx, vertex->used, vertex->discard);
-
-	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
- return -EINVAL;
-
- i830_dma_dispatch_vertex(dev,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
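-	/* hw_status[5] is the dword at byte offset 20, which the
-	 * CMD_STORE_DWORD_IDX emitted by i830_dma_dispatch_vertex() keeps
-	 * up to date with the dispatch counter. */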
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i830_clear_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* GH: Someone's doing nasty things... */
- if (!dev->dev_private)
- return -EINVAL;
-
- i830_dma_dispatch_clear(dev, clear->flags,
- clear->clear_color,
- clear->clear_depth, clear->clear_depthmask);
- return 0;
-}
-
-static int i830_swap_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("i830_swap_bufs\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_dma_dispatch_swap(dev);
- return 0;
-}
-
-/* Not sure why this isn't set all the time:
- */
-static void i830_do_init_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static int i830_do_cleanup_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- if (dev_priv->current_page != 0)
- i830_dma_dispatch_flip(dev);
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-static int i830_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv->page_flipping)
- i830_do_init_pageflip(dev);
-
- i830_dma_dispatch_flip(dev);
- return 0;
-}
-
-static int i830_getage(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
-}
-
-static int i830_getbuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int retcode = 0;
- drm_i830_dma_t *d = data;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- DRM_DEBUG("getbuf\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- d->granted = 0;
-
- retcode = i830_dma_get_buffer(dev, d, file_priv);
-
- DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
- task_pid_nr(current), retcode, d->granted);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return retcode;
-}
-
-static int i830_copybuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static int i830_docopy(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-
-static int i830_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_getparam_t *param = data;
- int value;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_PARAM_IRQ_ACTIVE:
- value = dev->irq_enabled;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int i830_setparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_setparam_t *param = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
- dev_priv->use_mi_batchbuffer_start = param->value;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i830_driver_load(struct drm_device *dev, unsigned long flags)
-{
- /* i830 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- return 0;
-}
-
-void i830_driver_lastclose(struct drm_device *dev)
-{
- i830_dma_cleanup(dev);
-}
-
-void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_i830_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping)
- i830_do_cleanup_pageflip(dev);
- }
-}
-
-void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
-{
- i830_reclaim_buffers(dev, file_priv);
-}
-
-int i830_driver_dma_quiescent(struct drm_device *dev)
-{
- i830_dma_quiescent(dev);
- return 0;
-}
-
-/*
- * Call drm_ioctl under the big kernel lock to serialize
- * against the i830_mmap_buffers function.
- */
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
-struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
-};
-
-int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
-
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always returned to indicate that every i8xx is AGP.
- */
-int i830_driver_device_is_agp(struct drm_device *dev)
-{
- return 1;
-}
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
deleted file mode 100644
index f655ab7977da..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/* i830_drv.c -- I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- * Keith Whitwell <keith@tungstengraphics.com>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-
-#include "drm_pciids.h"
-
-static struct pci_device_id pciidlist[] = {
- i830_PCI_IDS
-};
-
-static struct drm_driver driver = {
-	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
-	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE
-#if USE_IRQS
-	    | DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ
-#endif
-	    ,
- .dev_priv_size = sizeof(drm_i830_buf_priv_t),
- .load = i830_driver_load,
- .lastclose = i830_driver_lastclose,
- .preclose = i830_driver_preclose,
- .device_is_agp = i830_driver_device_is_agp,
- .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
- .dma_quiescent = i830_driver_dma_quiescent,
-#if USE_IRQS
- .irq_preinstall = i830_driver_irq_preinstall,
- .irq_postinstall = i830_driver_irq_postinstall,
- .irq_uninstall = i830_driver_irq_uninstall,
- .irq_handler = i830_driver_irq_handler,
-#endif
- .ioctls = i830_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static int __init i830_init(void)
-{
- driver.num_ioctls = i830_max_ioctl;
- return drm_init(&driver);
-}
-
-static void __exit i830_exit(void)
-{
- drm_exit(&driver);
-}
-
-module_init(i830_init);
-module_exit(i830_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
deleted file mode 100644
index 0df1c720560b..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
- */
-
-#ifndef _I830_DRV_H_
-#define _I830_DRV_H_
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
-
-#define DRIVER_NAME "i830"
-#define DRIVER_DESC "Intel 830M"
-#define DRIVER_DATE "20021108"
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: ?
- * 1.3: New irq emit/wait ioctls.
- * New pageflip ioctl.
- * New getparam ioctl.
- * State for texunits 3&4 in sarea.
- * New (alternative) layout for texture state.
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 2
-
-/* The driver will work either way: IRQs save CPU time when waiting for
- * the card, but are subject to subtle interactions between the BIOS,
- * the hardware and the driver.
- */
-/* XXX: Add vblank support? */
-#define USE_IRQS 0
-
-typedef struct drm_i830_buf_priv {
- u32 *in_use;
- int my_use_idx;
- int currently_mapped;
- void __user *virtual;
- void *kernel_virtual;
- drm_local_map_t map;
-} drm_i830_buf_priv_t;
-
-typedef struct _drm_i830_ring_buffer {
- int tail_mask;
- unsigned long Start;
- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-} drm_i830_ring_buffer_t;
-
-typedef struct drm_i830_private {
- struct drm_local_map *sarea_map;
- struct drm_local_map *mmio_map;
-
- drm_i830_sarea_t *sarea_priv;
- drm_i830_ring_buffer_t ring;
-
- void *hw_status_page;
- unsigned long counter;
-
- dma_addr_t dma_status_page;
-
- struct drm_buf *mmap_buffer;
-
- u32 front_di1, back_di1, zi1;
-
- int back_offset;
- int depth_offset;
- int front_offset;
- int w, h;
- int pitch;
- int back_pitch;
- int depth_pitch;
- unsigned int cpp;
-
- int do_boxes;
- int dma_used;
-
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-
- int use_mi_batchbuffer_start;
-
-} drm_i830_private_t;
-
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i830_ioctls[];
-extern int i830_max_ioctl;
-
-/* i830_irq.c */
-extern int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(struct drm_device *dev);
-extern void i830_driver_irq_postinstall(struct drm_device *dev);
-extern void i830_driver_irq_uninstall(struct drm_device *dev);
-extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void i830_driver_lastclose(struct drm_device *dev);
-extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
-extern int i830_driver_dma_quiescent(struct drm_device *dev);
-extern int i830_driver_device_is_agp(struct drm_device *dev);
-
-#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
-
-#define I830_VERBOSE 0
-
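-/* Ring emission protocol: RING_LOCALS declares the locals shared by the
- * macros below, BEGIN_LP_RING(n) reserves space for n dwords (waiting if
- * necessary), OUT_RING() writes one dword with wrap-around, and
- * ADVANCE_LP_RING() publishes the new tail to the hardware.
- */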
-#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I830_VERBOSE) \
- printk("BEGIN_LP_RING(%d)\n", (n)); \
-	if (dev_priv->ring.space < (n) * 4) \
-		i830_wait_ring(dev, (n) * 4, __func__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I830_VERBOSE) \
- printk(" OUT_RING %x\n", (int)(n)); \
-	*(volatile unsigned int *)(virt + outring) = (n); \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I830_VERBOSE) \
- printk("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I830_WRITE(LP_RING + RING_TAIL, outring); \
-} while (0)
-
-extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
-#define LOAD_TEXTURE_MAP0 (1<<11)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I830REG_HWSTAM 0x02098
-#define I830REG_INT_IDENTITY_R 0x020a4
-#define I830REG_INT_MASK_R 0x020a8
-#define I830REG_INT_ENABLE_R 0x020a0
-
-#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
-
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
-
-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP (1<<22)
-
-#define CMD_3D (0x3<<29)
-#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
-#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
-
-#define BR00_BITBLT_CLIENT 0x40000000
-#define BR00_OP_COLOR_BLT 0x10000000
-#define BR00_OP_SRC_COPY_BLT 0x10C00000
-#define BR13_SOLID_PATTERN 0x80000000
-
-#define BUF_3D_ID_COLOR_BACK (0x3<<24)
-#define BUF_3D_ID_DEPTH (0x7<<24)
-#define BUF_3D_USE_FENCE (1<<23)
-#define BUF_3D_PITCH(x) (((x)/4)<<2)
-
-#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
-#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
-#define MAP_PALETTE_BOTH (1<<11)
-
-#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
-#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
-#define XY_COLOR_BLT_WRITE_RGB (1<<20)
-
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
-#define MI_BATCH_NON_SECURE (1)
-
-#define MI_WAIT_FOR_EVENT ((0x3<<23))
-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-
-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
-
-#endif
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
deleted file mode 100644
index d1a6b95d631d..000000000000
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
- *
- * Copyright 2002 Tungsten Graphics, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Keith Whitwell <keith@tungstengraphics.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/delay.h>
-
-irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u16 temp;
-
- temp = I830_READ16(I830REG_INT_IDENTITY_R);
- DRM_DEBUG("%x\n", temp);
-
- if (!(temp & 2))
- return IRQ_NONE;
-
- I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
-
- atomic_inc(&dev_priv->irq_received);
- wake_up_interruptible(&dev_priv->irq_queue);
-
- return IRQ_HANDLED;
-}
-
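-/* Queue a GFX_OP_USER_INTERRUPT and return its sequence number; the
- * handler above bumps irq_received, which i830_wait_irq() compares
- * against to decide when a given emit has completed.
- */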
-static int i830_emit_irq(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s\n", __func__);
-
- atomic_inc(&dev_priv->irq_emitted);
-
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(GFX_OP_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- return atomic_read(&dev_priv->irq_emitted);
-}
-
-static int i830_wait_irq(struct drm_device *dev, int irq_nr)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- DECLARE_WAITQUEUE(entry, current);
- unsigned long end = jiffies + HZ * 3;
- int ret = 0;
-
- DRM_DEBUG("%s\n", __func__);
-
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- return 0;
-
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
-
- add_wait_queue(&dev_priv->irq_queue, &entry);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- break;
-		if (time_after_eq(jiffies, end)) {
- DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
- I830_READ16(I830REG_INT_IDENTITY_R),
- I830_READ16(I830REG_INT_MASK_R),
- I830_READ16(I830REG_INT_ENABLE_R),
- I830_READ16(I830REG_HWSTAM));
-
- ret = -EBUSY; /* Lockup? Missed irq? */
- break;
- }
- schedule_timeout(HZ * 3);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
-
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev_priv->irq_queue, &entry);
- return ret;
-}
-
-/* Needs the lock as it touches the ring.
- */
-int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_emit_t *emit = data;
- int result;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- result = i830_emit_irq(dev);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- return i830_wait_irq(dev, irqwait->irq_seq);
-}
-
-/* drm_dma.h hooks */
-void i830_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_HWSTAM, 0xffff);
- I830_WRITE16(I830REG_INT_MASK_R, 0x0);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
- atomic_set(&dev_priv->irq_received, 0);
- atomic_set(&dev_priv->irq_emitted, 0);
- init_waitqueue_head(&dev_priv->irq_queue);
-}
-
-void i830_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
-}
-
-void i830_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- if (!dev_priv)
- return;
-
- I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
-}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4ff9b6cc973f..09e0327fc6ce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct intel_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- const char *pipe = crtc->pipe ? "B" : "A";
- const char *plane = crtc->plane ? "B" : "A";
+ const char pipe = pipe_name(crtc->pipe);
+ const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
spin_lock_irqsave(&dev->event_lock, flags);
work = crtc->unpin_work;
if (work == NULL) {
- seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+ seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
if (!work->pending) {
- seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+ seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
- seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
if (work->enable_stall_check)
@@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
- seq_printf(m, "Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- seq_printf(m, "Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat: %08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
} else {
seq_printf(m, "North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
@@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- volatile u32 *hws;
+ const volatile u32 __iomem *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = (volatile u32 *)ring->status_page.page_addr;
+ hws = (volatile u32 __iomem *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
- u8 *virt = ring->virtual_start;
+ const u8 __iomem *virt = ring->virtual_start;
uint32_t off;
for (off = 0; off < ring->size; off += 4) {
@@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
- if (error->ringbuffer) {
- struct drm_i915_error_object *obj = error->ringbuffer;
-
- seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
- offset += 4;
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+ if (error->ringbuffer[i]) {
+ struct drm_i915_error_object *obj = error->ringbuffer[i];
+ seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n",
+ offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
}
}
}
@@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rpstat;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
/* RPSTAT1 is in the GT power well */
__gen6_gt_force_wake_get(dev_priv);
+ rpstat = I915_READ(GEN6_RPSTAT1);
+ rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+ rpcurup = I915_READ(GEN6_RP_CUR_UP);
+ rpprevup = I915_READ(GEN6_RP_PREV_UP);
+ rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+ rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+ rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+ seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
+ GEN6_CAGF_SHIFT) * 100);
+ seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+ GEN6_CURICONT_MASK);
+ seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+ GEN6_CURIAVG_MASK);
+ seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+ GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
@@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_capabilities", i915_capabilities, 0, 0},
+ {"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e33d9be7df3b..72730377a01b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -43,6 +43,17 @@
#include <linux/slab.h>
#include <acpi/video.h>
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
+
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+ ring->status_page.page_addr =
+ (void __force __iomem *)dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
- 0xf0;
+ i915_write_hws_pga(dev);
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
@@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
@@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_INFO(dev)->gen >= 4;
break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
" G33 hw status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->hws_map.handle;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ ring->status_page.page_addr =
+ (void __force __iomem *)dev_priv->hws_map.handle;
+ memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -2013,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
- dev_priv->trace_irq_seqno = 0;
- ret = drm_vblank_init(dev, I915_NUM_PIPE);
+ if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ dev_priv->num_pipe = 2;
+ else
+ dev_priv->num_pipe = 1;
+
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_gem_unload;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 22ec066adae6..c34a8dd31d02 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,10 +43,13 @@ module_param_named(modeset, i915_modeset, int, 0400);
unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+int i915_panel_ignore_lid = 0;
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
-unsigned int i915_semaphores = 0;
+unsigned int i915_semaphores = 1;
module_param_named(semaphores, i915_semaphores, int, 0600);
unsigned int i915_enable_rc6 = 0;
@@ -58,7 +61,10 @@ module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
unsigned int i915_panel_use_ssc = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-bool i915_try_reset = true;
+int i915_vbt_sdvo_panel_type = -1;
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+
+static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
static struct drm_driver driver;
@@ -716,6 +722,9 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -732,14 +741,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = i915_pci_probe,
- .remove = i915_pci_remove,
- .driver.pm = &i915_pm_ops,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -748,6 +749,14 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+
static int __init i915_init(void)
{
if (!intel_agp_enabled) {
@@ -781,12 +790,12 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET))
driver.get_vblank_timestamp = NULL;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 456f40484838..449650545bb4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,17 +49,22 @@
enum pipe {
PIPE_A = 0,
PIPE_B,
+ PIPE_C,
+ I915_MAX_PIPES
};
+#define pipe_name(p) ((p) + 'A')
enum plane {
PLANE_A = 0,
PLANE_B,
+ PLANE_C,
};
-
-#define I915_NUM_PIPE 2
+#define plane_name(p) ((p) + 'A')
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
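A short kernel-style usage sketch of the helpers introduced above: for_each_pipe() iterates only over the pipes the device reports (dev_priv->num_pipe, set at load time), and pipe_name()/plane_name() turn the index into the letter used in register names and log messages:

	int pipe;

	for_each_pipe(pipe)
		DRM_DEBUG_DRIVER("pipe %c present\n", pipe_name(pipe));
	/* pipe_name(PIPE_A) == 'A', pipe_name(PIPE_C) == 'C' */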
/* Interface history:
*
* 1.1: Original.
@@ -75,10 +80,7 @@ enum plane {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_EXEC 0
-#define WATCH_RELOC 0
#define WATCH_LISTS 0
-#define WATCH_PWRITE 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -111,6 +113,7 @@ struct intel_opregion {
struct opregion_swsci *swsci;
struct opregion_asle *asle;
void *vbt;
+ u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@@ -144,8 +147,7 @@ struct intel_display_error_state;
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
- u32 pipeastat;
- u32 pipebstat;
+ u32 pipestat[I915_MAX_PIPES];
u32 ipeir;
u32 ipehr;
u32 instdone;
@@ -172,7 +174,7 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer[I915_NUM_RINGS];
+ } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@@ -200,9 +202,7 @@ struct drm_i915_display_funcs {
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
- void (*update_wm)(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size);
+ void (*update_wm)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -274,7 +274,6 @@ typedef struct drm_i915_private {
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
- dma_addr_t dma_status_page;
uint32_t counter;
drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
@@ -289,7 +288,6 @@ typedef struct drm_i915_private {
int page_flipping;
atomic_t irq_received;
- u32 trace_irq_seqno;
/* protects the irq masks */
spinlock_t irq_lock;
@@ -324,8 +322,6 @@ typedef struct drm_i915_private {
int cfb_plane;
int cfb_y;
- int irq_enabled;
-
struct intel_opregion opregion;
/* overlay */
@@ -387,7 +383,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -615,6 +610,12 @@ typedef struct drm_i915_private {
struct delayed_work retire_work;
/**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
@@ -652,6 +653,7 @@ typedef struct drm_i915_private {
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
+ int panel_t3, panel_t12;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -698,6 +700,8 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+
+ struct drm_property *broadcast_rgb_property;
} drm_i915_private_t;
struct drm_i915_gem_object {
@@ -955,10 +959,12 @@ enum intel_chip_family {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
extern unsigned int i915_enable_rc6;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -998,8 +1004,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -1051,7 +1055,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
-int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -1094,8 +1097,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
@@ -1110,12 +1112,18 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1126,16 +1134,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
}
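The comparison relies on unsigned wrap-around: reinterpreting the 32-bit difference as signed orders two sequence numbers correctly even across a counter wrap, as long as they are within 2^31 of each other. A standalone sketch of the same trick:

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));
	assert(!seqno_passed(3, 5));
	/* 2 is "later" than 0xfffffffe once the counter wraps */
	assert(seqno_passed(2, 0xfffffffeu));
	return 0;
}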
static inline u32
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
return ring->outstanding_lazy_request = dev_priv->next_seqno;
}
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible);
+ struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
@@ -1144,8 +1150,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
@@ -1154,14 +1159,11 @@ void i915_gem_do_init(struct drm_device *dev,
unsigned long end);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring);
-int __must_check i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno,
- bool interruptible,
- struct intel_ring_buffer *ring);
+int __must_check i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request);
+int __must_check i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1313,7 +1315,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = read##y(dev_priv->regs + reg); \
- trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
__i915_read(8, b)
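For reference, a hand expansion of one instantiation: __i915_read(32, l) generates a typed wrapper around readl() that reports the access to the tracepoint, with the direction now a boolean (false for reads, true for writes) instead of the old 'R'/'W' characters:

static inline u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = readl(dev_priv->regs + reg);
	trace_i915_reg_rw(false, reg, val, sizeof(val));
	return val;
}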
@@ -1324,7 +1326,7 @@ __i915_read(64, q)
#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+ trace_i915_reg_rw(true, reg, val, sizeof(val)); \
write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
@@ -1382,47 +1384,4 @@ static inline void i915_gt_write(struct drm_i915_private *dev_priv,
__gen6_gt_wait_for_fifo(dev_priv);
I915_WRITE(reg, val);
}
-
-static inline void
-i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
-{
- /* Trace down the write operation before the real write */
- trace_i915_reg_rw('W', reg, val, len);
- switch (len) {
- case 8:
- writeq(val, dev_priv->regs + reg);
- break;
- case 4:
- writel(val, dev_priv->regs + reg);
- break;
- case 2:
- writew(val, dev_priv->regs + reg);
- break;
- case 1:
- writeb(val, dev_priv->regs + reg);
- break;
- }
-}
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
- (LP_RING(dev_priv)->status_page.page_addr))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 36e66cc5225e..c4c2855d002d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
if (ret)
return ret;
- /* Success, we reset the GPU! */
- if (!atomic_read(&dev_priv->mm.wedged))
- return 0;
-
- /* GPU is hung, bump the completion count to account for
- * the token we just consumed so that we never hit zero and
- * end up waiting upon a subsequent completion event that
- * will never happen.
- */
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- return -EIO;
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ /* GPU is hung, bump the completion count to account for
+ * the token we just consumed so that we never hit zero and
+ * end up waiting upon a subsequent completion event that
+ * will never happen.
+ */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_check_is_wedged(dev);
+ ret = i915_gem_wait_for_error(dev);
if (ret)
return ret;
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret)
return ret;
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- return -EAGAIN;
- }
-
WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -193,22 +185,20 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int
+i915_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ uint32_t *handle_p)
{
- struct drm_i915_gem_create *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- args->size = roundup(args->size, PAGE_SIZE);
+ size = roundup(size, PAGE_SIZE);
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, args->size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return -ENOMEM;
@@ -224,10 +214,41 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
- args->handle = handle;
+ *handle_p = handle;
return 0;
}
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /* have to work out size/pitch and return them */
+ args->pitch = ALIGN(args->width * ((args->bpp + 1) / 8), 64);
+ args->size = args->pitch * args->height;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
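A worked example of the size/pitch arithmetic above for a 1366x768 buffer at 32 bpp; ALIGN() rounds the byte pitch up to the next multiple of 64:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint32_t width = 1366, height = 768, bpp = 32;
	uint32_t cpp = (bpp + 1) / 8;	/* as above: 4 bytes per pixel */
	uint64_t pitch = ALIGN_UP((uint64_t)width * cpp, 64);

	/* 1366 * 4 = 5464, aligned up to 5504; 5504 * 768 = 4227072 */
	printf("pitch=%llu size=%llu\n",
	       (unsigned long long)pitch,
	       (unsigned long long)(pitch * height));
	return 0;
}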
+
+int i915_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_create *args = data;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@@ -514,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -526,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
@@ -955,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -967,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -1049,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1092,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1121,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
- loff_t offset;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1136,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -E2BIG;
}
- offset = args->offset;
-
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1182,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
- /* Now bind it into the GTT if needed */
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out;
+ trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+ /* Now bind it into the GTT if needed */
if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj);
if (ret)
@@ -1203,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
else
- ret = i915_gem_object_get_fence(obj, NULL, true);
+ ret = i915_gem_object_get_fence(obj, NULL);
if (ret)
goto unlock;
@@ -1219,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
mutex_unlock(&dev->struct_mutex);
-
+out:
switch (ret) {
+ case -EIO:
case -EAGAIN:
+ /* Give the error handler a chance to run and move the
+ * objects off the GPU active list. Next time we service the
+ * fault, we should be able to transition the page into the
+ * GTT without touching the GPU (and so avoid further
+ * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+ * with coherency, just lost writes.
+ */
set_need_resched();
case 0:
case -ERESTARTSYS:
+ case -EINTR:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
@@ -1425,27 +1460,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
return tile_height * obj->stride * 2;
}
-/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
- * @dev: DRM device
- * @data: GTT mapping ioctl data
- * @file: GEM object info
- *
- * Simply returns the fake offset to userspace so it can mmap it.
- * The mmap call will end up in drm_gem_mmap(), which will set things
- * up so we can get faults in the handler above.
- *
- * The fault handler will take care of binding the object into the GTT
- * (since it may have been evicted to make room for something), allocating
- * a fence register, and mapping the appropriate aperture address into
- * userspace.
- */
int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_mmap_gtt *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1456,8 +1477,8 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1479,7 +1500,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
goto out;
}
- args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
drm_gem_object_unreference(&obj->base);
@@ -1488,6 +1509,34 @@ unlock:
return ret;
}
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
+
+
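From userspace the two halves compose as below; a hypothetical sketch (map_gtt is not a real libdrm function, and error handling is trimmed) that asks the ioctl for the fake offset and then mmaps the DRM fd at it, so that later faults are serviced by i915_gem_fault():

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_gtt(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}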
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
@@ -1669,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
}
static void
-i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+ uint32_t flush_domains)
{
struct drm_i915_gem_object *obj, *next;
@@ -1684,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
obj->base.write_domain = 0;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(dev, ring));
+ i915_gem_next_request_seqno(ring));
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -1694,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring)
+ struct drm_i915_gem_request *request)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *file_priv = NULL;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
int was_empty;
int ret;
BUG_ON(request == NULL);
- if (file != NULL)
- file_priv = file->driver_priv;
-
ret = ring->add_request(ring, &seqno);
if (ret)
return ret;
- ring->outstanding_lazy_request = false;
+ trace_i915_gem_request_add(ring, seqno);
request->seqno = seqno;
request->ring = ring;
@@ -1722,7 +1765,9 @@ i915_add_request(struct drm_device *dev,
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
- if (file_priv) {
+ if (file) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
spin_lock(&file_priv->mm.lock);
request->file_priv = file_priv;
list_add_tail(&request->client_list,
@@ -1730,6 +1775,8 @@ i915_add_request(struct drm_device *dev,
spin_unlock(&file_priv->mm.lock);
}
+ ring->outstanding_lazy_request = false;
+
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1846,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev)
* This function clears the request list as sequence numbers are passed.
*/
static void
-i915_gem_retire_requests_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
int i;
- if (!ring->status_page.page_addr ||
- list_empty(&ring->request_list))
+ if (list_empty(&ring->request_list))
return;
- WARN_ON(i915_verify_lists(dev));
+ WARN_ON(i915_verify_lists(ring->dev));
seqno = ring->get_seqno(ring);
@@ -1875,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (!i915_seqno_passed(seqno, request->seqno))
break;
- trace_i915_gem_request_retire(dev, request->seqno);
+ trace_i915_gem_request_retire(ring, request->seqno);
list_del(&request->list);
i915_gem_request_remove_from_client(request);
@@ -1901,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
i915_gem_object_move_to_inactive(obj);
}
- if (unlikely (dev_priv->trace_irq_seqno &&
- i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+ if (unlikely(ring->trace_irq_seqno &&
+ i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
- dev_priv->trace_irq_seqno = 0;
+ ring->trace_irq_seqno = 0;
}
- WARN_ON(i915_verify_lists(dev));
+ WARN_ON(i915_verify_lists(ring->dev));
}
void
@@ -1931,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev)
}
for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
+ i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}
static void
@@ -1965,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
struct drm_i915_gem_request *request;
int ret;
- ret = i915_gem_flush_ring(dev, ring, 0,
- I915_GEM_GPU_DOMAINS);
+ ret = i915_gem_flush_ring(ring,
+ 0, I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (ret || request == NULL ||
- i915_add_request(dev, NULL, request, ring))
+ i915_add_request(ring, NULL, request))
kfree(request);
}
@@ -1982,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- bool interruptible, struct intel_ring_buffer *ring)
+i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
int ret = 0;
BUG_ON(seqno == 0);
- if (atomic_read(&dev_priv->mm.wedged))
- return -EAGAIN;
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ struct completion *x = &dev_priv->error_completion;
+ bool recovery_complete;
+ unsigned long flags;
+
+ /* Give the error handler a chance to run. */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ recovery_complete = x->done > 0;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+
+ return recovery_complete ? -EIO : -EAGAIN;
+ }
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@@ -2002,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (request == NULL)
return -ENOMEM;
- ret = i915_add_request(dev, NULL, request, ring);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -2012,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
}
if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(ring->dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(dev);
- i915_driver_irq_postinstall(dev);
+ i915_driver_irq_preinstall(ring->dev);
+ i915_driver_irq_postinstall(ring->dev);
}
- trace_i915_gem_request_wait_begin(dev, seqno);
+ trace_i915_gem_request_wait_begin(ring, seqno);
ring->waiting_seqno = seqno;
if (ring->irq_get(ring)) {
- if (interruptible)
+ if (dev_priv->mm.interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
@@ -2043,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
ret = -EBUSY;
ring->waiting_seqno = 0;
- trace_i915_gem_request_wait_end(dev, seqno);
+ trace_i915_gem_request_wait_end(ring, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
@@ -2059,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests_ring(dev, ring);
+ i915_gem_retire_requests_ring(ring);
return ret;
}
/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno,
- struct intel_ring_buffer *ring)
-{
- return i915_do_wait_request(dev, seqno, 1, ring);
-}
-
-/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
@@ -2095,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
* it.
*/
if (obj->active) {
- ret = i915_do_wait_request(dev,
- obj->last_rendering_seqno,
- interruptible,
- obj->ring);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
}
@@ -2148,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret == -ERESTARTSYS)
return ret;
+ trace_i915_gem_object_unbind(obj);
+
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2163,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
- trace_i915_gem_object_unbind(obj);
-
return ret;
}
int
-i915_gem_flush_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;
+ trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
- i915_gem_process_flushing_list(dev, flush_domains, ring);
+ i915_gem_process_flushing_list(ring, flush_domains);
return 0;
}
-static int i915_ring_idle(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2193,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev,
return 0;
if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(dev, ring,
+ ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
- return i915_wait_request(dev,
- i915_gem_next_request_seqno(dev, ring),
- ring);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
int
@@ -2218,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+ ret = i915_ring_idle(&dev_priv->ring[i]);
if (ret)
return ret;
}
@@ -2402,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible)
+ struct intel_ring_buffer *pipelined)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev,
- obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->last_fenced_ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2422,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
- ret = i915_do_wait_request(obj->base.dev,
- obj->last_fenced_seqno,
- interruptible,
- obj->last_fenced_ring);
+ ret = i915_wait_request(obj->last_fenced_ring,
+ obj->last_fenced_seqno);
if (ret)
return ret;
}
@@ -2451,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
- ret = i915_gem_object_flush_fence(obj, NULL, true);
+ ret = i915_gem_object_flush_fence(obj, NULL);
if (ret)
return ret;
@@ -2528,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev,
*/
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible)
+ struct intel_ring_buffer *pipelined)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2551,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (reg->setup_seqno) {
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
- ret = i915_do_wait_request(obj->base.dev,
- reg->setup_seqno,
- interruptible,
- obj->last_fenced_ring);
+ ret = i915_wait_request(obj->last_fenced_ring,
+ reg->setup_seqno);
if (ret)
return ret;
}
@@ -2563,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
}
} else if (obj->last_fenced_ring &&
obj->last_fenced_ring != pipelined) {
- ret = i915_gem_object_flush_fence(obj,
- pipelined,
- interruptible);
+ ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
} else if (obj->tiling_changed) {
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2588,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (obj->tiling_changed) {
if (pipelined) {
reg->setup_seqno =
- i915_gem_next_request_seqno(dev, pipelined);
+ i915_gem_next_request_seqno(pipelined);
obj->last_fenced_seqno = reg->setup_seqno;
obj->last_fenced_ring = pipelined;
}
@@ -2602,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (reg == NULL)
return -ENOSPC;
- ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
@@ -2614,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (old->tiling_mode)
i915_gem_release_mmap(old);
- ret = i915_gem_object_flush_fence(old,
- pipelined,
- interruptible);
+ ret = i915_gem_object_flush_fence(old, pipelined);
if (ret) {
drm_gem_object_unreference(&old->base);
return ret;
@@ -2628,7 +2657,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
old->fence_reg = I915_FENCE_REG_NONE;
old->last_fenced_ring = pipelined;
old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
drm_gem_object_unreference(&old->base);
} else if (obj->last_fenced_seqno == 0)
@@ -2640,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
obj->last_fenced_ring = pipelined;
reg->setup_seqno =
- pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
obj->last_fenced_seqno = reg->setup_seqno;
update:
@@ -2837,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
- trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+ trace_i915_gem_object_bind(obj, map_and_fenceable);
return 0;
}
@@ -2860,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
-
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
- return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+ return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2933,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->gtt_space == NULL)
return -EINVAL;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@@ -2988,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
/* Currently, we are always called from a non-interruptible context. */
if (pipelined != obj->ring) {
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@@ -3006,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
}
int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
{
int ret;
@@ -3015,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
- 0, obj->base.write_domain);
+ ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
if (ret)
return ret;
}
- return i915_gem_object_wait_rendering(obj, interruptible);
+ return i915_gem_object_wait_rendering(obj);
}
/**
@@ -3036,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -3138,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -3209,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
u32 seqno = 0;
int ret;
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EIO;
+
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
@@ -3324,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3375,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3412,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3430,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* flush earlier is beneficial.
*/
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(dev, obj->ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
} else if (obj->ring->outstanding_lazy_request ==
obj->last_rendering_seqno) {
@@ -3441,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request)
- ret = i915_add_request(dev,
- NULL, request,
- obj->ring);
+ ret = i915_add_request(obj->ring, NULL, request);
else
ret = -ENOMEM;
}
@@ -3453,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* are actually unmasked, and our working set ends up being
* larger than required.
*/
- i915_gem_retire_requests_ring(dev, obj->ring);
+ i915_gem_retire_requests_ring(obj->ring);
args->busy = obj->active;
}
@@ -3492,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3583,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
+
+ trace_i915_gem_object_destroy(obj);
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -3590,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
- trace_i915_gem_object_destroy(obj);
-
while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
@@ -3837,6 +3869,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
+ dev_priv->mm.interruptible = true;
+
dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 29d014c48ca2..8da1899bd24f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
}
#endif /* WATCH_INACTIVE */
-
-#if WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
- uint32_t bias, uint32_t mark)
-{
- uint32_t *mem = kmap_atomic(page, KM_USER0);
- int i;
- for (i = start; i < end; i += 4)
- DRM_INFO("%08x: %08x%s\n",
- (int) (bias + i), mem[i / 4],
- (bias + i == mark) ? " ********" : "");
- kunmap_atomic(mem, KM_USER0);
- /* give syslog time to catch up */
- msleep(1);
-}
-
-void
-i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
- const char *where, uint32_t mark)
-{
- int page;
-
- DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
- for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
- int page_len, chunk, chunk_len;
-
- page_len = len - page * PAGE_SIZE;
- if (page_len > PAGE_SIZE)
- page_len = PAGE_SIZE;
-
- for (chunk = 0; chunk < page_len; chunk += 128) {
- chunk_len = page_len - chunk;
- if (chunk_len > 128)
- chunk_len = 128;
- i915_gem_dump_page(obj->pages[page],
- chunk, chunk + chunk_len,
- obj->gtt_offset +
- page * PAGE_SIZE,
- mark);
- }
- }
-}
-#endif
-
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3d39005540aa..da05a2692a75 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -30,6 +30,7 @@
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
+#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
return 0;
}
+ trace_i915_gem_evict(dev, min_size, alignment, mappable);
+
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
if (lists_empty)
return -ENOSPC;
+ trace_i915_gem_evict_everything(dev, purgeable_only);
+
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 50ab1614571c..7ff7f933ddf1 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@ struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
+ uint32_t flips;
};
/*
@@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
i915_gem_release_mmap(obj);
+ if (obj->base.pending_write_domain)
+ cd->flips |= atomic_read(&obj->pending_flip);
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_offset = to_intel_bo(target_obj)->gtt_offset;
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
- /* and points to somewhere within the target object. */
- if (unlikely(reloc->delta >= target_obj->size)) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta,
- (int) target_obj->size);
- return ret;
- }
-
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
@@ -575,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (has_fenced_gpu_access) {
if (need_fence) {
- ret = i915_gem_object_get_fence(obj, ring, 1);
+ ret = i915_gem_object_get_fence(obj, ring);
if (ret)
break;
} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -690,11 +669,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj;
-
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
@@ -749,8 +726,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
- ret = i915_gem_flush_ring(dev,
- &dev_priv->ring[i],
+ ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
@@ -774,7 +750,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
- return i915_gem_object_wait_rendering(obj, true);
+ return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
@@ -789,7 +765,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (request == NULL)
return -ENOMEM;
- ret = i915_add_request(obj->base.dev, NULL, request, from);
+ ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -803,6 +779,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
}
static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
+{
+ u32 plane, flip_mask;
+ int ret;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
+
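The loop above walks the accumulated plane bitmask and emits one WAIT per set bit, lowest plane first. A standalone sketch of the same walk:

#include <stdio.h>

int main(void)
{
	unsigned int flips = 0x3;	/* planes 0 and 1 have pending flips */
	unsigned int plane;

	for (plane = 0; flips >> plane; plane++) {
		if (((flips >> plane) & 1) == 0)
			continue;
		printf("emit MI_WAIT_FOR_EVENT for plane %c\n", 'A' + plane);
	}
	return 0;
}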
+static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
@@ -810,19 +819,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct change_domains cd;
int ret;
- cd.invalidate_domains = 0;
- cd.flush_domains = 0;
- cd.flush_rings = 0;
+ memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- cd.invalidate_domains,
- cd.flush_domains);
-#endif
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
@@ -831,6 +832,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
}
+ if (cd.flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+ if (ret)
+ return ret;
+ }
+
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
@@ -877,47 +884,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
- struct list_head *objects)
-{
- struct drm_i915_gem_object *obj;
- int flips;
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
- flips = 0;
- list_for_each_entry(obj, objects, exec_list) {
- if (obj->base.write_domain)
- flips |= atomic_read(&obj->pending_flip);
- }
- if (flips) {
- int plane, flip_mask, ret;
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
- }
-
- return 0;
-}
-
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
@@ -926,6 +892,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
+
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -939,9 +909,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
intel_mark_busy(ring->dev, obj);
}
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- obj->base.write_domain);
+ trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
@@ -963,14 +931,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
- i915_gem_next_request_seqno(dev, ring);
+ i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL || i915_add_request(dev, file, request, ring)) {
- i915_gem_next_request_seqno(dev, ring);
+ if (request == NULL || i915_add_request(ring, file, request)) {
+ i915_gem_next_request_seqno(ring);
kfree(request);
}
}
@@ -1000,10 +968,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -1113,7 +1077,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
@@ -1170,11 +1134,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
- if (ret)
- goto err;
-
- seqno = i915_gem_next_request_seqno(dev, ring);
+ seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
@@ -1189,6 +1149,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ trace_i915_gem_ring_dispatch(ring, seqno);
+
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1245,11 +1207,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -1330,17 +1287,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (exec2_list == NULL)
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+ args->buffer_count);
if (exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
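The allocation above tries a plain kmalloc() first, with __GFP_NOWARN | __GFP_NORETRY so an oversized request fails fast and quietly, and only then falls back to drm_malloc_ab(), which can satisfy large exec lists from vmalloc space. A kernel-style sketch of the pattern, with a hypothetical helper name:

static void *exec_list_alloc(size_t nmemb, size_t size)
{
	/* cheap contiguous attempt: no OOM warning, no reclaim retries */
	void *p = kmalloc(nmemb * size,
			  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);

	if (p == NULL)
		p = drm_malloc_ab(nmemb, size);	/* may fall back to vmalloc */
	return p;
}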
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d64843e18df2..281ad3d6115d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- int ret;
-
- ret = i915_gem_check_is_wedged(dev);
- if (ret)
- return ret;
+ int ret = 0;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL)
+ if (&obj->base == NULL)
return -ENOENT;
if (!i915_tiling_ok(dev,
@@ -384,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL)
+ if (&obj->base == NULL)
return -ENOENT;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8a9e08bf1cf7..188b497e5076 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline u32
-i915_pipestat(int pipe)
-{
- if (pipe == 0)
- return PIPEASTAT;
- if (pipe == 1)
- return PIPEBSTAT;
- BUG();
-}
-
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
- u32 reg = i915_pipestat(pipe);
+ u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
@@ -112,7 +102,7 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
- u32 reg = i915_pipestat(pipe);
+ u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
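PIPESTAT(pipe) and its siblings are thin wrappers around i915's _PIPE() helper, which interpolates between the pipe A and pipe B register offsets. Its definition in i915_reg.h of this era is, to the best of my reading:

#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)

so PIPESTAT(0) == 0x70024 and PIPESTAT(1) == 0x71024, matching the old PIPEASTAT/PIPEBSTAT constants that the open-coded helper returned.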
@@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ high_frame = PIPEFRAME(pipe);
+ low_frame = PIPEFRAMEPIXEL(pipe);
/*
* High & low register fields aren't synchronized, so make sure
@@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+ int reg = PIPE_FRMCOUNT_GM45(pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
@@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
@@ -367,7 +357,7 @@ static void notify_ring(struct drm_device *dev,
return;
seqno = ring->get_seqno(ring);
- trace_i915_gem_request_complete(dev, seqno);
+ trace_i915_gem_request_complete(ring, seqno);
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
@@ -419,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 pch_iir;
+ int pipe;
pch_iir = I915_READ(SDEIIR);
@@ -439,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev)
if (pch_iir & SDE_POISON)
DRM_ERROR("PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK) {
- u32 fdia, fdib;
-
- fdia = I915_READ(FDI_RXA_IIR);
- fdib = I915_READ(FDI_RXB_IIR);
- DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
- }
+ if (pch_iir & SDE_FDI_MASK)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
@@ -650,9 +639,14 @@ static void
i915_error_state_free(struct drm_device *dev,
struct drm_i915_error_state *error)
{
- i915_error_object_free(error->batchbuffer[0]);
- i915_error_object_free(error->batchbuffer[1]);
- i915_error_object_free(error->ringbuffer);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
+ i915_error_object_free(error->batchbuffer[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
+ i915_error_object_free(error->ringbuffer[i]);
+
kfree(error->active_bo);
kfree(error->overlay);
kfree(error);
@@ -767,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev)
struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
unsigned long flags;
- int i;
+ int i, pipe;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
@@ -775,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error)
return;
+ /* Account for pipe specific data like PIPE*STAT */
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
- DRM_DEBUG_DRIVER("generating error event\n");
+ DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+ dev->primary->index);
error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- error->pipeastat = I915_READ(PIPEASTAT);
- error->pipebstat = I915_READ(PIPEBSTAT);
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
error->instpm = I915_READ(INSTPM);
error->error = 0;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -826,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev)
}
i915_gem_record_fences(dev, error);
- /* Record the active batchbuffers */
- for (i = 0; i < I915_NUM_RINGS; i++)
+ /* Record the active batch and ring buffers */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
error->batchbuffer[i] =
i915_error_first_batchbuffer(dev_priv,
&dev_priv->ring[i]);
- /* Record the ringbuffer */
- error->ringbuffer = i915_error_object_create(dev_priv,
- dev_priv->ring[RCS].obj);
+ error->ringbuffer[i] =
+ i915_error_object_create(dev_priv,
+ dev_priv->ring[i].obj);
+ }
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@@ -907,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
+ int pipe;
if (!eir)
return;
@@ -955,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
-
- printk(KERN_ERR "memory refresh error\n");
- printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
- pipea_stats);
- printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
- pipeb_stats);
+ printk(KERN_ERR "memory refresh error:\n");
+ for_each_pipe(pipe)
+ printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+ pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
@@ -1076,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
- int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
- int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+ int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
@@ -1099,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv;
u32 iir, new_iir;
- u32 pipea_stats, pipeb_stats;
+ u32 pipe_stats[I915_MAX_PIPES];
u32 vblank_status;
int vblank = 0;
unsigned long irqflags;
int irq_received;
- int ret = IRQ_NONE;
+ int ret = IRQ_NONE, pipe;
+ bool blc_event = false;
atomic_inc(&dev_priv->irq_received);
@@ -1127,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- pipea_stats = I915_READ(PIPEASTAT);
- pipeb_stats = I915_READ(PIPEBSTAT);
-
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
- /*
- * Clear the PIPE(A|B)STAT regs before the IIR
- */
- if (pipea_stats & 0x8000ffff) {
- if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe a underrun\n");
- I915_WRITE(PIPEASTAT, pipea_stats);
- irq_received = 1;
- }
-
- if (pipeb_stats & 0x8000ffff) {
- if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe b underrun\n");
- I915_WRITE(PIPEBSTAT, pipeb_stats);
- irq_received = 1;
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
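for_each_pipe(), used throughout these hunks, is a plain iterator over the display pipes. A minimal sketch, assuming an I915_MAX_PIPES-style upper bound (the exact bound may instead come from a per-device pipe count):

#define for_each_pipe(p) for ((p) = 0; (p) < I915_MAX_PIPES; (p)++)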
@@ -1198,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
- if (pipea_stats & vblank_status &&
- drm_handle_vblank(dev, 0)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 0);
- intel_finish_page_flip(dev, 0);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & vblank_status &&
+ drm_handle_vblank(dev, pipe)) {
+ vblank++;
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
}
- }
- if (pipeb_stats & vblank_status &&
- drm_handle_vblank(dev, 1)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 1);
- intel_finish_page_flip(dev, 1);
- }
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
}
- if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- (iir & I915_ASLE_INTERRUPT))
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
/* With MSI, interrupts are only generated when iir
@@ -1268,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->counter;
}
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- if (dev_priv->trace_irq_seqno == 0 &&
- ring->irq_get(ring))
- dev_priv->trace_irq_seqno = seqno;
-}
-
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1377,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ /* maintain vblank delivery even in deep C-states */
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
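INSTPM behaves as a masked register: bits 31:16 select which of bits 15:0 a write may change, so writing just INSTPM_AGPBUSY_DIS << 16 clears the bit, while INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS sets it. A sketch of the pair used in this hunk and the next; the masked-write convention itself is an assumption carried over from how these i915 registers are generally documented:

/* enable_vblank: clear AGPBUSY_DIS so vblanks keep arriving in C3 */
I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
/* disable_vblank: set it again and let the chip gate AGPBUSY# */
I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);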
@@ -1390,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM,
+ INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+
if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1400,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void i915_enable_interrupt (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PCH_SPLIT(dev))
- intel_opregion_enable_asle(dev);
- dev_priv->irq_enabled = 1;
-}
-
-
/* Set the vblank monitor pipe
*/
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -1646,12 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
- hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+ hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+ SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
} else {
- hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK;
+ hotplug_mask = (SDE_CRT_HOTPLUG |
+ SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG |
+ SDE_PORTD_HOTPLUG |
+ SDE_AUX_MASK);
}
dev_priv->pch_irq_mask = ~hotplug_mask;
@@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
atomic_set(&dev_priv->irq_received, 0);
@@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xeffe);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
POSTING_READ(IER);
@@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
if (!dev_priv)
return;
@@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe),
+ I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2abe240dae58..f39ac3a0fa93 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -405,9 +405,12 @@
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
-#define FW_BLC2 0x020dc
+#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define FW_BLC_SELF_EN_MASK (1<<31)
#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
@@ -706,9 +709,9 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
-#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
+#define _DPLL_A 0x06014
+#define _DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
@@ -779,7 +782,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD 0x0601c /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -816,14 +819,14 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define DPLL_B_MD 0x06020 /* 965+ only */
-#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
-#define FPA0 0x06040
-#define FPA1 0x06044
-#define FPB0 0x06048
-#define FPB1 0x0604c
-#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
-#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
+#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+#define _FPA0 0x06040
+#define _FPA1 0x06044
+#define _FPB0 0x06048
+#define _FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@@ -962,8 +965,9 @@
* Palette regs
*/
-#define PALETTE_A 0x0a000
-#define PALETTE_B 0x0a800
+#define _PALETTE_A 0x0a000
+#define _PALETTE_B 0x0a800
+#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
/* MCH MMIO space */
@@ -1267,32 +1271,32 @@
*/
/* Pipe A timing regs */
-#define HTOTAL_A 0x60000
-#define HBLANK_A 0x60004
-#define HSYNC_A 0x60008
-#define VTOTAL_A 0x6000c
-#define VBLANK_A 0x60010
-#define VSYNC_A 0x60014
-#define PIPEASRC 0x6001c
-#define BCLRPAT_A 0x60020
+#define _HTOTAL_A 0x60000
+#define _HBLANK_A 0x60004
+#define _HSYNC_A 0x60008
+#define _VTOTAL_A 0x6000c
+#define _VBLANK_A 0x60010
+#define _VSYNC_A 0x60014
+#define _PIPEASRC 0x6001c
+#define _BCLRPAT_A 0x60020
/* Pipe B timing regs */
-#define HTOTAL_B 0x61000
-#define HBLANK_B 0x61004
-#define HSYNC_B 0x61008
-#define VTOTAL_B 0x6100c
-#define VBLANK_B 0x61010
-#define VSYNC_B 0x61014
-#define PIPEBSRC 0x6101c
-#define BCLRPAT_B 0x61020
-
-#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+#define _HTOTAL_B 0x61000
+#define _HBLANK_B 0x61004
+#define _HSYNC_B 0x61008
+#define _VTOTAL_B 0x6100c
+#define _VBLANK_B 0x61010
+#define _VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+
+#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -1386,6 +1390,7 @@
#define SDVO_ENCODING_HDMI (0x2 << 10)
/** Required for HDMI operation */

#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
#define SDVO_BORDER_ENABLE (1 << 7)
#define SDVO_AUDIO_ENABLE (1 << 6)
/** New with 965, default is to be set */
@@ -1441,8 +1446,13 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+#define LVDS_PIPE_MASK (1 << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY (1 << 21)
+#define LVDS_HSYNC_POLARITY (1 << 20)
+
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1476,6 +1486,9 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
+#define LVDS_PIPE_ENABLED(V, P) \
+ (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
+
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
@@ -2064,6 +2077,10 @@
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+#define DP_PIPE_ENABLED(V, P) \
+ (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -2206,8 +2223,8 @@
* which is after the LUTs, so we want the bytes for our color format.
* For our current usage, this is always 3, one byte for R, G and B.
*/
-#define PIPEA_GMCH_DATA_M 0x70050
-#define PIPEB_GMCH_DATA_M 0x71050
+#define _PIPEA_GMCH_DATA_M 0x70050
+#define _PIPEB_GMCH_DATA_M 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
@@ -2215,8 +2232,8 @@
#define PIPE_GMCH_DATA_M_MASK (0xffffff)
-#define PIPEA_GMCH_DATA_N 0x70054
-#define PIPEB_GMCH_DATA_N 0x71054
+#define _PIPEA_GMCH_DATA_N 0x70054
+#define _PIPEB_GMCH_DATA_N 0x71054
#define PIPE_GMCH_DATA_N_MASK (0xffffff)
/*
@@ -2230,20 +2247,25 @@
* Attributes and VB-ID.
*/
-#define PIPEA_DP_LINK_M 0x70060
-#define PIPEB_DP_LINK_M 0x71060
+#define _PIPEA_DP_LINK_M 0x70060
+#define _PIPEB_DP_LINK_M 0x71060
#define PIPEA_DP_LINK_M_MASK (0xffffff)
-#define PIPEA_DP_LINK_N 0x70064
-#define PIPEB_DP_LINK_N 0x71064
+#define _PIPEA_DP_LINK_N 0x70064
+#define _PIPEB_DP_LINK_N 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
/* Display & cursor control */
/* Pipe A */
-#define PIPEADSL 0x70000
+#define _PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff
-#define PIPEACONF 0x70008
+#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -2269,7 +2291,7 @@
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define PIPEASTAT 0x70024
+#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
@@ -2305,10 +2327,12 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
-#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
-#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
+#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
+#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
+#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2470,20 +2494,21 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define PIPEAFRAMEHIGH 0x70040
+#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define PIPEAFRAMEPIXEL 0x70044
+#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
-#define PIPEA_FRMCOUNT_GM45 0x70040
-#define PIPEA_FLIPCOUNT_GM45 0x70044
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
-#define CURACNTR 0x70080
+#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
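The comment block above these counters spells out the split read; a sketch of that loop using the PIPEFRAME()/PIPEFRAMEPIXEL() macros defined earlier in this series (the function name is hypothetical):

static u32 read_frame_counter(struct drm_i915_private *dev_priv, int pipe)
{
	u32 high1, high2, low;

	/* re-read the high half until it is stable across the low read */
	do {
		high1 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(PIPEFRAME(pipe)) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	/* 16-bit high field, 8-bit low field -> 24-bit frame number */
	return (high1 << 8) | (low >> PIPE_FRAME_LOW_SHIFT);
}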
@@ -2504,23 +2529,23 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURABASE 0x70084
-#define CURAPOS 0x70088
+#define _CURABASE 0x70084
+#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define CURBCNTR 0x700c0
-#define CURBBASE 0x700c4
-#define CURBPOS 0x700c8
+#define _CURBCNTR 0x700c0
+#define _CURBBASE 0x700c4
+#define _CURBPOS 0x700c8
-#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
-#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
-#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
/* Display A control */
-#define DSPACNTR 0x70180
+#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -2534,9 +2559,10 @@
#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_SEL_PIPE_SHIFT 24
+#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
-#define DISPPLANE_SEL_PIPE_B (1<<24)
+#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
#define DISPPLANE_SRC_KEY_DISABLE 0
#define DISPPLANE_LINE_DOUBLE (1<<20)
@@ -2545,20 +2571,20 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define DSPAADDR 0x70184
-#define DSPASTRIDE 0x70188
-#define DSPAPOS 0x7018C /* reserved */
-#define DSPASIZE 0x70190
-#define DSPASURF 0x7019C /* 965+ only */
-#define DSPATILEOFF 0x701A4 /* 965+ only */
-
-#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
-#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
-#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
-#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
-#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
-#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
-#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+#define _DSPAADDR 0x70184
+#define _DSPASTRIDE 0x70188
+#define _DSPAPOS 0x7018C /* reserved */
+#define _DSPASIZE 0x70190
+#define _DSPASURF 0x7019C /* 965+ only */
+#define _DSPATILEOFF 0x701A4 /* 965+ only */
+
+#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
/* VBIOS flags */
#define SWF00 0x71410
@@ -2576,27 +2602,27 @@
#define SWF32 0x7241c
/* Pipe B */
-#define PIPEBDSL 0x71000
-#define PIPEBCONF 0x71008
-#define PIPEBSTAT 0x71024
-#define PIPEBFRAMEHIGH 0x71040
-#define PIPEBFRAMEPIXEL 0x71044
-#define PIPEB_FRMCOUNT_GM45 0x71040
-#define PIPEB_FLIPCOUNT_GM45 0x71044
+#define _PIPEBDSL 0x71000
+#define _PIPEBCONF 0x71008
+#define _PIPEBSTAT 0x71024
+#define _PIPEBFRAMEHIGH 0x71040
+#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEB_FRMCOUNT_GM45 0x71040
+#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
-#define DSPBCNTR 0x71180
+#define _DSPBCNTR 0x71180
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define DSPBADDR 0x71184
-#define DSPBSTRIDE 0x71188
-#define DSPBPOS 0x7118C
-#define DSPBSIZE 0x71190
-#define DSPBSURF 0x7119C
-#define DSPBTILEOFF 0x711A4
+#define _DSPBADDR 0x71184
+#define _DSPBSTRIDE 0x71188
+#define _DSPBPOS 0x7118C
+#define _DSPBSIZE 0x71190
+#define _DSPBSURF 0x7119C
+#define _DSPBTILEOFF 0x711A4
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -2650,68 +2676,80 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define PIPEA_DATA_M1 0x60030
+#define _PIPEA_DATA_M1 0x60030
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
-#define PIPEA_DATA_N1 0x60034
+#define _PIPEA_DATA_N1 0x60034
#define PIPE_DATA_N1_OFFSET 0
-#define PIPEA_DATA_M2 0x60038
+#define _PIPEA_DATA_M2 0x60038
#define PIPE_DATA_M2_OFFSET 0
-#define PIPEA_DATA_N2 0x6003c
+#define _PIPEA_DATA_N2 0x6003c
#define PIPE_DATA_N2_OFFSET 0
-#define PIPEA_LINK_M1 0x60040
+#define _PIPEA_LINK_M1 0x60040
#define PIPE_LINK_M1_OFFSET 0
-#define PIPEA_LINK_N1 0x60044
+#define _PIPEA_LINK_N1 0x60044
#define PIPE_LINK_N1_OFFSET 0
-#define PIPEA_LINK_M2 0x60048
+#define _PIPEA_LINK_M2 0x60048
#define PIPE_LINK_M2_OFFSET 0
-#define PIPEA_LINK_N2 0x6004c
+#define _PIPEA_LINK_N2 0x6004c
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs have the same layout, starting from 0x61000 */
-#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M1 0x61030
+#define _PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_N2 0x6103c
+#define _PIPEB_DATA_M2 0x61038
+#define _PIPEB_DATA_N2 0x6103c
-#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M1 0x61040
+#define _PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_N2 0x6104c
+#define _PIPEB_LINK_M2 0x61048
+#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
-#define PFA_CTL_1 0x68080
-#define PFB_CTL_1 0x68880
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
#define PF_FILTER_EDGE_ENHANCE (2<<23)
#define PF_FILTER_EDGE_SOFTEN (3<<23)
-#define PFA_WIN_SZ 0x68074
-#define PFB_WIN_SZ 0x68874
-#define PFA_WIN_POS 0x68070
-#define PFB_WIN_POS 0x68870
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+
+#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
/* legacy palette */
-#define LGC_PALETTE_A 0x4a000
-#define LGC_PALETTE_B 0x4a800
+#define _LGC_PALETTE_A 0x4a000
+#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -2877,17 +2915,17 @@
#define PCH_GMBUS4 0xc5110
#define PCH_GMBUS5 0xc5120
-#define PCH_DPLL_A 0xc6014
-#define PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
-#define PCH_FPA0 0xc6040
+#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
-#define PCH_FPA1 0xc6044
-#define PCH_FPB0 0xc6048
-#define PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
-#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB0 0xc6048
+#define _PCH_FPB1 0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -2906,6 +2944,7 @@
#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
#define DREF_SSC4_DOWNSPREAD (0<<6)
#define DREF_SSC4_CENTERSPREAD (1<<6)
#define DREF_SSC1_DISABLE (0<<1)
@@ -2938,60 +2977,69 @@
/* transcoder */
-#define TRANS_HTOTAL_A 0xe0000
+#define _TRANS_HTOTAL_A 0xe0000
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
-#define TRANS_HBLANK_A 0xe0004
+#define _TRANS_HBLANK_A 0xe0004
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
-#define TRANS_HSYNC_A 0xe0008
+#define _TRANS_HSYNC_A 0xe0008
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
-#define TRANS_VTOTAL_A 0xe000c
+#define _TRANS_VTOTAL_A 0xe000c
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
-#define TRANS_VBLANK_A 0xe0010
+#define _TRANS_VBLANK_A 0xe0010
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
-#define TRANS_VSYNC_A 0xe0014
+#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
-#define TRANSA_DATA_M1 0xe0030
-#define TRANSA_DATA_N1 0xe0034
-#define TRANSA_DATA_M2 0xe0038
-#define TRANSA_DATA_N2 0xe003c
-#define TRANSA_DP_LINK_M1 0xe0040
-#define TRANSA_DP_LINK_N1 0xe0044
-#define TRANSA_DP_LINK_M2 0xe0048
-#define TRANSA_DP_LINK_N2 0xe004c
-
-#define TRANS_HTOTAL_B 0xe1000
-#define TRANS_HBLANK_B 0xe1004
-#define TRANS_HSYNC_B 0xe1008
-#define TRANS_VTOTAL_B 0xe100c
-#define TRANS_VBLANK_B 0xe1010
-#define TRANS_VSYNC_B 0xe1014
-
-#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
-#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
-#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
-#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
-#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
-#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
-
-#define TRANSB_DATA_M1 0xe1030
-#define TRANSB_DATA_N1 0xe1034
-#define TRANSB_DATA_M2 0xe1038
-#define TRANSB_DATA_N2 0xe103c
-#define TRANSB_DP_LINK_M1 0xe1040
-#define TRANSB_DP_LINK_N1 0xe1044
-#define TRANSB_DP_LINK_M2 0xe1048
-#define TRANSB_DP_LINK_N2 0xe104c
-
-#define TRANSACONF 0xf0008
-#define TRANSBCONF 0xf1008
-#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
+#define _TRANSA_DATA_M1 0xe0030
+#define _TRANSA_DATA_N1 0xe0034
+#define _TRANSA_DATA_M2 0xe0038
+#define _TRANSA_DATA_N2 0xe003c
+#define _TRANSA_DP_LINK_M1 0xe0040
+#define _TRANSA_DP_LINK_N1 0xe0044
+#define _TRANSA_DP_LINK_M2 0xe0048
+#define _TRANSA_DP_LINK_N2 0xe004c
+
+#define _TRANS_HTOTAL_B 0xe1000
+#define _TRANS_HBLANK_B 0xe1004
+#define _TRANS_HSYNC_B 0xe1008
+#define _TRANS_VTOTAL_B 0xe100c
+#define _TRANS_VBLANK_B 0xe1010
+#define _TRANS_VSYNC_B 0xe1014
+
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+
+#define _TRANSB_DATA_M1 0xe1030
+#define _TRANSB_DATA_N1 0xe1034
+#define _TRANSB_DATA_M2 0xe1038
+#define _TRANSB_DATA_N2 0xe103c
+#define _TRANSB_DP_LINK_M1 0xe1040
+#define _TRANSB_DP_LINK_N1 0xe1044
+#define _TRANSB_DP_LINK_M2 0xe1048
+#define _TRANSB_DP_LINK_N2 0xe104c
+
+#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
+#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
+#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
+#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
+#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
+#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
+#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
+#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
+
+#define _TRANSACONF 0xf0008
+#define _TRANSBCONF 0xf1008
+#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@@ -3009,18 +3057,19 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
-#define FDI_RXA_CHICKEN 0xc200c
-#define FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
-#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+#define _FDI_RXA_CHICKEN 0xc200c
+#define _FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
/* CPU: FDI_TX */
-#define FDI_TXA_CTL 0x60100
-#define FDI_TXB_CTL 0x61100
-#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
+#define _FDI_TXA_CTL 0x60100
+#define _FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -3060,9 +3109,9 @@
#define FDI_SCRAMBLING_DISABLE (1<<7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
-#define FDI_RXA_CTL 0xf000c
-#define FDI_RXB_CTL 0xf100c
-#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
+#define _FDI_RXA_CTL 0xf000c
+#define _FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
/* train, dp width same as FDI_TX */
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
@@ -3087,15 +3136,15 @@
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
-#define FDI_RXA_MISC 0xf0010
-#define FDI_RXB_MISC 0xf1010
-#define FDI_RXA_TUSIZE1 0xf0030
-#define FDI_RXA_TUSIZE2 0xf0038
-#define FDI_RXB_TUSIZE1 0xf1030
-#define FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
-#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
-#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define _FDI_RXA_TUSIZE1 0xf0030
+#define _FDI_RXA_TUSIZE2 0xf0038
+#define _FDI_RXB_TUSIZE1 0xf1030
+#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -3110,12 +3159,12 @@
#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
-#define FDI_RXA_IIR 0xf0014
-#define FDI_RXA_IMR 0xf0018
-#define FDI_RXB_IIR 0xf1014
-#define FDI_RXB_IMR 0xf1018
-#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
-#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
+#define _FDI_RXA_IIR 0xf0014
+#define _FDI_RXA_IMR 0xf0018
+#define _FDI_RXB_IIR 0xf1014
+#define _FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
@@ -3145,11 +3194,15 @@
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+#define ADPA_PIPE_ENABLED(V, P) \
+ (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
+
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
+#define TRANSCODER_MASK (1 << 30)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3165,6 +3218,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+#define HDMI_PIPE_ENABLED(V, P) \
+ (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
+
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
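The LVDS/DP/ADPA/HDMI *_PIPE_ENABLED(V, P) helpers added across this file fold the port-enable test and the pipe-select test into a single compare. A hypothetical usage sketch, not code from this patch:

u32 val = I915_READ(HDMIB);
if (HDMI_PIPE_ENABLED(val, pipe))
	DRM_DEBUG_KMS("HDMIB driven by pipe %c\n", pipe_name(pipe));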
@@ -3240,6 +3296,7 @@
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_NONE (3<<29)
#define TRANS_DP_PORT_SEL_MASK (3<<29)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
@@ -3290,15 +3347,28 @@
#define GEN6_RP_DOWN_TIMEOUT 0xA010
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
+#define GEN6_CAGF_SHIFT 8
+#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
-#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
-#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
+#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
+#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
+#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_CUR_UP_EI 0xA050
+#define GEN6_CURICONT_MASK 0xffffff
+#define GEN6_RP_CUR_UP 0xA054
+#define GEN6_CURBSYTAVG_MASK 0xffffff
+#define GEN6_RP_PREV_UP 0xA058
+#define GEN6_RP_CUR_DOWN_EI 0xA05C
+#define GEN6_CURIAVG_MASK 0xffffff
+#define GEN6_RP_CUR_DOWN 0xA060
+#define GEN6_RP_PREV_DOWN 0xA064
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
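GEN6_CAGF_MASK/GEN6_CAGF_SHIFT expose the "current actual GPU frequency" ratio within GEN6_RPSTAT1. A sketch of reading it; the 50 MHz scale per ratio step is an assumption about gen6 and is not stated in this patch:

u32 rpstat = I915_READ(GEN6_RPSTAT1);
u32 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
DRM_DEBUG_DRIVER("current GPU frequency: %u MHz\n", cagf * 50);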
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0521ecf26017..7e992a8e9098 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (HAS_PCH_SPLIT(dev)) {
- dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
- } else {
- dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
- }
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
@@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->saveFPA0 = I915_READ(FPA0);
- dev_priv->saveFPA1 = I915_READ(FPA1);
- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+ dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+ dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+ dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->saveFPB0 = I915_READ(FPB0);
- dev_priv->saveFPB1 = I915_READ(FPB1);
- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+ dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+ dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+ dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+ dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
@@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = PCH_DPLL_A;
- dpll_b_reg = PCH_DPLL_B;
- fpa0_reg = PCH_FPA0;
- fpb0_reg = PCH_FPB0;
- fpa1_reg = PCH_FPA1;
- fpb1_reg = PCH_FPB1;
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
} else {
- dpll_a_reg = DPLL_A;
- dpll_b_reg = DPLL_B;
- fpa0_reg = FPA0;
- fpb0_reg = FPB0;
- fpa1_reg = FPA1;
- fpb1_reg = FPB1;
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
@@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- POSTING_READ(DPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
- I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
- I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
- I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
@@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- POSTING_READ(DPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
- I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
- I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
- I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
@@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveDP_B = I915_READ(DP_B);
dev_priv->saveDP_C = I915_READ(DP_C);
dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
}
/* FIXME: save TV & SDVO state */
@@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev)
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
/* This is only meaningful in non-KMS mode */
@@ -797,9 +796,6 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
-
i915_save_display(dev);
/* Interrupt state */
@@ -808,8 +804,8 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
} else {
@@ -846,9 +842,6 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
i915_restore_display(dev);
/* Interrupt state */
@@ -857,11 +850,11 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
} else {
- I915_WRITE (IER, dev_priv->saveIER);
- I915_WRITE (IMR, dev_priv->saveIMR);
+ I915_WRITE(IER, dev_priv->saveIER);
+ I915_WRITE(IMR, dev_priv->saveIMR);
}
/* Clock gating state */
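The underscore-prefixed names introduced above (_DPLL_A, _PIPEACONF, ...) are the raw per-pipe register addresses; the rename frees the plain names for the pipe-indexed lookup macros used later in this patch (DPLL(pipe), PIPECONF(pipe), ...). A minimal sketch of the pattern, assuming _PIPE matches its upstream i915_reg.h definition:

	/* Interpolate between the pipe A and pipe B register instances. */
	#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
	/* Pipe-indexed accessors replacing open-coded A/B selection such as
	 * the dpll_a_reg/dpll_b_reg pairs above. */
	#define DPLL(pipe)     _PIPE(pipe, _DPLL_A, _DPLL_B)
	#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)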
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7f0fc3ed61aa..d623fefbfaca 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
#include <drm/drmP.h>
#include "i915_drv.h"
+#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,9 +17,7 @@
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
-
TP_PROTO(struct drm_i915_gem_object *obj),
-
TP_ARGS(obj),
TP_STRUCT__entry(
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create,
);
TRACE_EVENT(i915_gem_object_bind,
-
- TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
-
- TP_ARGS(obj, gtt_offset, mappable),
+ TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
+ TP_ARGS(obj, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, gtt_offset)
+ __field(u32, offset)
+ __field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->gtt_offset = gtt_offset;
+ __entry->offset = obj->gtt_space->start;
+ __entry->size = obj->gtt_space->size;
__entry->mappable = mappable;
),
- TP_printk("obj=%p, gtt_offset=%08x%s",
- __entry->obj, __entry->gtt_offset,
+ TP_printk("obj=%p, offset=%08x size=%x%s",
+ __entry->obj, __entry->offset, __entry->size,
__entry->mappable ? ", mappable" : "")
);
-TRACE_EVENT(i915_gem_object_change_domain,
+TRACE_EVENT(i915_gem_object_unbind,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, size)
+ ),
- TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->offset = obj->gtt_space->start;
+ __entry->size = obj->gtt_space->size;
+ ),
- TP_ARGS(obj, old_read_domains, old_write_domain),
+ TP_printk("obj=%p, offset=%08x size=%x",
+ __entry->obj, __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+ TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
@@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain,
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write << 16);
),
- TP_printk("obj=%p, read=%04x, write=%04x",
+ TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
- __entry->read_domains, __entry->write_domain)
+ __entry->read_domains >> 16,
+ __entry->read_domains & 0xffff,
+ __entry->write_domain >> 16,
+ __entry->write_domain & 0xffff)
);
-DECLARE_EVENT_CLASS(i915_gem_object,
+TRACE_EVENT(i915_gem_object_pwrite,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
- TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
+ ),
- TP_ARGS(obj),
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->offset = offset;
+ __entry->len = len;
+ ),
+
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
+ __entry->offset = offset;
+ __entry->len = len;
),
- TP_printk("obj=%p", __entry->obj)
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+TRACE_EVENT(i915_gem_object_fault,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+ TP_ARGS(obj, index, gtt, write),
+
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, index)
+ __field(bool, gtt)
+ __field(bool, write)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->index = index;
+ __entry->gtt = gtt;
+ __entry->write = write;
+ ),
+ TP_printk("obj=%p, %s index=%u %s",
+ __entry->obj,
+ __entry->gtt ? "GTT" : "CPU",
+ __entry->index,
+ __entry->write ? ", writable" : "")
+);
+
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj),
- TP_ARGS(obj)
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj)
+);
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_PROTO(struct drm_i915_gem_object *obj),
-
TP_ARGS(obj)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+TRACE_EVENT(i915_gem_evict,
+ TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
+ TP_ARGS(dev, size, align, mappable),
- TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, size)
+ __field(u32, align)
+ __field(bool, mappable)
+ ),
- TP_ARGS(obj)
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->mappable = mappable;
+ ),
+
+ TP_printk("dev=%d, size=%d, align=%d %s",
+ __entry->dev, __entry->size, __entry->align,
+ __entry->mappable ? ", mappable" : "")
);
-/* batch tracing */
+TRACE_EVENT(i915_gem_evict_everything,
+ TP_PROTO(struct drm_device *dev, bool purgeable),
+ TP_ARGS(dev, purgeable),
-TRACE_EVENT(i915_gem_request_submit,
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(bool, purgeable)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->purgeable = purgeable;
+ ),
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_printk("dev=%d%s",
+ __entry->dev,
+ __entry->purgeable ? ", purgeable only" : "")
+);
- TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_ring_dispatch,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
- i915_trace_irq_get(dev, seqno);
+ i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_flush,
-
- TP_PROTO(struct drm_device *dev, u32 seqno,
- u32 flush_domains, u32 invalidate_domains),
-
- TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+TRACE_EVENT(i915_gem_ring_flush,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+ TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, seqno)
- __field(u32, flush_domains)
- __field(u32, invalidate_domains)
+ __field(u32, ring)
+ __field(u32, invalidate)
+ __field(u32, flush)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- __entry->flush_domains = flush_domains;
- __entry->invalidate_domains = invalidate_domains;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->invalidate = invalidate;
+ __entry->flush = flush;
),
- TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
- __entry->dev, __entry->seqno,
- __entry->flush_domains, __entry->invalidate_domains)
+ TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+ __entry->dev, __entry->ring,
+ __entry->invalidate, __entry->flush)
);
DECLARE_EVENT_CLASS(i915_gem_request,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno),
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
+);
- TP_ARGS(dev, seqno)
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev),
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
),
- TP_printk("dev=%u", __entry->dev)
+ TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev)
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev)
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring)
);
TRACE_EVENT(i915_flip_request,
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete,
);
TRACE_EVENT(i915_reg_rw,
- TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+ TP_PROTO(bool write, u32 reg, u64 val, int len),
- TP_ARGS(cmd, reg, val, len),
+ TP_ARGS(write, reg, val, len),
TP_STRUCT__entry(
- __field(int, cmd)
- __field(uint32_t, reg)
- __field(uint64_t, val)
- __field(int, len)
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
),
TP_fast_assign(
- __entry->cmd = cmd;
+ __entry->val = (u64)val;
__entry->reg = reg;
- __entry->val = (uint64_t)val;
+ __entry->write = write;
__entry->len = len;
),
- TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
- __entry->cmd, __entry->reg, __entry->val, __entry->len)
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
);
#endif /* _I915_TRACE_H_ */
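The reworked i915_reg_rw event takes an explicit write flag and a 64-bit value, and is intended to be emitted from the MMIO access helpers. A hypothetical call site (example_i915_write32 is an assumption for illustration, not part of this patch):

	/* Trace the access before issuing the MMIO write. */
	static inline void example_i915_write32(struct drm_i915_private *dev_priv,
						u32 reg, u32 val)
	{
		trace_i915_reg_rw(true, reg, (u64)val, sizeof(val));
		writel(val, dev_priv->regs + reg);
	}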
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0b44956c336b..fb5b4d426ae0 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -226,29 +226,49 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct bdb_sdvo_lvds_options *sdvo_lvds_options;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int index;
- sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
- if (!sdvo_lvds_options)
- return;
+ index = i915_vbt_sdvo_panel_type;
+ if (index == -1) {
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
if (!dvo_timing)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode,
- dvo_timing + sdvo_lvds_options->panel_type);
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
- return;
+ DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+ bool alternate)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ return alternate ? 66 : 48;
+ case 3:
+ case 4:
+ return alternate ? 100 : 96;
+ default:
+ return alternate ? 100 : 120;
+ }
}
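The new helper folds the old IS_I85X/IS_GEN5/IS_GEN6 branches into one generation switch. The resulting SSC reference frequencies in MHz, with illustrative checks (gen2_dev and gen5_dev are hypothetical devices in a test harness, not patch code):

	/*
	 * gen 2:    48 default /  66 alternate
	 * gen 3-4:  96 default / 100 alternate
	 * gen 5+:  120 default / 100 alternate
	 */
	BUG_ON(intel_bios_ssc_frequency(gen2_dev, false) != 48);
	BUG_ON(intel_bios_ssc_frequency(gen5_dev, true) != 100);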
static void
@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
-
- if (IS_I85X(dev))
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
- else if (IS_GEN5(dev) || IS_GEN6(dev))
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
- else
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+ dev_priv->lvds_ssc_freq =
+ intel_bios_ssc_frequency(dev, general->ssc_freq);
}
}
@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
/* LFP panel data */
@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
- dev_priv->lvds_use_ssc = 0;
+
+ /* Default to using SSC */
+ dev_priv->lvds_use_ssc = 1;
+ dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
/* eDP data */
dev_priv->edp.bpp = 18;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8a77ff4a7237..8342259f3160 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
u32 adpa, dpll_md;
u32 adpa_reg;
- if (intel_crtc->pipe == 0)
- dpll_md_reg = DPLL_A_MD;
- else
- dpll_md_reg = DPLL_B_MD;
+ dpll_md_reg = DPLL_MD(intel_crtc->pipe);
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= PORT_TRANS_A_SEL_CPT;
else
adpa |= ADPA_PIPE_A_SELECT;
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_A, 0);
} else {
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_B_SEL_CPT;
else
adpa |= ADPA_PIPE_B_SELECT;
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_B, 0);
}
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
I915_WRITE(adpa_reg, adpa);
}
@@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
DRM_DEBUG_KMS("starting load-detect on CRT\n");
- if (pipe == 0) {
- bclrpat_reg = BCLRPAT_A;
- vtotal_reg = VTOTAL_A;
- vblank_reg = VBLANK_A;
- vsync_reg = VSYNC_A;
- pipeconf_reg = PIPEACONF;
- pipe_dsl_reg = PIPEADSL;
- } else {
- bclrpat_reg = BCLRPAT_B;
- vtotal_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipeconf_reg = PIPEBCONF;
- pipe_dsl_reg = PIPEBDSL;
- }
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
save_bclrpat = I915_READ(bclrpat_reg);
save_vtotal = I915_READ(vtotal_reg);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 49fb54fd9a18..3106c0dc8389 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -989,7 +989,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+ int pipestat_reg = PIPESTAT(pipe);
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
@@ -1058,6 +1058,612 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
}
}
+static const char *state_string(bool enabled)
+{
+ return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
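Every assert_* helper below repeats this template: read the control register, derive the current state, and WARN with expected vs. current strings from state_string(). A caller-side sketch of how the guards are meant to be used (example_program_pipe is hypothetical):

	static void example_program_pipe(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
	{
		/* Catch programming the pipe while its clock is off. */
		assert_pll_enabled(dev_priv, pipe);
		/* ... write pipe timing registers ... */
	}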
+
+/* For ILK+ */
+static void assert_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PCH PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
+#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* ILK FDI PLL is always enabled */
+ if (dev_priv->info->gen == 5)
+ return;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int pp_reg, lvds_reg;
+ u32 val;
+ enum pipe panel_pipe = PIPE_A;
+ bool locked = true;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ pp_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ pp_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ val = I915_READ(pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+ panel_pipe = PIPE_B;
+
+ WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
+
+static void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ WARN(cur_state != state,
+ "pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+static void assert_plane_enabled(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ WARN(!(val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be active but is disabled\n",
+ plane_name(plane));
+}
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg, i;
+ u32 val;
+ int cur_pipe;
+
+ /* Planes are fixed to pipes on ILK+ */
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ return;
+
+ /* Need to check both planes against the pipe */
+ for (i = 0; i < 2; i++) {
+ reg = DSPCNTR(i);
+ val = I915_READ(reg);
+ cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+ WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ "plane %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(i), pipe_name(pipe));
+ }
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+ bool enabled;
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ enabled = !!(val & TRANS_ENABLE);
+ WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ WARN(DP_PIPE_ENABLED(val, pipe),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ WARN(HDMI_PIPE_ENABLED(val, pipe),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ WARN(ADPA_PIPE_ENABLED(val, pipe),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ WARN(LVDS_PIPE_ENABLED(val, pipe),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* No really, not for ILK+ */
+ BUG_ON(dev_priv->info->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+ assert_panel_unlocked(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+}
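The unrolled write/post/delay sequence above is a deliberate hardware warm-up; an equivalent loop form of the same tail, sketch only (reg and val as already computed in intel_enable_pll):

	/* Identical behavior to the three unrolled writes above. */
	int i;
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, val);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}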
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+/**
+ * intel_enable_pch_pll - enable PCH PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* PCH refclock must be enabled first */
+ assert_pch_refclk_enabled(dev_priv);
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, pipe);
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure PCH DPLL is enabled */
+ assert_pch_pll_enabled(dev_priv, pipe);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ I915_WRITE(reg, val | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder %d\n", pipe);
+}
+
+static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder\n");
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool pch_port)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv->dev))
+ assert_pll_enabled(dev_priv, pipe);
+ else {
+ if (pch_port) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv, pipe);
+ assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
+ }
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ val |= PIPECONF_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(dev_priv, pipe);
+
+ /* Don't disable pipe A or pipe A PLLs if needed */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ val &= ~PIPECONF_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_wait_for_pipe_off(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* If the pipe isn't enabled, we can't pump pixels and may hang */
+ assert_pipe_enabled(dev_priv, pipe);
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ val |= DISPLAY_PLANE_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ u32 reg = DSPADDR(plane);
+ I915_WRITE(reg, I915_READ(reg));
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ val &= ~DISPLAY_PLANE_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+static void disable_pch_dp(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (DP_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~DP_PORT_EN);
+}
+
+static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (HDMI_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~PORT_ENABLE);
+}
+
+/* Disable any ports connected to this transcoder */
+static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 reg, val;
+
+ val = I915_READ(PCH_PP_CONTROL);
+ I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
+
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ if (ADPA_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ if (LVDS_PIPE_ENABLED(val, pipe)) {
+ I915_WRITE(reg, val & ~LVDS_PORT_EN);
+ POSTING_READ(reg);
+ udelay(100);
+ }
+
+ disable_pch_hdmi(dev_priv, pipe, HDMIB);
+ disable_pch_hdmi(dev_priv, pipe, HDMIC);
+ disable_pch_hdmi(dev_priv, pipe, HDMID);
+}
+
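Taken together these helpers encode an ordering contract between the display blocks; a summary of the sequence implemented further below, comments only:

	/*
	 * Enable (ironlake_crtc_enable): FDI -> panel fitter -> pipe ->
	 * plane -> PCH (FDI training, PCH PLL, transcoder timing,
	 * transcoder enable).
	 * Disable (ironlake_crtc_disable): plane -> pipe -> panel fitter ->
	 * FDI -> PCH ports -> transcoder -> PCH PLL. Each disable helper
	 * asserts that its downstream consumers are already off.
	 */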
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1390,7 +1996,7 @@ static void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -1461,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
int ret;
@@ -1485,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
+ dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
- return ret;
+ goto err_interruptible;
ret = i915_gem_object_set_to_display_plane(obj, pipelined);
if (ret)
@@ -1499,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* a fence as the cost is not that onerous.
*/
if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence(obj, pipelined, false);
+ ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
}
+ dev_priv->mm.interruptible = true;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
+err_interruptible:
+ dev_priv->mm.interruptible = true;
return ret;
}
@@ -1641,7 +2252,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- ret = i915_gem_object_flush_gpu(obj, false);
+ ret = i915_gem_object_flush_gpu(obj);
(void) ret;
}
@@ -1753,8 +2364,13 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
u32 reg, temp, tries;
+ /* FDI needs bits from pipe & plane first */
+ assert_pipe_enabled(dev_priv, pipe);
+ assert_plane_enabled(dev_priv, plane);
+
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
@@ -1784,7 +2400,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+ }
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -1834,7 +2454,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
}
-static const int const snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param[] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2003,12 +2623,60 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc)
}
}
-static void intel_flush_display_plane(struct drm_device *dev,
- int plane)
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg = DSPADDR(plane);
- I915_WRITE(reg, I915_READ(reg));
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe)) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN);
+ }
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
}
/*
@@ -2070,114 +2738,21 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
u32 reg, temp;
- bool is_pch_port = false;
-
- if (intel_crtc->active)
- return;
-
- intel_crtc->active = true;
- intel_update_watermarks(dev);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0)
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- }
-
- is_pch_port = intel_crtc_driving_pch(crtc);
-
- if (is_pch_port)
- ironlake_fdi_enable(crtc);
- else {
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
-
- POSTING_READ(reg);
- udelay(100);
- }
-
- /* Enable panel fitting for LVDS */
- if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
- PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
- dev_priv->pch_pf_pos);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
- dev_priv->pch_pf_size);
- }
-
- /* Enable CPU pipe */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if ((temp & PIPECONF_ENABLE) == 0) {
- I915_WRITE(reg, temp | PIPECONF_ENABLE);
- POSTING_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
- /* configure and enable CPU plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
-
- /* Skip the PCH stuff if possible */
- if (!is_pch_port)
- goto done;
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
@@ -2185,14 +2760,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
else
ironlake_fdi_link_train(crtc);
- /* enable PCH DPLL */
- reg = PCH_DPLL(pipe);
- temp = I915_READ(reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
- POSTING_READ(reg);
- udelay(200);
- }
+ intel_enable_pch_pll(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* Be sure PCH DPLL SEL is set */
@@ -2204,7 +2772,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
- /* set transcoder timing */
+ /* set transcoder timing, panel must allow it */
+ assert_panel_unlocked(dev_priv, pipe);
I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
@@ -2251,19 +2820,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
}
- /* enable PCH transcoder */
- reg = TRANSCONF(pipe);
- temp = I915_READ(reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
- I915_WRITE(reg, temp | TRANS_ENABLE);
- if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
- DRM_ERROR("failed to enable transcoder %d\n", pipe);
-done:
+ intel_enable_transcoder(dev_priv, pipe);
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 temp;
+ bool is_pch_port;
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
+
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else
+ ironlake_fdi_disable(crtc);
+
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ ironlake_pch_enable(crtc);
+
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -2285,116 +2891,58 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
- /* Disable display plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if (temp & DISPLAY_PLANE_ENABLE) {
- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* disable cpu pipe, disable after all planes disabled */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if (temp & PIPECONF_ENABLE) {
- I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
- POSTING_READ(reg);
- /* wait for cpu pipe off, pipe state */
- intel_wait_for_pipe_off(dev, intel_crtc->pipe);
- }
+ intel_disable_pipe(dev_priv, pipe);
/* Disable PF */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
-
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
- POSTING_READ(reg);
- udelay(100);
+ ironlake_fdi_disable(crtc);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if (temp & LVDS_PORT_EN) {
- I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- udelay(100);
- }
- }
+ /* This is a horrible layering violation; we should be doing this in
+ * the connector/encoder ->prepare instead, but we don't always have
+ * enough information there about the config to know whether it will
+ * actually be necessary or just cause undesired flicker.
+ */
+ intel_disable_pch_ports(dev_priv, pipe);
- /* disable PCH transcoder */
- reg = TRANSCONF(plane);
- temp = I915_READ(reg);
- if (temp & TRANS_ENABLE) {
- I915_WRITE(reg, temp & ~TRANS_ENABLE);
- /* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
- DRM_ERROR("failed to disable transcoder\n");
- }
+ intel_disable_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
I915_WRITE(reg, temp);
/* disable DPLL_SEL */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0)
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
- else
+ switch (pipe) {
+ case 0:
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ break;
+ case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ break;
+ case 2:
+ /* FIXME: manage transcoder PLLs? */
+ temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
+ break;
+ default:
+ BUG(); /* wtf */
+ }
I915_WRITE(PCH_DPLL_SEL, temp);
}
/* disable PCH DPLL */
- reg = PCH_DPLL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+ intel_disable_pch_pll(dev_priv, pipe);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -2451,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
@@ -2469,7 +3020,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 reg, temp;
if (intel_crtc->active)
return;
@@ -2477,42 +3027,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- /* Enable the DPLL */
- reg = DPLL(pipe);
- temp = I915_READ(reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(reg, temp);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
-
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
-
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
- }
-
- /* Enable the pipe */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if ((temp & PIPECONF_ENABLE) == 0)
- I915_WRITE(reg, temp | PIPECONF_ENABLE);
-
- /* Enable the plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
+ intel_enable_pll(dev_priv, pipe);
+ intel_enable_pipe(dev_priv, pipe, false);
+ intel_enable_plane(dev_priv, plane, pipe);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
@@ -2529,7 +3046,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 reg, temp;
if (!intel_crtc->active)
return;
@@ -2544,45 +3060,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* Disable display plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if (temp & DISPLAY_PLANE_ENABLE) {
- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- intel_flush_display_plane(dev, plane);
-
- /* Wait for vblank for the disable to take effect */
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
- }
-
- /* Don't disable pipe A or pipe A PLLs if needed */
- if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
- goto done;
-
- /* Next, disable display pipes */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if (temp & PIPECONF_ENABLE) {
- I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
-
- /* Wait for the pipe to turn off */
- POSTING_READ(reg);
- intel_wait_for_pipe_off(dev, pipe);
- }
-
- reg = DPLL(pipe);
- temp = I915_READ(reg);
- if (temp & DPLL_VCO_ENABLE) {
- I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to turn off. */
- POSTING_READ(reg);
- udelay(150);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
+ intel_disable_pll(dev_priv, pipe);
-done:
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
@@ -2644,7 +3125,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
break;
}
}
@@ -2841,77 +3322,77 @@ struct intel_watermark_params {
};
/* Pineview has different values for various configs */
-static struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params pineview_display_hplloff_wm = {
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params pineview_cursor_wm = {
+static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params g4x_wm_info = {
+static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params g4x_cursor_wm_info = {
+static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params i965_cursor_wm_info = {
+static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params i945_wm_info = {
+static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i915_wm_info = {
+static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
@@ -2919,31 +3400,28 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
-static struct intel_watermark_params ironlake_display_wm_info = {
+static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_cursor_wm_info = {
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_display_srwm_info = {
+static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_cursor_srwm_info = {
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
@@ -2951,31 +3429,28 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_FIFO_LINE_SIZE
};
-static struct intel_watermark_params sandybridge_display_wm_info = {
+static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_cursor_wm_info = {
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_display_srwm_info = {
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
@@ -3003,7 +3478,8 @@ static struct intel_watermark_params sandybridge_cursor_srwm_info = {
* will occur, and a display engine hang could result.
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- struct intel_watermark_params *wm,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
int pixel_size,
unsigned long latency_ns)
{
@@ -3021,7 +3497,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
- wm_size = wm->fifo_size - (entries_required + wm->guard_size);
+ wm_size = fifo_size - (entries_required + wm->guard_size);
DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
@@ -3194,15 +3670,28 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int unused,
- int pixel_size)
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- int sr_clock;
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
@@ -3212,11 +3701,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
return;
}
- if (!planea_clock || !planeb_clock) {
- sr_clock = planea_clock ? planea_clock : planeb_clock;
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
/* Display SR */
- wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
@@ -3225,7 +3717,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -3233,7 +3726,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
@@ -3241,7 +3735,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
@@ -3259,125 +3754,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
}
}
-static void g4x_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int total_size, cacheline_size;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0, entries_required;
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
- /* Create copies of the base settings for each pipe */
- planea_params = planeb_params = g4x_wm_info;
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
- planea_wm = entries_required + planea_params.guard_size;
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
- entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
- planeb_wm = entries_required + planeb_params.guard_size;
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
- cursora_wm = cursorb_wm = 16;
- cursor_sr = 32;
+ return true;
+}
- DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark values is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
- /* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
- /* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
-
- entries_required = (((sr_latency_ns / line_time_us) +
- 1000) / 1000) * pixel_size * 64;
- entries_required = DIV_ROUND_UP(entries_required,
- g4x_cursor_wm_info.cacheline_size);
- cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
-
- if (cursor_sr > g4x_cursor_wm_info.max_wm)
- cursor_sr = g4x_cursor_wm_info.max_wm;
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", sr_entries, cursor_sr);
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
}
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
- planea_wm, planeb_wm, sr_entries);
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
- planea_wm &= 0x3f;
- planeb_wm &= 0x3f;
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
- I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static inline bool single_plane_enabled(unsigned int mask)
+{
+ return mask && (mask & -mask) == 0;
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
- I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static void i965_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long line_time_us;
- int sr_clock, sr_entries, srwm = 1;
+ struct drm_crtc *crtc;
+ int srwm = 1;
int cursor_sr = 16;
/* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = ((htotal * 1000) / clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- srwm = I965_FIFO_SIZE - sr_entries;
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * 64;
- sr_entries = DIV_ROUND_UP(sr_entries,
+ entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
- (sr_entries + i965_cursor_wm_info.guard_size);
+ (entries + i965_cursor_wm_info.guard_size);
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3398,46 +3997,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
- (8 << 0));
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static void i9xx_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
- int total_size, cacheline_size, cwm, srwm = 1;
+ int cwm, srwm = 1;
+ int fifo_size;
int planea_wm, planeb_wm;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0;
+ struct drm_crtc *crtc, *enabled = NULL;
- /* Create copies of the base settings for each pipe */
- if (IS_CRESTLINE(dev) || IS_I945GM(dev))
- planea_params = planeb_params = i945_wm_info;
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
else if (!IS_GEN2(dev))
- planea_params = planeb_params = i915_wm_info;
+ wm_info = &i915_wm_info;
else
- planea_params = planeb_params = i855_wm_info;
-
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
-
- /* Update per-plane FIFO sizes */
- planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
- planea_wm = intel_calculate_wm(planea_clock, &planea_params,
- pixel_size, latency_ns);
- planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
- pixel_size, latency_ns);
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/*
@@ -3445,39 +4054,39 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
*/
cwm = 2;
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && sr_hdisplay &&
- (!planea_clock || !planeb_clock)) {
+ if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = (htotal * 1000) / clock;
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
- srwm = total_size - sr_entries;
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev)) {
- /* 915M has a smaller SRWM field */
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
- }
- } else {
- /* Turn off self refresh if both pipes are enabled */
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- } else if (IS_I915GM(dev)) {
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
- }
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3492,19 +4101,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
}
-static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
- int unused2, int unused3, int pixel_size)
+static void i830_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
int planea_wm;
- i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
- planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
- pixel_size, latency_ns);
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
@@ -3613,15 +4239,15 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level,
/*
* Compute watermark values of WM[1-3],
*/
-static bool ironlake_compute_srwm(struct drm_device *dev, int level,
- int hdisplay, int htotal,
- int pixel_size, int clock, int latency_ns,
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
-
+ struct drm_crtc *crtc;
unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
int small, large;
int entries;
@@ -3631,6 +4257,12 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
return false;
}
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
@@ -3658,14 +4290,11 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
display, cursor);
}
-static void ironlake_update_wm(struct drm_device *dev,
- int planea_clock, int planeb_clock,
- int hdisplay, int htotal,
- int pixel_size)
+static void ironlake_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm, enabled;
- int clock;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
enabled = 0;
if (ironlake_compute_wm0(dev, 0,
@@ -3679,7 +4308,7 @@ static void ironlake_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 1;
}
if (ironlake_compute_wm0(dev, 1,
@@ -3693,7 +4322,7 @@ static void ironlake_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 2;
}
/*
@@ -3704,14 +4333,13 @@ static void ironlake_update_wm(struct drm_device *dev,
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (enabled != 1)
+ if (!single_plane_enabled(enabled))
return;
-
- clock = planea_clock ? planea_clock : planeb_clock;
+ enabled = ffs(enabled) - 1;
/* WM1 */
- if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
- clock, ILK_READ_WM1_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3725,8 +4353,8 @@ static void ironlake_update_wm(struct drm_device *dev,
cursor_wm);
/* WM2 */
- if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
- clock, ILK_READ_WM2_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3745,15 +4373,12 @@ static void ironlake_update_wm(struct drm_device *dev,
*/
}
-static void sandybridge_update_wm(struct drm_device *dev,
- int planea_clock, int planeb_clock,
- int hdisplay, int htotal,
- int pixel_size)
+static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- int fbc_wm, plane_wm, cursor_wm, enabled;
- int clock;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
enabled = 0;
if (ironlake_compute_wm0(dev, 0,
@@ -3765,7 +4390,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 1;
}
if (ironlake_compute_wm0(dev, 1,
@@ -3777,7 +4402,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 2;
}
/*
@@ -3794,14 +4419,13 @@ static void sandybridge_update_wm(struct drm_device *dev,
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (enabled != 1)
+ if (!single_plane_enabled(enabled))
return;
-
- clock = planea_clock ? planea_clock : planeb_clock;
+ enabled = ffs(enabled) - 1;
/* WM1 */
- if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM1_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3815,9 +4439,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
cursor_wm);
/* WM2 */
- if (!ironlake_compute_srwm(dev, 2,
- hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM2_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3831,9 +4454,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
cursor_wm);
/* WM3 */
- if (!ironlake_compute_srwm(dev, 3,
- hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM3_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3882,44 +4504,9 @@ static void sandybridge_update_wm(struct drm_device *dev,
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- int sr_hdisplay = 0;
- unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
- int enabled = 0, pixel_size = 0;
- int sr_htotal = 0;
-
- if (!dev_priv->display.update_wm)
- return;
- /* Get the clock config from both planes */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->active) {
- enabled++;
- if (intel_crtc->plane == 0) {
- DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planea_clock = crtc->mode.clock;
- } else {
- DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planeb_clock = crtc->mode.clock;
- }
- sr_hdisplay = crtc->mode.hdisplay;
- sr_clock = crtc->mode.clock;
- sr_htotal = crtc->mode.htotal;
- if (crtc->fb)
- pixel_size = crtc->fb->bits_per_pixel / 8;
- else
- pixel_size = 4; /* by default */
- }
- }
-
- if (enabled <= 0)
- return;
-
- dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
- sr_hdisplay, sr_htotal, pixel_size);
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -3951,6 +4538,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int ret;
struct fdi_m_n m_n = {0};
u32 reg, temp;
+ u32 lvds_sync = 0;
int target_clock;
drm_vblank_pre_modeset(dev, pipe);
@@ -4322,9 +4910,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPECONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
+ if (!HAS_PCH_SPLIT(dev))
+ dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4350,10 +4937,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0)
+ switch (pipe) {
+ case 0:
temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
- else
+ break;
+ case 1:
temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ /* FIXME: manage transcoder PLLs? */
+ temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
+ break;
+ default:
+ BUG();
+ }
I915_WRITE(PCH_DPLL_SEL, temp);
POSTING_READ(PCH_DPLL_SEL);
@@ -4403,6 +5000,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else
temp &= ~LVDS_ENABLE_DITHER;
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ lvds_sync |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ lvds_sync |= LVDS_VSYNC_POLARITY;
+ if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
+ != lvds_sync) {
+ char flags[2] = "-+";
+ DRM_INFO("Changing LVDS panel from "
+ "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
+ flags[!(temp & LVDS_HSYNC_POLARITY)],
+ flags[!(temp & LVDS_VSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ temp |= lvds_sync;
+ }
I915_WRITE(reg, temp);
}
@@ -4420,17 +5033,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else if (HAS_PCH_SPLIT(dev)) {
/* For non-DP output, clear any trans DP clock recovery setting.*/
- if (pipe == 0) {
- I915_WRITE(TRANSA_DATA_M1, 0);
- I915_WRITE(TRANSA_DATA_N1, 0);
- I915_WRITE(TRANSA_DP_LINK_M1, 0);
- I915_WRITE(TRANSA_DP_LINK_N1, 0);
- } else {
- I915_WRITE(TRANSB_DATA_M1, 0);
- I915_WRITE(TRANSB_DATA_N1, 0);
- I915_WRITE(TRANSB_DP_LINK_M1, 0);
- I915_WRITE(TRANSB_DP_LINK_N1, 0);
- }
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
@@ -4533,6 +5139,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
+ if (!HAS_PCH_SPLIT(dev))
+ intel_enable_pipe(dev_priv, pipe, false);
intel_wait_for_vblank(dev, pipe);
@@ -4543,6 +5151,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+ if (!HAS_PCH_SPLIT(dev))
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -4559,7 +5170,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+ int palreg = PALETTE(intel_crtc->pipe);
int i;
/* The clocks have to be on to load the palette. */
@@ -4568,8 +5179,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
- palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
- LGC_PALETTE_B;
+ palreg = LGC_PALETTE(intel_crtc->pipe);
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
@@ -4590,12 +5200,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
if (intel_crtc->cursor_visible == visible)
return;
- cntl = I915_READ(CURACNTR);
+ cntl = I915_READ(_CURACNTR);
if (visible) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
- I915_WRITE(CURABASE, base);
+ I915_WRITE(_CURABASE, base);
cntl &= ~(CURSOR_FORMAT_MASK);
/* XXX width must be 64, stride 256 => 0x00 << 28 */
@@ -4604,7 +5214,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
CURSOR_FORMAT_ARGB;
} else
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- I915_WRITE(CURACNTR, cntl);
+ I915_WRITE(_CURACNTR, cntl);
intel_crtc->cursor_visible = visible;
}
@@ -4618,7 +5228,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
- uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ uint32_t cntl = I915_READ(CURCNTR(pipe));
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
@@ -4627,12 +5237,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+ I915_WRITE(CURCNTR(pipe), cntl);
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
- I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+ I915_WRITE(CURBASE(pipe), base);
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -4682,7 +5292,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+ I915_WRITE(CURPOS(pipe), pos);
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
@@ -4722,7 +5332,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
}
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
- if (!obj)
+ if (&obj->base == NULL)
return -ENOENT;
if (obj->base.size < width * height * 4) {
@@ -4988,14 +5598,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = FP0(pipe);
else
- fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = FP1(pipe);
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
@@ -5077,10 +5687,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_display_mode *mode;
- int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ int htot = I915_READ(HTOTAL(pipe));
+ int hsync = I915_READ(HSYNC(pipe));
+ int vtot = I915_READ(VTOTAL(pipe));
+ int vsync = I915_READ(VSYNC(pipe));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -5189,7 +5799,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_reg = DPLL(pipe);
int dpll = I915_READ(dpll_reg);
if (HAS_PCH_SPLIT(dev))
@@ -5237,7 +5847,6 @@ static void intel_idle_update(struct work_struct *work)
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- int enabled = 0;
if (!i915_powersave)
return;
@@ -5251,16 +5860,11 @@ static void intel_idle_update(struct work_struct *work)
if (!crtc->fb)
continue;
- enabled++;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->busy)
intel_decrease_pllclock(crtc);
}
- if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
- DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- }
mutex_unlock(&dev->struct_mutex);
}
@@ -5285,17 +5889,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy) {
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- u32 fw_blc_self;
-
- DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
- fw_blc_self = I915_READ(FW_BLC_SELF);
- fw_blc_self &= ~FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
- }
+ if (!dev_priv->busy)
dev_priv->busy = true;
- } else
+ else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
@@ -5307,14 +5903,6 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb->obj == obj) {
if (!intel_crtc->busy) {
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- u32 fw_blc_self;
-
- DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
- fw_blc_self = I915_READ(FW_BLC_SELF);
- fw_blc_self &= ~FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
- }
/* Non-busy -> busy, upclock */
intel_increase_pllclock(crtc);
intel_crtc->busy = true;
@@ -5592,7 +6180,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
*/
pf = 0;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
break;
@@ -5602,8 +6190,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
- pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
break;
}
@@ -5692,22 +6280,8 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
pipe = !pipe;
/* Disable the plane and wait for it to stop reading from the pipe. */
- I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
-
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
-
- if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
- return;
-
- /* Switch off the pipe. */
- reg = PIPECONF(pipe);
- val = I915_READ(reg);
- if (val & PIPECONF_ENABLE) {
- I915_WRITE(reg, val & ~PIPECONF_ENABLE);
- intel_wait_for_pipe_off(dev, pipe);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
}
static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -5997,7 +6571,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
int ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
- if (!obj)
+ if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
@@ -6319,18 +6893,18 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
18 << 24 |
6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_USE_NORMAL_FREQ |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_MAX |
- GEN6_RP_DOWN_BUSY_MIN);
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -6386,6 +6960,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
void intel_enable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
/*
* Disable clock gating reported to work incorrectly according to the
@@ -6495,12 +7070,10 @@ void intel_enable_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
- I915_WRITE(DSPACNTR,
- I915_READ(DSPACNTR) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- I915_WRITE(DSPBCNTR,
- I915_READ(DSPBCNTR) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
+ for_each_pipe(pipe)
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
}
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -6855,10 +7428,6 @@ void intel_modeset_init(struct drm_device *dev)
}
dev->mode_config.fb_base = dev->agp->base;
- if (IS_MOBILE(dev) || !IS_GEN2(dev))
- dev_priv->num_pipe = 2;
- else
- dev_priv->num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51cb4e36997f..d29e33f815d7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -49,6 +49,7 @@ struct intel_dp {
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int force_audio;
+ uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
@@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4, bpp = 24;
struct intel_dp_m_n m_n;
+ int pipe = intel_crtc->pipe;
/*
* Find the lane count in the intel_encoder private
@@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(TRANSA_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
- } else {
- I915_WRITE(TRANSB_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
- }
+ I915_WRITE(TRANSDATA_M1(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
+ I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
} else {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(PIPEA_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEA_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
- } else {
- I915_WRITE(PIPEB_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEB_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
- }
+ I915_WRITE(PIPE_GMCH_DATA_M(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+ I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
@@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_dp->DP = (DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0);
+ intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ /*
+ * If the panel wasn't on, make sure there's not a currently
+ * active PP sequence before enabling AUX VDD.
+ */
+ if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
+ msleep(dev_priv->panel_t3);
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+}
+
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ msleep(dev_priv->panel_t12);
+}
+
/* Returns true if the panel was already on when called */
static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
@@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- /* Ouch. We need to wait here for some panels, like Dell e6510
- * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
- */
- msleep(300);
-
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
@@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
-
- /* Ouch. We need to wait here for some panels, like Dell e6510
- * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
- */
- msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_off(dev);
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
@@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_vdd_on(intel_dp);
+
intel_dp_start_link_train(intel_dp);
- if (is_edp(intel_dp))
+ if (is_edp(intel_dp)) {
ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
intel_dp_complete_link_train(intel_dp);
@@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_pll_off(encoder);
} else {
if (is_edp(intel_dp))
- ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_on(intel_dp);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
+ if (is_edp(intel_dp)) {
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
intel_dp_complete_link_train(intel_dp);
}
if (is_edp(intel_dp))
@@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
- /* Can't disconnect eDP */
- if (is_edp(intel_dp))
- return connector_status_connected;
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ status = intel_panel_detect(intel_dp->base.base.dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
@@ -1662,6 +1681,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
@@ -1690,6 +1710,14 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -1809,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_dp->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
}
+
+ intel_attach_broadcast_rgb_property(connector);
}
void
@@ -1826,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_dp)
return;
+ intel_dp->output_reg = output_reg;
+ intel_dp->dpms_mode = -1;
+
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dp);
@@ -1865,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp->output_reg = output_reg;
- intel_dp->has_audio = false;
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
@@ -1906,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
int ret;
- bool was_on;
+ u32 pp_on, pp_div;
- was_on = ironlake_edp_panel_on(intel_dp);
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Get T3 & T12 values (note: VESA not bspec terminology) */
+ dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
+ dev_priv->panel_t3 /= 10; /* t3 in 100us units */
+ dev_priv->panel_t12 = pp_div & 0xf;
+ dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
+
+ ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
+ ironlake_edp_panel_vdd_off(intel_dp);
if (ret == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
+ /* if this fails, presume the device is a ghost */
DRM_ERROR("failed to retrieve link info\n");
+ intel_dp_destroy(&intel_connector->base);
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ return;
}
- if (!was_on)
- ironlake_edp_panel_off(dev);
}
intel_encoder->hot_plug = intel_dp_hot_plug;
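
[Editor's note: the PCH_PP_ON_DELAYS/PCH_PP_DIVISOR parsing in intel_dp_init packs the panel power-up (T3) and power-cycle (T12) delays into bitfields: T3 in bits 28:16 of PP_ON_DELAYS in 100us units, T12 in bits 3:0 of PP_DIVISOR in 100ms units. A self-contained sketch of the same extraction, with the register values faked rather than read from hardware.]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* stand-ins for I915_READ(PCH_PP_ON_DELAYS) / I915_READ(PCH_PP_DIVISOR) */
	uint32_t pp_on  = 0x1f400000;	/* hypothetical register contents */
	uint32_t pp_div = 0x00000004;

	/* T3 lives in bits 28:16 of PP_ON_DELAYS, in 100us units */
	unsigned t3 = (pp_on & 0x1fff0000) >> 16;
	t3 /= 10;				/* convert to ms, as the driver does */

	/* T12 lives in bits 3:0 of PP_DIVISOR, in 100ms units */
	unsigned t12 = (pp_div & 0xf) * 100;	/* convert to ms */

	printf("panel T3 = %ums, T12 = %ums\n", t3, t12);
	return 0;
}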
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2c431049963c..5daa991cb287 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
return dev_priv->pipe_to_crtc_mapping[pipe];
}
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->plane_to_crtc_mapping[plane];
+}
+
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
@@ -230,6 +237,8 @@ struct intel_unpin_work {
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -321,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay,
- bool interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ea373283c93b..6eda1b51c636 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c635c9e357b9..f289b8642976 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -41,6 +41,7 @@ struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
+ uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -278,6 +280,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
@@ -305,6 +308,14 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_hdmi->color_range)
+ return 0;
+
+ intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -363,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
}
+
+ intel_attach_broadcast_rgb_property(connector);
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 58040f68ed7a..82d04c5899d2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
- bus->force_bit = intel_gpio_create(dev_priv, i);
+ if (IS_GEN2(dev))
+ bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bcdba7bd5cfa..1a311ad01116 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ int pipe;
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -277,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(BCLRPAT(pipe), 0);
switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
@@ -474,6 +475,10 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
+ status = intel_panel_detect(dev);
+ if (status != connector_status_unknown)
+ return status;
+
/* ACPI lid methods were generally unreliable in this generation, so
* don't even bother.
*/
@@ -496,7 +501,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, intel_lvds->edid);
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
- if (mode == 0)
+ if (mode == NULL)
return 0;
drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index f70b7cf32bff..9034dd8f33c7 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
+
+static const char *broadcast_rgb_names[] = {
+ "Full",
+ "Limited 16:235",
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+ drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_connector_attach_property(connector, prop, 0);
+}
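
[Editor's note: intel_attach_broadcast_rgb_property() creates the "Broadcast RGB" enum once per device, caches it in dev_priv, and then attaches that same object to every connector that asks. A standalone sketch of the create-once-attach-many pattern, with the DRM types replaced by hypothetical stand-ins.]

#include <stdio.h>
#include <stdlib.h>

struct prop { const char *name; };

struct device {
	struct prop *broadcast_rgb;	/* cached so it is only created once */
};

static struct prop *get_broadcast_rgb_prop(struct device *dev)
{
	if (dev->broadcast_rgb == NULL) {
		dev->broadcast_rgb = malloc(sizeof(*dev->broadcast_rgb));
		if (dev->broadcast_rgb == NULL)
			return NULL;	/* like the driver, give up quietly */
		dev->broadcast_rgb->name = "Broadcast RGB";
	}
	return dev->broadcast_rgb;	/* later callers get the same object */
}

int main(void)
{
	struct device dev = { 0 };
	struct prop *a = get_broadcast_rgb_prop(&dev);	/* creates */
	struct prop *b = get_broadcast_rgb_prop(&dev);	/* reuses */
	printf("same property object: %s\n", a == b ? "yes" : "no");
	free(dev.broadcast_rgb);
	return 0;
}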
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 64fd64443ca6..d2c710422908 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -39,6 +39,8 @@
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
@@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->header = base;
opregion->vbt = base + OPREGION_VBT_OFFSET;
+ opregion->lid_state = base + ACPI_CLID;
+
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3fbb98b948d6..a670c006982e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
struct drm_i915_gem_request *request,
- bool interruptible,
void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
@@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_do_wait_request(dev,
- overlay->last_flip_req, true,
- LP_RING(dev_priv));
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev)
return 0;
/* most i8xx have pipe a forced on, so don't trust dpms mode */
- if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+ if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
return 0;
crtc_funcs = crtc->base.helper_private;
@@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+ ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
if (pipe_a_quirk)
i830_deactivate_pipe_a(dev);
@@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
}
/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay,
- bool interruptible)
+static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
- return intel_overlay_do_wait_request(overlay, request, interruptible,
+ return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- bool interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, LP_RING(dev_priv));
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- ret = intel_overlay_do_wait_request(overlay, request, true,
+ ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -868,8 +862,7 @@ out_unpin:
return ret;
}
-int intel_overlay_switch_off(struct intel_overlay *overlay,
- bool interruptible)
+int intel_overlay_switch_off(struct intel_overlay *overlay)
{
struct overlay_registers *regs;
struct drm_device *dev = overlay->dev;
@@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
- ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+ ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
return ret;
@@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
regs->OCMD = 0;
intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_off(overlay, interruptible);
+ ret = intel_overlay_off(overlay);
if (ret != 0)
return ret;
@@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- ret = intel_overlay_switch_off(overlay, true);
+ ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
- if (!new_bo) {
+ if (&new_bo->base == NULL) {
ret = -ENOENT;
goto out_free;
}
@@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = intel_overlay_recover_from_interrupt(overlay, true);
+ ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
goto out_unlock;
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
- ret = intel_overlay_switch_off(overlay, true);
+ ret = intel_overlay_switch_off(overlay);
if (ret != 0)
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f8f86e57df22..a06ff07a4d3b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -280,3 +280,28 @@ void intel_panel_setup_backlight(struct drm_device *dev)
dev_priv->backlight_level = intel_panel_get_backlight(dev);
dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
+
+ if (i915_panel_ignore_lid)
+ return i915_panel_ignore_lid > 0 ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+	/* The OpRegion lid state on the HP 2540p is wrong at boot-up; this
+	 * appears to be either a BIOS or a Linux ACPI fault */
+#if 0
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (dev_priv->opregion.lid_state)
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+#endif
+
+ return connector_status_unknown;
+}
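
[Editor's note: intel_panel_detect() treats i915_panel_ignore_lid as a tri-state: positive forces connected, negative forces disconnected, and zero would fall through to the (currently #if 0'd) OpRegion lid-state check, where bit 0 of the CLID dword indicates an open lid. A compilable sketch of that decision logic, with a fake lid-state word standing in for the ioread32() of the mapped OpRegion.]

#include <stdint.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED, UNKNOWN };

static enum status panel_detect(int ignore_lid, const uint32_t *lid_state)
{
	if (ignore_lid)		/* module parameter overrides everything */
		return ignore_lid > 0 ? CONNECTED : DISCONNECTED;

	if (lid_state)		/* OpRegion CLID: bit 0 set means lid open */
		return (*lid_state & 0x1) ? CONNECTED : DISCONNECTED;

	return UNKNOWN;		/* let the caller fall back to its default */
}

int main(void)
{
	uint32_t clid = 0x1;	/* hypothetical OpRegion contents: lid open */
	printf("%d %d %d\n",
	       panel_detect(1, NULL),	/* forced connected */
	       panel_detect(-1, &clid),	/* forced disconnected */
	       panel_detect(0, &clid));	/* read from lid state */
	return 0;
}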
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 445f27efe677..789c47801ba8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring,
u32 flush_domains)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
int ret;
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
-
- trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
- invalidate_domains, flush_domains);
-
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
/*
* read/write caches:
@@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring,
(IS_G4X(dev) || IS_GEN5(dev)))
cmd |= MI_INVALIDATE_ISP;
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
@@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
*result = seqno;
return 0;
}
@@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
-
if (IS_I830(dev) || IS_845G(dev)) {
ret = intel_ring_begin(ring, 4);
if (ret)
@@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_wait_ring_buffer(ring, ring->size - 8);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
I915_WRITE_CTL(ring, 0);
drm_core_ioremapfree(&ring->map, ring->dev);
@@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return 0;
}
- trace_i915_ring_wait_begin (dev);
+ trace_i915_ring_wait_begin(ring);
end = jiffies + 3 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
if (ring->space >= n) {
- trace_i915_ring_wait_end(dev);
+ trace_i915_ring_wait_end(ring);
return 0;
}
@@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
} while (!time_after(jiffies, end));
- trace_i915_ring_wait_end (dev);
+ trace_i915_ring_wait_end(ring);
return -EBUSY;
}
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
+ if (unlikely(atomic_read(&dev_priv->mm.wedged)))
+ return -EIO;
+
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
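
[Editor's note: intel_ring_begin() now refuses to touch the ring once the GPU is wedged, then wraps or waits as before. A standalone sketch of that admission logic over a toy ring structure; the field names mirror the driver but the structure itself is a stand-in.]

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ring {
	int tail, space, effective_size;
	bool wedged;
};

static int ring_begin(struct ring *ring, int num_dwords)
{
	int n = 4 * num_dwords;		/* dwords -> bytes */

	if (ring->wedged)
		return -EIO;		/* GPU hung: fail fast, queue nothing */

	if (ring->tail + n > ring->effective_size)
		ring->tail = 0;		/* stands in for intel_wrap_ring_buffer() */

	if (ring->space < n)
		return -EBUSY;		/* stands in for intel_wait_ring_buffer() */

	ring->space -= n;
	return 0;
}

int main(void)
{
	struct ring r = { .tail = 0, .space = 64,
			  .effective_size = 4088, .wedged = false };
	printf("begin: %d\n", ring_begin(&r, 2));	/* 0: fits */
	r.wedged = true;
	printf("begin: %d\n", ring_begin(&r, 2));	/* -EIO */
	return 0;
}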
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 34306865a5df..f23cc5f037a6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -44,7 +44,7 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
- void *virtual_start;
+ void __iomem *virtual_start;
struct drm_device *dev;
struct drm_i915_gem_object *obj;
@@ -59,6 +59,7 @@ struct intel_ring_buffer {
u32 irq_refcount;
u32 irq_mask;
u32 irq_seqno; /* last seqno seen at irq time */
+ u32 trace_irq_seqno;
u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
@@ -142,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ioread32(ring->status_page.page_addr + reg);
}
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_BREADCRUMB_INDEX 0x21
+
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
@@ -167,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+}
+
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
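
[Editor's note: the hardware status page documented above is just a page of dwords the GPU writes; READ_HWSP() reduces to an indexed load from that page. A minimal sketch treating the page as a plain dword array, using the two driver-reserved indices from the header.]

#include <stdint.h>
#include <stdio.h>

#define I915_GEM_HWS_INDEX	0x20	/* first dword available to the driver */
#define I915_BREADCRUMB_INDEX	0x21

int main(void)
{
	uint32_t hws[1024] = { 0 };	/* stand-in for the mapped status page */

	/* the GPU's MI_STORE_DATA_INDEX would write these; fake it here */
	hws[I915_GEM_HWS_INDEX] = 42;		/* last retired seqno */
	hws[I915_BREADCRUMB_INDEX] = 43;	/* DRI breadcrumb */

	/* READ_HWSP(dev_priv, reg) boils down to this indexed read */
	printf("seqno=%u breadcrumb=%u\n",
	       hws[I915_GEM_HWS_INDEX], hws[I915_BREADCRUMB_INDEX]);
	return 0;
}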
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7c50cdce84f0..4324f33212d6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -93,6 +93,12 @@ struct intel_sdvo {
uint16_t attached_output;
/**
+	 * This is used to select the color range of RGB outputs in HDMI mode.
+	 * It is only valid when using TMDS encoding and 8 bits per color.
+ */
+ uint32_t color_range;
+
+ /**
* This is set if we're going to treat the device as TV-out.
*
* While we have these nice friendly flags for output types that ought
@@ -585,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
{
struct intel_sdvo_get_trained_inputs_response response;
+ BUILD_BUG_ON(sizeof(response) != 1);
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
&response, sizeof(response)))
return false;
@@ -632,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo
{
struct intel_sdvo_pixel_clock_range clocks;
+ BUILD_BUG_ON(sizeof(clocks) != 4);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
&clocks, sizeof(clocks)))
@@ -699,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
&dtd->part1, sizeof(dtd->part1)) &&
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
@@ -796,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
struct intel_sdvo_encode encode;
+ BUILD_BUG_ON(sizeof(encode) != 2);
return intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPP_ENCODE,
&encode, sizeof(encode));
@@ -1051,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
sdvox = 0;
+ if (intel_sdvo->is_hdmi)
+ sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1162,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
@@ -1268,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
- int caps = 0;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
- caps++;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
- caps++;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
- caps++;
-
- return (caps > 1);
+ /* Is there more than one type of output? */
+ int caps = intel_sdvo->caps.output_flags & 0xf;
+	return caps & (caps - 1);
}
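
[Editor's note: the rewritten multifunc check relies on the classic bit trick: x & (x - 1) clears the lowest set bit, so the result is non-zero exactly when more than one bit (here, more than one output type) is set. By contrast, x & -x isolates the lowest set bit and is non-zero for any non-empty mask, which would report every encoder as multifunction. A standalone check of the idiom:]

#include <assert.h>
#include <stdint.h>

/* non-zero iff more than one bit of x is set */
static uint32_t more_than_one_bit(uint32_t x)
{
	return x & (x - 1);	/* clearing the lowest bit leaves the rest */
}

int main(void)
{
	assert(more_than_one_bit(0x0) == 0);	/* no outputs */
	assert(more_than_one_bit(0x2) == 0);	/* one output type */
	assert(more_than_one_bit(0x3) != 0);	/* two types, say TMDS + RGB */
	return 0;
}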
static struct edid *
@@ -1482,7 +1472,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* Note! This is in reply order (see loop in get_tv_modes).
* XXX: all 60Hz refresh?
*/
-struct drm_display_mode sdvo_tv_modes[] = {
+static const struct drm_display_mode sdvo_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
416, 0, 200, 201, 232, 233, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -1713,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -1742,6 +1733,14 @@ intel_sdvo_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_sdvo->color_range)
+ return 0;
+
+ intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -2046,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
drm_connector_attach_property(&connector->base.base,
connector->force_audio_property, 0);
}
+
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
}
static bool
@@ -2268,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_set_target_output(intel_sdvo, type))
return false;
+ BUILD_BUG_ON(sizeof(format) != 6);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
&format, sizeof(format)))
@@ -2474,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
uint16_t response;
} enhancements;
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
enhancements.response = 0;
intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fe4a53a50b83..4256b8ef3947 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
const struct video_levels *video_levels;
const struct color_conversion *color_conversion;
bool burst_ena;
+ int pipe = intel_crtc->pipe;
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
@@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
{
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = PIPECONF(pipe);
+ int dspcntr_reg = DSPCNTR(pipe);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
+ int dspbase_reg = DSPADDR(pipe);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 08868ac3048a..1e1eb1d7e971 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -703,7 +703,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
- const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
+ const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 0aaf5f67a436..42d31874edf2 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -75,10 +75,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -88,15 +84,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver mga_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init mga_init(void)
{
driver.num_ioctls = mga_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &mga_pci_driver);
}
static void __exit mga_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &mga_pci_driver);
}
module_init(mga_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 6bdab891c64e..8314a49b6b9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -282,7 +282,7 @@ static void still_alive(void)
{
#if 0
sync();
- msleep(2);
+ mdelay(2);
#endif
}
@@ -1904,7 +1904,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
BIOSLOG(bios, "0x%04X: "
"Condition not met, sleeping for 20ms\n",
offset);
- msleep(20);
+ mdelay(20);
}
}
@@ -1938,7 +1938,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
offset, time);
- msleep(time);
+ mdelay(time);
return 3;
}
@@ -2962,7 +2962,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (time < 1000)
udelay(time);
else
- msleep((time + 900) / 1000);
+ mdelay((time + 900) / 1000);
return 3;
}
@@ -3856,7 +3856,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
if (script == LVDS_PANEL_OFF) {
/* off-on delay in ms */
- msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+ mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
}
#ifdef __powerpc__
/* Powerbook specific quirks */
@@ -5950,6 +5950,11 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
}
}
+static const u8 hpd_gpio[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+};
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
@@ -5986,23 +5991,9 @@ parse_dcb_connector_table(struct nvbios *bios)
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index2 = (cte->entry & 0x00000f00) >> 8;
- switch (cte->entry & 0x00033000) {
- case 0x00001000:
- cte->gpio_tag = 0x07;
- break;
- case 0x00002000:
- cte->gpio_tag = 0x08;
- break;
- case 0x00010000:
- cte->gpio_tag = 0x51;
- break;
- case 0x00020000:
- cte->gpio_tag = 0x52;
- break;
- default:
- cte->gpio_tag = 0xff;
- break;
- }
+
+ cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+ cte->gpio_tag = hpd_gpio[cte->gpio_tag];
if (cte->type == 0xff)
continue;
@@ -6702,11 +6693,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
- mutex_lock(&bios->lock);
+ spin_lock_bh(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
- mutex_unlock(&bios->lock);
+ spin_unlock_bh(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -6715,7 +6706,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
- mutex_init(&bios->lock);
+ spin_lock_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
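
[Editor's note: two related changes in this file. First, the DCB connector GPIO tag is now derived by ffs() over the relevant entry bits plus a 16-entry lookup table instead of a switch. Second, the VBIOS lock became a spinlock, which is also why every msleep() in the init-table paths above turned into a busy-waiting mdelay(): sleeping is not allowed under a spinlock. A standalone sketch of the table lookup; the table contents are copied from the hunk.]

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs(), POSIX */

static const uint8_t hpd_gpio[16] = {
	0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
};

static uint8_t gpio_tag(uint32_t entry)
{
	/* ffs() is 1-based and returns 0 for no bits, so index 0 maps to
	 * "no hpd gpio" (0xff) and bit n of the masked field maps to n+1 */
	return hpd_gpio[ffs((entry & 0x07033000) >> 12)];
}

int main(void)
{
	printf("0x%02x\n", gpio_tag(0x00001000));	/* bit 12 -> 0x07 */
	printf("0x%02x\n", gpio_tag(0x00000000));	/* none   -> 0xff */
	return 0;
}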
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 50a648e01c49..8a54fa7edf5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -251,7 +251,7 @@ struct nvbios {
uint8_t digital_min_front_porch;
bool fp_no_ddc;
- struct mutex lock;
+ spinlock_t lock;
uint8_t data[NV_PROM_SIZE];
unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a52184007f5f..2ad49cbf7c8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -57,8 +57,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
- int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+ int *align, int *size, int *page_shift)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -83,7 +83,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
}
} else {
if (likely(dev_priv->chan_vm)) {
- if (*size > 256 * 1024)
+ if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
*page_shift = dev_priv->chan_vm->lpg_shift;
else
*page_shift = dev_priv->chan_vm->spg_shift;
@@ -101,8 +101,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@@ -113,16 +112,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
- nvbo->mappable = mappable;
- nvbo->no_vm = no_vm;
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
- if (!nvbo->no_vm && dev_priv->chan_vm) {
+ if (dev_priv->chan_vm) {
ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
NV_MEM_ACCESS_RW, &nvbo->vma);
if (ret) {
@@ -144,11 +141,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
@@ -318,11 +312,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@@ -385,7 +376,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
return nouveau_sgdma_init_ttm(dev);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
@@ -431,7 +423,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
+ if (dev_priv->card_type >= NV_50)
+ man->func = &nouveau_gart_manager;
+ else
+ man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -439,7 +434,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
@@ -501,45 +497,22 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
}
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *mem)
-{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
-
- if (nvbo->no_vm) {
- if (mem->mem_type == TTM_PL_TT)
- return NvDmaGART;
- return NvDmaVRAM;
- }
-
- if (mem->mem_type == TTM_PL_TT)
- return chan->gart_handle;
- return chan->vram_handle;
-}
-
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 src_offset = old_mem->start << PAGE_SHIFT;
- u64 dst_offset = new_mem->start << PAGE_SHIFT;
u32 page_count = new_mem->num_pages;
+ u64 src_offset, dst_offset;
int ret;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
page_count = new_mem->num_pages;
while (page_count) {
@@ -574,33 +547,18 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset, dst_offset;
int ret;
- src_offset = old_mem->start << PAGE_SHIFT;
- dst_offset = new_mem->start << PAGE_SHIFT;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
-
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
while (length) {
u32 amount, stride, height;
@@ -681,6 +639,15 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+ struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+{
+ if (mem->mem_type == TTM_PL_TT)
+ return chan->gart_handle;
+ return chan->vram_handle;
+}
+
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -734,15 +701,43 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->no_vm) {
+ if (!chan) {
chan = dev_priv->channel;
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
+	/* Create a temporary VMA for the old memory; it will get cleaned
+	 * up after TTM destroys the ttm_mem_reg.
+	 */
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_mem *node = old_mem->mm_node;
+ if (!node->tmp_vma.node) {
+ u32 page_shift = nvbo->vma.node->type;
+ if (old_mem->mem_type == TTM_PL_TT)
+ page_shift = nvbo->vma.vm->spg_shift;
+
+ ret = nouveau_vm_get(chan->vm,
+ old_mem->num_pages << PAGE_SHIFT,
+ page_shift, NV_MEM_ACCESS_RO,
+ &node->tmp_vma);
+ if (ret)
+ goto out;
+ }
+
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(&node->tmp_vma, node);
+ else {
+ nouveau_vm_map_sg(&node->tmp_vma, 0,
+ old_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+ }
+
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
@@ -756,6 +751,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
no_wait_gpu, new_mem);
}
+out:
if (chan == dev_priv->channel)
mutex_unlock(&chan->mutex);
return ret;
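
[Editor's note: the reworked nv50/nvc0 copy paths no longer compute raw aperture offsets; they always copy through virtual addresses. The source is the temporary VMA created above, and the destination is the staging VMA if one exists, otherwise the buffer's own VMA. A tiny standalone sketch of that selection, with the nouveau structures reduced to the fields the logic actually touches.]

#include <stdint.h>
#include <stdio.h>

struct vma { uint64_t offset; int node; /* non-zero once allocated */ };

struct mem { struct vma tmp_vma; };
struct bo  { struct vma vma; };

static void pick_offsets(const struct mem *old_node, const struct mem *new_node,
			 const struct bo *nvbo, uint64_t *src, uint64_t *dst)
{
	*src = old_node->tmp_vma.offset;	/* old memory is always staged */
	if (new_node->tmp_vma.node)
		*dst = new_node->tmp_vma.offset;	/* staged destination */
	else
		*dst = nvbo->vma.offset;	/* copy straight into the bo's vma */
}

int main(void)
{
	struct mem oldm = { { 0x1000, 1 } }, newm = { { 0, 0 } };
	struct bo bo = { { 0x8000, 1 } };
	uint64_t src, dst;

	pick_offsets(&oldm, &newm, &bo, &src, &dst);
	printf("copy 0x%llx -> 0x%llx\n",
	       (unsigned long long)src, (unsigned long long)dst);
	return 0;
}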
@@ -766,6 +762,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@@ -785,7 +782,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node = tmp_mem.mm_node;
+ struct nouveau_vma *vma = &nvbo->vma;
+ if (vma->node->type != vma->vm->spg_shift)
+ vma = &node->tmp_vma;
+ nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ nouveau_vm_unmap(&nvbo->vma);
+ }
+
if (ret)
goto out;
@@ -828,6 +841,36 @@ out:
return ret;
}
+static void
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *node = new_mem->mm_node;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+
+ if (dev_priv->card_type < NV_50)
+ return;
+
+ switch (new_mem->mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_vm_map(vma, node);
+ break;
+ case TTM_PL_TT:
+ if (vma->node->type != vm->spg_shift) {
+ nouveau_vm_unmap(vma);
+ vma = &node->tmp_vma;
+ }
+ nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ break;
+ default:
+ nouveau_vm_unmap(&nvbo->vma);
+ break;
+ }
+}
+
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct nouveau_tile_reg **new_tile)
@@ -835,19 +878,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- uint64_t offset;
+ u64 offset = new_mem->start << PAGE_SHIFT;
- if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
- /* Nothing to do. */
- *new_tile = NULL;
+ *new_tile = NULL;
+ if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- }
-
- offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->chan_vm) {
- nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
- } else if (dev_priv->card_type >= NV_10) {
+ if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -864,11 +901,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
- if (dev_priv->card_type >= NV_10 &&
- dev_priv->card_type < NV_50) {
- nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
- *old_tile = new_tile;
- }
+ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+ *old_tile = new_tile;
}
static int
@@ -882,9 +916,11 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_tile_reg *new_tile = NULL;
int ret = 0;
- ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
- if (ret)
- return ret;
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (ret)
+ return ret;
+ }
/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
@@ -915,10 +951,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
- if (ret)
- nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
- else
- nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ if (dev_priv->card_type < NV_50) {
+ if (ret)
+ nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+ else
+ nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ }
return ret;
}
@@ -959,7 +997,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
case TTM_PL_VRAM:
{
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
u8 page_shift;
if (!dev_priv->bar1_vm) {
@@ -970,23 +1008,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
}
if (dev_priv->card_type == NV_C0)
- page_shift = vram->page_shift;
+ page_shift = node->page_shift;
else
page_shift = 12;
ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW,
- &vram->bar_vma);
+ &node->bar_vma);
if (ret)
return ret;
- nouveau_vm_map(&vram->bar_vma, vram);
+ nouveau_vm_map(&node->bar_vma, node);
if (ret) {
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
return ret;
}
- mem->bus.offset = vram->bar_vma.offset;
+ mem->bus.offset = node->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -1003,16 +1041,16 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
return;
- if (!vram->bar_vma.node)
+ if (!node->bar_vma.node)
return;
- nouveau_vm_unmap(&vram->bar_vma);
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_unmap(&node->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
}
static int
@@ -1062,6 +1100,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
+ .move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
.sync_obj_signaled = __nouveau_fence_signalled,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 3960d66d7aba..3837090d66af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,7 +35,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
- int ret;
+ int ret = 0;
if (dev_priv->card_type >= NV_50) {
if (dev_priv->card_type < NV_C0) {
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
else
location = TTM_PL_FLAG_TT;
- ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
- true, &pushbuf);
+ ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
if (ret) {
NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 505c6bfb4d75..764c15d537ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,7 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nv50_display.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -61,18 +62,59 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
+nouveau_framebuffer_init(struct drm_device *dev,
+ struct nouveau_framebuffer *nv_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct nouveau_bo *nvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret) {
return ret;
}
- drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
- nouveau_fb->nvbo = nvbo;
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ nv_fb->nvbo = nvbo;
+
+ if (dev_priv->card_type >= NV_50) {
+ u32 tile_flags = nouveau_bo_tile_layout(nvbo);
+ if (tile_flags == 0x7a00 ||
+ tile_flags == 0xfe00)
+ nv_fb->r_dma = NvEvoFB32;
+ else
+ if (tile_flags == 0x7000)
+ nv_fb->r_dma = NvEvoFB16;
+ else
+ nv_fb->r_dma = NvEvoVRAM_LP;
+
+ switch (fb->depth) {
+ case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
+ case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
+ case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 24:
+ case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
+ case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ default:
+ NV_ERROR(dev, "unknown depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ if (dev_priv->chipset == 0x50)
+ nv_fb->r_format |= (tile_flags << 8);
+
+ if (!tile_flags)
+ nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ else {
+ u32 mode = nvbo->tile_mode;
+ if (dev_priv->card_type >= NV_C0)
+ mode >>= 4;
+ nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+ }
+ }
+
return 0;
}
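
[Editor's note: nouveau_framebuffer_init() now precomputes everything the nv50 display path needs: a DMA object chosen by tile layout, an EVO depth format, and an encoded pitch word. The pitch encoding is the part worth spelling out: linear surfaces get bit 20 set over the byte pitch, while tiled surfaces store the pitch in units of 4 bytes in the upper bits with the tile mode in the low nibble (shifted down by 4 first on Fermi). A standalone sketch of just the pitch word, with the chipset check reduced to a flag.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t evo_pitch(uint32_t pitch_bytes, uint32_t tile_flags,
			  uint32_t tile_mode, bool is_fermi)
{
	if (!tile_flags)
		return 0x00100000 | pitch_bytes;	/* linear: flag + byte pitch */

	if (is_fermi)
		tile_mode >>= 4;	/* NV_C0 stores the mode 4 bits higher */

	return ((pitch_bytes / 4) << 4) | tile_mode;
}

int main(void)
{
	printf("linear: 0x%08x\n", evo_pitch(5120, 0x0000, 0, false));
	printf("tiled:  0x%08x\n", evo_pitch(5120, 0x7a00, 0x40, true));
	return 0;
}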
@@ -182,6 +224,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
int ret;
@@ -201,9 +244,12 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING(chan, 0);
- FIRE_RING(chan);
+ if (dev_priv->card_type < NV_C0)
+ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ else
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
+ OUT_RING (chan, 0);
+ FIRE_RING (chan);
ret = nouveau_fence_new(chan, pfence, true);
if (ret)
@@ -244,7 +290,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, s->event, nouveau_crtc(crtc)->index,
+ { { }, event, nouveau_crtc(crtc)->index,
fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
new_bo->bo.offset };
@@ -255,6 +301,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
mutex_lock(&chan->mutex);
/* Emit a page flip */
+ if (dev_priv->card_type >= NV_50) {
+ ret = nv50_display_flip_next(crtc, fb, chan);
+ if (ret) {
+ nouveau_channel_put(&chan);
+ goto fail_unreserve;
+ }
+ }
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
nouveau_channel_put(&chan);
if (ret)
@@ -305,7 +359,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
}
list_del(&s->head);
- *ps = *s;
+ if (ps)
+ *ps = *s;
kfree(s);
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b368ed74aad7..ce38e97b9428 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -97,13 +97,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
OUT_RING(chan, 0);
/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
- ret = RING_SPACE(chan, 4);
+ ret = RING_SPACE(chan, 6);
if (ret)
return ret;
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
- OUT_RING(chan, NvM2MF);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
- OUT_RING(chan, NvNotify0);
+ OUT_RING (chan, NvM2MF);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, chan->vram_handle);
+ OUT_RING (chan, chan->gart_handle);
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
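
[Editor's note: the M2MF init above folds the vram and gart handles into a single three-dword method burst: BEGIN_RING(chan, subc, mthd, 3) emits one command header and the puller then applies the next three dwords to consecutive methods starting at DMA_NOTIFY. A standalone sketch of the header packing; this is the NV04-style FIFO encoding nouveau's BEGIN_RING uses, stated here as an assumption from the driver's dma header rather than from this diff.]

#include <stdint.h>
#include <stdio.h>

/* NV04-style FIFO command header: dword count in bits 28:18, subchannel
 * in bits 15:13, method byte offset in the low bits */
static uint32_t ring_header(uint32_t subc, uint32_t mthd, uint32_t count)
{
	return (count << 18) | (subc << 13) | mthd;
}

int main(void)
{
	/* one header; NvNotify0, vram_handle, gart_handle follow as data */
	uint32_t hdr = ring_header(0 /* NvSubM2MF */,
				   0x0180 /* ..._DMA_NOTIFY */, 3);
	printf("header: 0x%08x\n", hdr);
	return 0;
}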
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c36f1763feaa..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -61,8 +61,6 @@ enum {
NvM2MF = 0x80000001,
NvDmaFB = 0x80000002,
NvDmaTT = 0x80000003,
- NvDmaVRAM = 0x80000004,
- NvDmaGART = 0x80000005,
NvNotify0 = 0x80000006,
Nv2D = 0x80000007,
NvCtxSurf2D = 0x80000008,
@@ -73,12 +71,15 @@ enum {
NvImageBlit = 0x8000000d,
NvSw = 0x8000000e,
NvSema = 0x8000000f,
+ NvEvoSema0 = 0x80000010,
+ NvEvoSema1 = 0x80000011,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
NvEvoFB32 = 0x01000002,
- NvEvoVRAM_LP = 0x01000003
+ NvEvoVRAM_LP = 0x01000003,
+ NvEvoSync = 0xcafe0000
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 38d599554bce..7beb82a0315d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -175,7 +175,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table_entry *dpse;
struct bit_displayport_encoder_table *dpe;
int ret, i, dpe_headerlen, vs = 0, pre = 0;
uint8_t request[2];
@@ -183,7 +182,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
if (!dpe)
return false;
- dpse = (void *)((char *)dpe + dpe_headerlen);
ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index f658a04eecf9..155ebdcbf06f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -408,14 +408,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = nouveau_pci_probe,
- .remove = nouveau_pci_remove,
- .suspend = nouveau_pci_suspend,
- .resume = nouveau_pci_resume
- },
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
@@ -432,6 +424,15 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver nouveau_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = nouveau_pci_probe,
+ .remove = nouveau_pci_remove,
+ .suspend = nouveau_pci_suspend,
+ .resume = nouveau_pci_resume
+};
+
static int __init nouveau_init(void)
{
driver.num_ioctls = nouveau_max_ioctl;
@@ -449,7 +450,7 @@ static int __init nouveau_init(void)
return 0;
nouveau_register_dsm_handler();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &nouveau_pci_driver);
}
static void __exit nouveau_exit(void)
@@ -457,7 +458,7 @@ static void __exit nouveau_exit(void)
if (!nouveau_modeset)
return;
- drm_exit(&driver);
+ drm_pci_exit(&driver, &nouveau_pci_driver);
nouveau_unregister_dsm_handler();
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 982d70b12722..06111887b789 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -57,7 +57,7 @@ struct nouveau_fpriv {
#include "nouveau_util.h"
struct nouveau_grctx;
-struct nouveau_vram;
+struct nouveau_mem;
#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
@@ -65,13 +65,16 @@ struct nouveau_vram;
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
-struct nouveau_vram {
+struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
+ struct nouveau_vma tmp_vma;
u8 page_shift;
+ struct drm_mm_node *tag;
struct list_head regions;
+ dma_addr_t *pages;
u32 memtype;
u64 offset;
u64 size;
@@ -90,6 +93,7 @@ struct nouveau_tile_reg {
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
+ u32 valid_domains;
u32 placements[3];
u32 busy_placements[3];
struct ttm_bo_kmap_obj kmap;
@@ -104,8 +108,6 @@ struct nouveau_bo {
struct nouveau_channel *channel;
struct nouveau_vma vma;
- bool mappable;
- bool no_vm;
uint32_t tile_mode;
uint32_t tile_flags;
@@ -387,6 +389,7 @@ struct nouveau_pgraph_engine {
};
struct nouveau_display_engine {
+ void *priv;
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
@@ -509,8 +512,8 @@ struct nouveau_crypt_engine {
struct nouveau_vram_engine {
int (*init)(struct drm_device *);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **);
- void (*put)(struct drm_device *, struct nouveau_vram **);
+ u32 type, struct nouveau_mem **);
+ void (*put)(struct drm_device *, struct nouveau_mem **);
bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};
@@ -652,8 +655,6 @@ struct drm_nouveau_private {
/* interrupt handling */
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
- struct workqueue_struct *wq;
- struct work_struct irq_work;
struct list_head vbl_waiting;
@@ -691,15 +692,22 @@ struct drm_nouveau_private {
struct {
enum {
NOUVEAU_GART_NONE = 0,
- NOUVEAU_GART_AGP,
- NOUVEAU_GART_SGDMA
+ NOUVEAU_GART_AGP, /* AGP */
+ NOUVEAU_GART_PDMA, /* paged dma object */
+ NOUVEAU_GART_HW /* on-chip gart/vm */
} type;
uint64_t aper_base;
uint64_t aper_size;
uint64_t aper_free;
+ struct ttm_backend_func *func;
+
+ struct {
+ struct page *page;
+ dma_addr_t addr;
+ } dummy;
+
struct nouveau_gpuobj *sg_ctxdma;
- struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
@@ -740,14 +748,6 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
- struct nouveau_channel *evo;
- u32 evo_alloc;
- struct {
- struct dcb_entry *dcb;
- u16 script;
- u32 pclk;
- } evo_irq;
-
struct {
struct dentry *channel_root;
} debugfs;
@@ -847,6 +847,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -1076,7 +1077,7 @@ extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
-extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
+extern void nv50_fb_vm_trap(struct drm_device *, int display);
/* nvc0_fb.c */
extern int nvc0_fb_init(struct drm_device *);
@@ -1295,7 +1296,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
int size, int align, uint32_t flags,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1356,9 +1357,9 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t flags,
+ int size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index d432134b71e0..a3a88ad00f86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,9 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
+ u32 r_dma;
+ u32 r_format;
+ u32 r_pitch;
};
static inline struct nouveau_framebuffer *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 60769d2f9a66..889c4454682e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -296,8 +296,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nvbo);
+ ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 221b8462ea37..4b9f4493c9f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -27,13 +27,15 @@
#include "drmP.h"
#include "drm.h"
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
- nouveau_private(dev)->card_type < NV_C0)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -230,7 +232,8 @@ int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
- unsigned long sleep_time = jiffies + 1;
+ unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+ ktime_t t;
int ret = 0;
while (1) {
@@ -244,8 +247,13 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
- if (lazy && time_after_eq(jiffies, sleep_time))
- schedule_timeout(1);
+ if (lazy) {
+ t = ktime_set(0, sleep_time);
+ schedule_hrtimeout(&t, HRTIMER_MODE_REL);
+ sleep_time *= 2;
+ if (sleep_time > NSEC_PER_MSEC)
+ sleep_time = NSEC_PER_MSEC;
+ }
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -259,11 +267,12 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
static struct nouveau_semaphore *
-alloc_semaphore(struct drm_device *dev)
+semaphore_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
- int ret;
+ int size = (dev_priv->chipset < 0x84) ? 4 : 16;
+ int ret, i;
if (!USE_SEMA(dev))
return NULL;
@@ -277,9 +286,9 @@ alloc_semaphore(struct drm_device *dev)
goto fail;
spin_lock(&dev_priv->fence.lock);
- sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+ sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
if (sema->mem)
- sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
+ sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
@@ -287,7 +296,8 @@ alloc_semaphore(struct drm_device *dev)
kref_init(&sema->ref);
sema->dev = dev;
- nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+ for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
+ nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
return sema;
fail:
@@ -296,7 +306,7 @@ fail:
}
static void
-free_semaphore(struct kref *ref)
+semaphore_free(struct kref *ref)
{
struct nouveau_semaphore *sema =
container_of(ref, struct nouveau_semaphore, ref);
@@ -318,61 +328,107 @@ semaphore_work(void *priv, bool signalled)
if (unlikely(!signalled))
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
}
static int
-emit_semaphore(struct nouveau_channel *chan, int method,
- struct nouveau_semaphore *sema)
+semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
- struct nouveau_fence *fence;
- bool smart = (dev_priv->card_type >= NV_50);
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
int ret;
- ret = RING_SPACE(chan, smart ? 8 : 4);
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 1); /* ACQUIRE_EQ */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
+ }
+
+ /* Delay semaphore destruction until its work is done */
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
- if (smart) {
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING(chan, NvSema);
- }
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
- OUT_RING(chan, sema->mem->start);
-
- if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
- /*
- * NV50 tries to be too smart and context-switch
- * between semaphores instead of doing a "first come,
- * first served" strategy like previous cards
- * do.
- *
- * That's bad because the ACQUIRE latency can get as
- * large as the PFIFO context time slice in the
- * typical DRI2 case where you have several
- * outstanding semaphores at the same moment.
- *
- * If we're going to ACQUIRE, force the card to
- * context switch before, just in case the matching
- * RELEASE is already scheduled to be executed in
- * another channel.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
- }
+ kref_get(&sema->ref);
+ nouveau_fence_work(fence, semaphore_work, sema);
+ nouveau_fence_unref(&fence);
+ return 0;
+}
+
+static int
+semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ OUT_RING (chan, sema->mem->start);
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 2); /* RELEASE */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
- BEGIN_RING(chan, NvSubSw, method, 1);
- OUT_RING(chan, 1);
-
- if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
- /*
- * Force the card to context switch, there may be
- * another channel waiting for the semaphore we just
- * released.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
@@ -383,7 +439,6 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
-
return 0;
}
@@ -400,7 +455,7 @@ nouveau_fence_sync(struct nouveau_fence *fence,
nouveau_fence_signalled(fence)))
goto out;
- sema = alloc_semaphore(dev);
+ sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
@@ -418,17 +473,17 @@ nouveau_fence_sync(struct nouveau_fence *fence,
}
/* Make wchan wait until it gets signalled */
- ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+ ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
- ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+ ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
@@ -449,22 +504,23 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
+ if (dev_priv->card_type >= NV_C0)
+ goto out_initialised;
+
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
/* we leave subchannel empty for nvc0 */
- if (dev_priv->card_type < NV_C0) {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
- }
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
/* Create a DMA object for the shared cross-channel sync area. */
- if (USE_SEMA(dev)) {
+ if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -484,14 +540,20 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING(chan, NvSema);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle); /* whole VM */
}
FIRE_RING(chan);
+out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
-
return 0;
}
@@ -519,12 +581,13 @@ int
nouveau_fence_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
int ret;
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
- 0, 0, false, true, &dev_priv->fence.bo);
+ ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
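
Note on the __nouveau_fence_wait() hunk above: it drops the fixed one-jiffy polling sleep in favour of an hrtimer-based exponential backoff. The wait starts at one microsecond (NSEC_PER_MSEC / 1000), doubles on every lazy iteration, and is clamped at one millisecond — low latency for fences that signal quickly, bounded CPU use on long waits. A minimal standalone sketch of just that schedule (NSEC_PER_MSEC is redefined locally; the iteration count is illustrative):

/* backoff.c — sketch of the fence-wait backoff above; build: cc backoff.c */
#include <stdio.h>

#define NSEC_PER_MSEC 1000000UL

int main(void)
{
	unsigned long sleep_time = NSEC_PER_MSEC / 1000; /* start at 1us */
	int i;

	for (i = 0; i < 15; i++) {
		printf("poll %2d: sleep %7lu ns\n", i, sleep_time);
		sleep_time *= 2;                /* double each lazy pass */
		if (sleep_time > NSEC_PER_MSEC) /* clamp at 1ms */
			sleep_time = NSEC_PER_MSEC;
	}
	return 0;
}
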
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 506c508b7eda..e8b04f4aed7e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,19 +61,36 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ int size, int align, uint32_t domain, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
+ u32 flags = 0;
int ret;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+ if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
+ flags |= TTM_PL_FLAG_SYSTEM;
+
ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
- tile_flags, no_vm, mappable, pnvbo);
+ tile_flags, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. this is not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ if (dev_priv->card_type >= NV_50)
+ nvbo->valid_domains &= domain;
+
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo);
@@ -97,7 +114,7 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
- rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+ rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
@@ -111,19 +128,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
struct nouveau_channel *chan = NULL;
- uint32_t flags = 0;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
- if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
- flags |= TTM_PL_FLAG_SYSTEM;
-
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
@@ -135,10 +144,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return PTR_ERR(chan);
}
- ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
- req->info.tile_mode, req->info.tile_flags, false,
- (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
- &nvbo);
+ ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
+ req->info.domain, req->info.tile_mode,
+ req->info.tile_flags, &nvbo);
if (chan)
nouveau_channel_put(&chan);
if (ret)
@@ -161,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
- uint32_t domains = valid_domains &
+ uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
@@ -592,7 +600,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (push[i].bo_index >= req->nr_buffers) {
NV_ERROR(dev, "push %d buffer not in list\n", i);
ret = -EINVAL;
- goto out;
+ goto out_prevalid;
}
bo[push[i].bo_index].read_domains |= (1 << 31);
@@ -604,7 +612,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (ret) {
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate: %d\n", ret);
- goto out;
+ goto out_prevalid;
}
/* Apply any relocations that are required */
@@ -697,6 +705,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
nouveau_fence_unref(&fence);
+
+out_prevalid:
kfree(bo);
kfree(push);
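
With the hunks above, nouveau_gem_new() takes the NOUVEAU_GEM_DOMAIN_* mask straight from the ioctl and derives the TTM placement flags itself; on NV50+ it additionally records the creation-time domains in nvbo->valid_domains so nouveau_gem_set_domain() can never migrate a buffer somewhere userspace did not ask for at creation. A hedged standalone sketch of the flag translation — the constants below are illustrative stand-ins, not the real values from nouveau_drm.h and the TTM headers:

/* domains.c — sketch of the domain -> placement translation above */
#include <stdio.h>
#include <stdint.h>

#define NOUVEAU_GEM_DOMAIN_CPU  0x1 /* illustrative values */
#define NOUVEAU_GEM_DOMAIN_VRAM 0x2
#define NOUVEAU_GEM_DOMAIN_GART 0x4

#define TTM_PL_FLAG_SYSTEM 0x1
#define TTM_PL_FLAG_TT     0x2
#define TTM_PL_FLAG_VRAM   0x4

static uint32_t domain_to_flags(uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	/* no domain at all, or an explicit CPU request, lands in system RAM */
	if (!flags || (domain & NOUVEAU_GEM_DOMAIN_CPU))
		flags |= TTM_PL_FLAG_SYSTEM;
	return flags;
}

int main(void)
{
	printf("VRAM      -> 0x%x\n", domain_to_flags(NOUVEAU_GEM_DOMAIN_VRAM));
	printf("VRAM|GART -> 0x%x\n", domain_to_flags(NOUVEAU_GEM_DOMAIN_VRAM |
						      NOUVEAU_GEM_DOMAIN_GART));
	printf("none      -> 0x%x\n", domain_to_flags(0));
	return 0;
}
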
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b0fb9bdcddb7..2683377f4131 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -152,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -393,11 +392,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits;
- if (dev_priv->card_type >= NV_50 &&
- pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
- dma_bits = 40;
- else
- dma_bits = 32;
+ dma_bits = 32;
+ if (dev_priv->card_type >= NV_50) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ dma_bits = 40;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset != 0x40 &&
+ dev_priv->chipset != 0x45) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
+ dma_bits = 39;
+ }
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
@@ -419,14 +424,32 @@ nouveau_mem_vram_init(struct drm_device *dev)
}
/* reserve space at end of VRAM for PRAMIN */
- if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
- dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
- dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
- else
- if (dev_priv->card_type >= NV_40)
- dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
- else
- dev_priv->ramin_rsvd_vram = (512 * 1024);
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
+ } else
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= dev_priv->engine.fifo.channels;
+
+ /* pciegart table */
+ if (drm_pci_device_is_pcie(dev))
+ rsvd += 512 * 1024;
+
+ /* object storage */
+ rsvd += 512 * 1024;
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
+ }
ret = dev_priv->engine.vram.init(dev);
if (ret)
@@ -455,13 +478,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &dev_priv->vga_ram);
- if (ret == 0)
- ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_WARN(dev, "failed to reserve VGA memory\n");
- nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram,
+ TTM_PL_FLAG_VRAM);
+
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
}
dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@@ -480,7 +507,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
+ if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -666,13 +693,14 @@ nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_mm *mm;
- u32 b_size;
+ u64 size, block, rsvd;
int ret;
- p_size = (p_size << PAGE_SHIFT) >> 12;
- b_size = dev_priv->vram_rblock_size >> 12;
+ rsvd = (256 * 1024); /* vga memory */
+ size = (p_size << PAGE_SHIFT) - rsvd;
+ block = dev_priv->vram_rblock_size;
- ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+ ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
if (ret)
return ret;
@@ -700,9 +728,15 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
- vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+
+ vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
static int
@@ -715,7 +749,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vram *node;
+ struct nouveau_mem *node;
u32 size_nc = 0;
int ret;
@@ -724,7 +758,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0xff, &node);
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
mem->mm_node = NULL;
return (ret == -ENOSPC) ? 0 : ret;
@@ -771,3 +805,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_del,
nouveau_vram_manager_debug
};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+ return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+ return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct nouveau_mem *node = mem->mm_node;
+
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+ mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mem *node;
+ int ret;
+
+ if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+ dev_priv->gart_info.aper_size))
+ return -ENOMEM;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* This node must be for evicting large-paged VRAM
+ * to system memory. Due to an nv50 limitation of
+ * not being able to mix large/small pages within
+ * the same PDE, we need to create a temporary
+ * small-paged VMA for the eviction.
+ */
+ if (vma->node->type != vm->spg_shift) {
+ ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
+ vm->spg_shift, NV_MEM_ACCESS_RW,
+ &node->tmp_vma);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+ }
+
+ node->page_shift = nvbo->vma.node->type;
+ mem->mm_node = node;
+ mem->start = 0;
+ return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+ nouveau_gart_manager_init,
+ nouveau_gart_manager_fini,
+ nouveau_gart_manager_new,
+ nouveau_gart_manager_del,
+ nouveau_gart_manager_debug
+};
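
Two things land in nouveau_mem.c above: a trivial TTM manager for the GART domain, and an NV40-family PRAMIN reservation that is estimated rather than hard-coded. The estimate multiplies a per-chipset grctx size by the shader-unit count (hweight8 of bits 8–15 of register 0x001540), adds 16 KiB, scales by the FIFO channel count, adds fixed allowances for the PCIE GART table and object storage, and rounds up to 4 KiB. A standalone sketch of that arithmetic, with nv44_graph_class() and drm_pci_device_is_pcie() reduced to boolean parameters and all inputs illustrative:

/* rsvd.c — sketch of the NV40 PRAMIN reservation estimate above */
#include <stdio.h>
#include <stdint.h>

/* stand-in for the kernel's hweight8() */
static unsigned int hweight8(uint8_t v) { return __builtin_popcount(v); }

static uint32_t round_up_4k(uint32_t v) { return (v + 4095) & ~4095u; }

static uint32_t ramin_rsvd(int chipset, uint8_t vs_mask, int channels,
			   int is_nv44_class, int pcie)
{
	unsigned int vs = hweight8(vs_mask);
	uint32_t rsvd;

	/* magics copied from the hunk above (originally nv40_grctx.c) */
	if (chipset == 0x40)     rsvd = 0x6aa0 * vs;
	else if (chipset < 0x43) rsvd = 0x4f00 * vs;
	else if (is_nv44_class)  rsvd = 0x4980 * vs;
	else                     rsvd = 0x4a40 * vs;
	rsvd += 16 * 1024;
	rsvd *= channels;

	if (pcie)
		rsvd += 512 * 1024; /* pciegart table */
	rsvd += 512 * 1024;         /* object storage */

	return round_up_4k(rsvd);
}

int main(void)
{
	/* e.g. an NV40 with 6 shader units and 32 FIFO channels */
	printf("rsvd = %u bytes\n", ramin_rsvd(0x40, 0x3f, 32, 0, 0));
	return 0;
}
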
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 798eaf39691c..1f7483aae9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
- u32 memtype, struct nouveau_vram **);
-void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
+void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
int nvc0_vram_init(struct drm_device *);
int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 5ea167623a82..7ba3fc0b30c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -39,12 +39,11 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
int ret;
if (nouveau_vram_notify)
- flags = TTM_PL_FLAG_VRAM;
+ flags = NOUVEAU_GEM_DOMAIN_VRAM;
else
- flags = TTM_PL_FLAG_TT;
+ flags = NOUVEAU_GEM_DOMAIN_GART;
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
- 0, 0x0000, false, true, &ntfy);
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
@@ -100,6 +99,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
@@ -114,11 +114,16 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
- target = NV_MEM_TARGET_VRAM;
- else
- target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ if (dev_priv->card_type < NV_50) {
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ target = NV_MEM_TARGET_VRAM;
+ else
+ target = NV_MEM_TARGET_GART;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ } else {
+ target = NV_MEM_TARGET_VM;
+ offset = chan->notifier_bo->vma.offset;
+ }
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 30b6544467ca..4f00c87ed86e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
+#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -490,16 +491,22 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
}
if (target == NV_MEM_TARGET_GART) {
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- target = NV_MEM_TARGET_PCI_NOSNOOP;
- base += dev_priv->gart_info.aper_base;
- } else
- if (base != 0) {
- base = nouveau_sgdma_get_physical(dev, base);
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
+ if (base == 0) {
+ nouveau_gpuobj_ref(gart, pobj);
+ return 0;
+ }
+
+ base = nouveau_sgdma_get_physical(dev, base);
target = NV_MEM_TARGET_PCI;
} else {
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
- return 0;
+ base += dev_priv->gart_info.aper_base;
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
+ target = NV_MEM_TARGET_PCI_NOSNOOP;
+ else
+ target = NV_MEM_TARGET_PCI;
}
}
@@ -776,7 +783,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret;
+ int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
@@ -841,6 +848,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
+
+ /* dma objects for display sync channel semaphore blocks */
+ for (i = 0; i < 2; i++) {
+ struct nouveau_gpuobj *sem = NULL;
+ struct nv50_display_crtc *dispc =
+ &nv50_display(dev)->crtc[i];
+ u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &sem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
+ nouveau_gpuobj_ref(NULL, &sem);
+ if (ret)
+ return ret;
+ }
}
/* VRAM ctxdma */
@@ -909,7 +935,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index bef3e6910418..a24a81f5a89e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -114,7 +114,9 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) | chan->id;
+ ctx = (gpuobj->cinst << 10) |
+ (chan->id << 28) |
+ chan->id; /* HASH_TAG */
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
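
For display-engine objects, the RAMHT context word above now carries the channel id twice: in the low bits as before, and again in bits 28–31 as the hash tag. A tiny standalone sketch of the packing (field positions are taken from the hunk; the instance value is made up):

/* ramht.c — sketch of the display-object context word above */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cinst   = 0x1234; /* illustrative object instance */
	uint32_t chan_id = 2;

	uint32_t ctx = (cinst << 10) | (chan_id << 28) | chan_id; /* HASH_TAG */

	printf("ctx = 0x%08x\n", ctx);
	return 0;
}
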
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 9a250eb53098..a33fe4019286 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
struct drm_device *dev;
dma_addr_t *pages;
+ bool *ttm_alloced;
unsigned nr_pages;
u64 offset;
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page)
+ struct page **pages, struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -34,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
if (!nvbe->pages)
return -ENOMEM;
+ nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+ if (!nvbe->ttm_alloced)
+ return -ENOMEM;
+
nvbe->nr_pages = 0;
while (num_pages--) {
- nvbe->pages[nvbe->nr_pages] =
- pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+ if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+ nvbe->pages[nvbe->nr_pages] =
+ dma_addrs[nvbe->nr_pages];
+ nvbe->ttm_alloced[nvbe->nr_pages] = true;
+ } else {
+ nvbe->pages[nvbe->nr_pages] =
+ pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- nvbe->pages[nvbe->nr_pages])) {
- be->func->clear(be);
- return -EFAULT;
+ if (pci_dma_mapping_error(dev->pdev,
+ nvbe->pages[nvbe->nr_pages])) {
+ be->func->clear(be);
+ return -EFAULT;
+ }
}
nvbe->nr_pages++;
@@ -65,17 +77,36 @@ nouveau_sgdma_clear(struct ttm_backend *be)
be->func->unbind(be);
while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ if (!nvbe->ttm_alloced[nvbe->nr_pages])
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
kfree(nvbe->pages);
+ kfree(nvbe->ttm_alloced);
nvbe->pages = NULL;
+ nvbe->ttm_alloced = NULL;
nvbe->nr_pages = 0;
}
}
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+ if (be) {
+ NV_DEBUG(nvbe->dev, "\n");
+
+ if (nvbe) {
+ if (nvbe->pages)
+ be->func->clear(be);
+ kfree(nvbe);
+ }
+ }
+}
+
static int
-nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -102,7 +133,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
static int
-nouveau_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -125,59 +156,245 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
return 0;
}
+static struct ttm_backend_func nv04_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv04_sgdma_bind,
+ .unbind = nv04_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100810, 0x00000022);
+ if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
+ NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100810));
+ nv_wr32(dev, 0x100810, 0x00000000);
+}
+
+static int
+nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2;
+ u32 cnt = nvbe->nr_pages;
- if (be) {
- NV_DEBUG(nvbe->dev, "\n");
+ nvbe->offset = mem->start << PAGE_SHIFT;
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
+ while (cnt--) {
+ nv_wo32(pgt, pte, (*list++ >> 7) | 1);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv41_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ while (cnt--) {
+ nv_wo32(pgt, pte, 0x00000000);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = false;
+ return 0;
+}
+
+static struct ttm_backend_func nv41_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv41_sgdma_bind,
+ .unbind = nv41_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+static void
+nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
+ if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
+ NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100808));
+ nv_wr32(dev, 0x100808, 0x00000000);
+}
+
+static void
+nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
+ u32 pte, tmp[4];
+
+ pte = base >> 2;
+ base &= ~0x0000000f;
+
+ tmp[0] = nv_ro32(pgt, base + 0x0);
+ tmp[1] = nv_ro32(pgt, base + 0x4);
+ tmp[2] = nv_ro32(pgt, base + 0x8);
+ tmp[3] = nv_ro32(pgt, base + 0xc);
+ while (cnt--) {
+ u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
+ switch (pte++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
}
}
+
+ tmp[3] |= 0x40000000;
+
+ nv_wo32(pgt, base + 0x0, tmp[0]);
+ nv_wo32(pgt, base + 0x4, tmp[1]);
+ nv_wo32(pgt, base + 0x8, tmp[2]);
+ nv_wo32(pgt, base + 0xc, tmp[3]);
}
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2, tmp[4];
+ u32 cnt = nvbe->nr_pages;
+ int i;
nvbe->offset = mem->start << PAGE_SHIFT;
- nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, list, pte, part);
+ pte += (part << 2);
+ list += part;
+ cnt -= part;
+ }
+
+ while (cnt >= 4) {
+ for (i = 0; i < 4; i++)
+ tmp[i] = *list++ >> 12;
+ nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
+ nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
+ nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
+ nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, list, pte, cnt);
+
+ nv44_sgdma_flush(nvbe);
nvbe->bound = true;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, NULL, pte, part);
+ pte += (part << 2);
+ cnt -= part;
+ }
- if (!nvbe->bound)
- return 0;
+ while (cnt >= 4) {
+ nv_wo32(pgt, pte + 0x0, 0x00000000);
+ nv_wo32(pgt, pte + 0x4, 0x00000000);
+ nv_wo32(pgt, pte + 0x8, 0x00000000);
+ nv_wo32(pgt, pte + 0xc, 0x00000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT);
+ nv44_sgdma_flush(nvbe);
nvbe->bound = false;
return 0;
}
-static struct ttm_backend_func nouveau_sgdma_backend = {
+static struct ttm_backend_func nv44_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
- .bind = nouveau_sgdma_bind,
- .unbind = nouveau_sgdma_unbind,
+ .bind = nv44_sgdma_bind,
+ .unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = mem->mm_node;
+ /* noop: bound in move_notify() */
+ node->pages = nvbe->pages;
+ nvbe->pages = (dma_addr_t *)node;
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+ /* noop: unbound in move_notify() */
+ nvbe->pages = node->pages;
+ node->pages = NULL;
+ nvbe->bound = false;
+ return 0;
+}
+
static struct ttm_backend_func nv50_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@@ -198,10 +415,7 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
nvbe->dev = dev;
- if (dev_priv->card_type < NV_50)
- nvbe->backend.func = &nouveau_sgdma_backend;
- else
- nvbe->backend.func = &nv50_sgdma_backend;
+ nvbe->backend.func = dev_priv->gart_info.func;
return &nvbe->backend;
}
@@ -210,21 +424,64 @@ nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
- uint32_t aper_size, obj_size;
- int i, ret;
+ u32 aper_size, align;
+ int ret;
- if (dev_priv->card_type < NV_50) {
- if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
- aper_size = 64 * 1024 * 1024;
- else
- aper_size = 512 * 1024 * 1024;
+ if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+ aper_size = 512 * 1024 * 1024;
+ else
+ aper_size = 64 * 1024 * 1024;
+
+ /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
+ * christmas. The cards before it have them, the cards after
+ * it have them, why is NV44 so unloved?
+ */
+ dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
+ if (!dev_priv->gart_info.dummy.page)
+ return -ENOMEM;
+
+ dev_priv->gart_info.dummy.addr =
+ pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
+ NV_ERROR(dev, "error mapping dummy page\n");
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ return -ENOMEM;
+ }
+
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ dev_priv->gart_info.func = &nv50_sgdma_backend;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev)) {
+ dev_priv->gart_info.func = &nv44_sgdma_backend;
+ align = 512 * 1024;
+ } else {
+ dev_priv->gart_info.func = &nv41_sgdma_backend;
+ align = 16;
+ }
- obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
- obj_size += 8; /* ctxdma header */
+ ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
- ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ } else {
+ ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
@@ -236,25 +493,14 @@ nouveau_sgdma_init(struct drm_device *dev)
(0 << 14) /* RW */ |
(2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
- for (i = 2; i < 2 + (aper_size >> 12); i++)
- nv_wo32(gpuobj, i * 4, 0x00000000);
dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
- } else
- if (dev_priv->chan_vm) {
- ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
- 12, NV_MEM_ACCESS_RW,
- &dev_priv->gart_info.vma);
- if (ret)
- return ret;
-
- dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
- dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
+ dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
+ dev_priv->gart_info.func = &nv04_sgdma_backend;
}
- dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
return 0;
}
@@ -264,7 +510,13 @@ nouveau_sgdma_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
- nouveau_vm_put(&dev_priv->gart_info.vma);
+
+ if (dev_priv->gart_info.dummy.page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ }
}
uint32_t
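
The new nv44 backend packs four 27-bit page addresses (dma_addr >> 12 — which is also why the nouveau_mem.c hunk sets a 39-bit DMA mask on these chips) into each 16-byte PTE group. That is why nv44_sgdma_bind() must read-modify-write partial groups through nv44_sgdma_fill() and can only take the fast path for aligned runs of four. A standalone round-trip sketch of the fast-path packing; the 0x40000000 bit mirrors what the bind path writes (its exact hardware meaning is not spelled out in the patch), and the page numbers are illustrative:

/* nv44pte.c — sketch of the nv44 PTE-group packing above */
#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* pack four 27-bit page addresses the way the nv44 fast path does */
static void pack4(const uint32_t a[4], uint32_t w[4])
{
	w[0] = a[0] >>  0 | a[1] << 27;
	w[1] = a[1] >>  5 | a[2] << 22;
	w[2] = a[2] >> 10 | a[3] << 17;
	w[3] = a[3] >> 15 | 0x40000000;
}

/* inverse, to check that the field layout round-trips */
static void unpack4(const uint32_t w[4], uint32_t a[4])
{
	a[0] =  w[0]        & 0x07ffffff;
	a[1] = (w[0] >> 27) | ((w[1] & 0x003fffff) << 5);
	a[2] = (w[1] >> 22) | ((w[2] & 0x0001ffff) << 10);
	a[3] = (w[2] >> 17) | ((w[3] & 0x00000fff) << 15);
}

int main(void)
{
	/* illustrative page numbers (dma_addr >> 12), each < 2^27 */
	uint32_t a[4] = { 0x0012345, 0x7654321, 0x0abcdef, 0x7fffffe };
	uint32_t w[4], b[4];
	int i;

	pack4(a, w);
	unpack4(w, b);
	for (i = 0; i < 4; i++)
		assert(a[i] == b[i]);
	printf("pte group: %08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
	return 0;
}
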
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a54fc431fe98..05294910e135 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -544,7 +544,6 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj = NULL;
int ret;
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
@@ -552,41 +551,8 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
- /* no dma objects on fermi... */
- if (dev_priv->card_type >= NV_C0)
- goto out_done;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vram_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->gart_info.aper_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
-out_done:
mutex_unlock(&dev_priv->channel->mutex);
return 0;
-
-out_err:
- nouveau_channel_put(&dev_priv->channel);
- return ret;
}
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
@@ -929,12 +895,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- dev_priv->wq = create_workqueue("nouveau");
- if (!dev_priv->wq) {
- ret = -EINVAL;
- goto err_priv;
- }
-
/* resource 0 is mmio regs */
/* resource 1 is linear FB */
/* resource 2 is RAMIN (mmio regs + 0x1000000) */
@@ -947,7 +907,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
ret = -EINVAL;
- goto err_wq;
+ goto err_priv;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
@@ -1054,8 +1014,6 @@ err_ramin:
iounmap(dev_priv->ramin);
err_mmio:
iounmap(dev_priv->mmio);
-err_wq:
- destroy_workqueue(dev_priv->wq);
err_priv:
kfree(dev_priv);
dev->dev_private = NULL;
@@ -1103,9 +1061,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev->pci_device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
@@ -1126,7 +1084,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = (dev_priv->card_type < NV_50);
+ getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 8d9968e1cba8..649b0413b09f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -239,11 +239,9 @@ static bool
probe_monitoring_device(struct nouveau_i2c_chan *i2c,
struct i2c_board_info *info)
{
- char modalias[16] = "i2c:";
struct i2c_client *client;
- strlcat(modalias, info->type, sizeof(modalias));
- request_module(modalias);
+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_device(&i2c->adapter, info);
if (!client)
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
index fbe0fb13bc1e..e51b51503baa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -47,18 +47,27 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
printk(" (unknown bits 0x%08x)", value);
}
-void
-nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *en, u32 value)
{
while (en->name) {
- if (value == en->value) {
- printk("%s", en->name);
- return;
- }
-
+ if (en->value == value)
+ return en;
en++;
}
+ return NULL;
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+ en = nouveau_enum_find(en, value);
+ if (en) {
+ printk("%s", en->name);
+ return;
+ }
+
printk("(unknown enum 0x%08x)", value);
}
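
nouveau_enum_print() is now layered on a reusable nouveau_enum_find() that walks a table terminated by a NULL name and returns the matching entry, or NULL; together with the new data pointer in struct nouveau_enum, this lets later code attach per-value payloads. A standalone sketch of the lookup pattern with an illustrative table:

/* enumfind.c — sketch of the table lookup introduced above */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct nouveau_enum {
	uint32_t value;
	const char *name;
	void *data;
};

static const struct nouveau_enum *
nouveau_enum_find(const struct nouveau_enum *en, uint32_t value)
{
	while (en->name) { /* NULL name terminates the table */
		if (en->value == value)
			return en;
		en++;
	}
	return NULL;
}

int main(void)
{
	/* illustrative table; real tables live in the engine-specific files */
	static const struct nouveau_enum errs[] = {
		{ 0x01, "BUS_ERROR", NULL },
		{ 0x02, "VM_FAULT",  NULL },
		{ 0, NULL, NULL }
	};
	const struct nouveau_enum *en = nouveau_enum_find(errs, 0x02);

	printf("%s\n", en ? en->name : "(unknown)");
	return 0;
}
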
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
index d9ceaea26f4b..b97719fbb739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -36,10 +36,14 @@ struct nouveau_bitfield {
struct nouveau_enum {
u32 value;
const char *name;
+ void *data;
};
void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *, u32 value);
+
int nouveau_ratelimit(void);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 97d82aedf86b..0059e6f58a8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -28,7 +28,7 @@
#include "nouveau_vm.h"
void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
struct nouveau_vm *vm = vma->vm;
struct nouveau_mm_node *r;
@@ -40,7 +40,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
u32 max = 1 << (vm->pgt_bits - bits);
u32 end, len;
- list_for_each_entry(r, &vram->regions, rl_entry) {
+ delta = 0;
+ list_for_each_entry(r, &node->regions, rl_entry) {
u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits;
@@ -52,7 +53,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
end = max;
len = end - pte;
- vm->map(vma, pgt, vram, pte, len, phys);
+ vm->map(vma, pgt, node, pte, len, phys, delta);
num -= len;
pte += len;
@@ -60,6 +61,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
pde++;
pte = 0;
}
+
+ delta += (u64)len << vma->node->type;
}
}
@@ -67,14 +70,14 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
}
void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
- nouveau_vm_map_at(vma, 0, vram);
+ nouveau_vm_map_at(vma, 0, node);
}
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- dma_addr_t *list)
+ struct nouveau_mem *mem, dma_addr_t *list)
{
struct nouveau_vm *vm = vma->vm;
int big = vma->node->type != vm->spg_shift;
@@ -94,7 +97,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
end = max;
len = end - pte;
- vm->map_sg(vma, pgt, pte, list, len);
+ vm->map_sg(vma, pgt, mem, pte, len, list);
num -= len;
pte += len;
@@ -311,18 +314,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
vm->spg_shift = 12;
vm->lpg_shift = 17;
pgt_bits = 27;
-
- /* Should be 4096 everywhere, this is a hack that's
- * currently necessary to avoid an elusive bug that
- * causes corruption when mixing small/large pages
- */
- if (length < (1ULL << 40))
- block = 4096;
- else {
- block = (1 << pgt_bits);
- if (length < block)
- block = length;
- }
+ block = 4096;
} else {
kfree(vm);
return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index e1193515771b..2e06b55cfdc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -67,9 +67,10 @@ struct nouveau_vm {
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@@ -82,20 +83,20 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- dma_addr_t *);
+ struct nouveau_mem *, dma_addr_t *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);
@@ -104,9 +105,9 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 297505eb98d5..a260fbbe3d9b 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1031,7 +1031,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f89d104698df..db465a3ee1b2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -379,6 +379,15 @@ out:
return handled;
}
+static const char *nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
void
nv04_fifo_isr(struct drm_device *dev)
{
@@ -460,9 +469,10 @@ nv04_fifo_isr(struct drm_device *dev)
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x Push 0x%08x\n",
+ "State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
@@ -476,8 +486,9 @@ nv04_fifo_isr(struct drm_device *dev)
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, dma_get, dma_put, state, push);
+ "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
@@ -505,7 +516,7 @@ nv04_fifo_isr(struct drm_device *dev)
if (dev_priv->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ nv50_fb_vm_trap(dev, nouveau_ratelimit());
status &= ~0x00000010;
nv_wr32(dev, 0x002100, 0x00000010);
}
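
nv_dma_state_err() above is a pure bit-decode: bits 31:29 of the DMA pusher state word index a fixed name table. A standalone userspace sketch of the same decode, using an invented sample value rather than a real register read:

#include <stdio.h>
#include <stdint.h>

static const char *nv_dma_state_err(uint32_t state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

int main(void)
{
	uint32_t state = 0x40000000;	/* err field = 2 -> INVALID_MTHD */

	printf("State 0x%08x (err: %s)\n", state, nv_dma_state_err(state));
	return 0;
}
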
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 28119fd19d03..3900cebba560 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -197,10 +197,12 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
- struct drm_display_mode *mode, *tv_mode;
+ const struct drm_display_mode *tv_mode;
int n = 0;
for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ struct drm_display_mode *mode;
+
mode = drm_mode_duplicate(encoder->dev, tv_mode);
mode->clock = tv_norm->tv_enc_mode.vrefresh *
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index 6bf03840f9eb..622e72221682 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -112,7 +112,7 @@ extern struct nv17_tv_norm_params {
} nv17_tv_norms[NUM_TV_NORMS];
#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
-extern struct drm_display_mode nv17_tv_modes[];
+extern const struct drm_display_mode nv17_tv_modes[];
static inline int interpolate(int y0, int y1, int y2, int x)
{
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index 9d3893c50a41..4d1d29f60307 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -438,7 +438,7 @@ void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
/* Timings similar to the ones the blob sets */
-struct drm_display_mode nv17_tv_modes[] = {
+const struct drm_display_mode nv17_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
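
The const-ification works because nv17_tv_get_ld_modes() never writes the table: it walks to the zero-hdisplay sentinel and hands each entry to drm_mode_duplicate() before mutating. A toy userspace model of that walk-and-duplicate pattern (struct mode is a stand-in, not the real drm_display_mode):

#include <stdio.h>

/* Minimal model: a const, sentinel-terminated mode table that each consumer
 * duplicates before mutating, so the table itself can live in rodata. */
struct mode {
	int hdisplay;
	int clock;
};

static const struct mode modes[] = {
	{ 320, 0 },
	{ 640, 0 },
	{ 0, 0 },	/* sentinel, like hdisplay == 0 in nv17_tv_modes */
};

int main(void)
{
	const struct mode *m;

	for (m = modes; m->hdisplay; m++) {
		struct mode dup = *m;	/* drm_mode_duplicate() analogue */

		dup.clock = 60 * dup.hdisplay;	/* mutate the copy only */
		printf("mode %d: clock %d\n", dup.hdisplay, dup.clock);
	}
	return 0;
}
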
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index f3d9c0505f7b..f0ac2a768c67 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -24,6 +24,53 @@ nv40_fb_set_tile_region(struct drm_device *dev, int i)
}
}
+static void
+nv40_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
+ nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x100820, 0x00000000);
+}
+
+static void
+nv44_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+ u32 vinst;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ /* calculate vram address of this PRAMIN block, object
+ * must be allocated on 512KiB alignment, and not exceed
+ * a total size of 512KiB for this to work correctly
+ */
+ vinst = nv_rd32(dev, 0x10020c);
+ vinst -= ((gart->pinst >> 19) + 1) << 19;
+
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
+
+ nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
+ nv_wr32(dev, 0x100850, 0x00008000);
+ nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
+ nv_wr32(dev, 0x100820, 0x00000000);
+ nv_wr32(dev, 0x10082c, 0x00000001);
+ nv_wr32(dev, 0x100800, vinst | 0x00000010);
+}
+
int
nv40_fb_init(struct drm_device *dev)
{
@@ -32,12 +79,12 @@ nv40_fb_init(struct drm_device *dev)
uint32_t tmp;
int i;
- /* This is strictly a NV4x register (don't know about NV5x). */
- /* The blob sets these to all kinds of values, and they mess up our setup. */
- /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
- /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
- /* Any idea what this is? */
- nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+ if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev))
+ nv44_fb_init_gart(dev);
+ else
+ nv40_fb_init_gart(dev);
+ }
switch (dev_priv->chipset) {
case 0x40:
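
The vinst arithmetic in nv44_fb_init_gart() rounds gart->pinst up to the next 512KiB boundary and subtracts that from the end-of-PRAMIN address read from 0x10020c, yielding the VRAM base of the 512KiB block holding the table. A self-contained sketch of just that arithmetic, with invented sample values in place of the register reads:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t pramin_end = 0x10000000;	/* sample nv_rd32(dev, 0x10020c) */
	uint32_t pinst = 0x00025000;		/* sample gart->pinst */
	uint32_t vinst;

	/* same expression as the patch: end minus the rounded-up block span */
	vinst = pramin_end - (((pinst >> 19) + 1) << 19);
	printf("GART table VRAM base: 0x%08x\n", vinst);
	return 0;
}
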
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 9023c4dbb449..2b9984027f41 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -65,7 +65,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int index = nv_crtc->index, ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -135,8 +135,7 @@ static int
nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -186,8 +185,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
struct nouveau_connector *nv_connector =
nouveau_crtc_connector_get(nv_crtc);
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_display_mode *native_mode = NULL;
struct drm_display_mode *mode = &nv_crtc->base.mode;
uint32_t outX, outY, horiz, vert;
@@ -445,6 +443,42 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
+static int
+nv50_crtc_wait_complete(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ nv_wr32(dev, 0x61002c, 0x370);
+ nv_wr32(dev, 0x000140, 1);
+
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+
+ return -EBUSY;
+}
+
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -453,6 +487,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ nv50_display_flip_stop(crtc);
drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@@ -461,24 +496,14 @@ static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(dev, "no space while committing crtc\n");
- return;
- }
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
+ nv50_crtc_wait_complete(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static bool
@@ -491,15 +516,15 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *passed_fb,
- int x, int y, bool update, bool atomic)
+ int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- int ret, format;
+ int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -525,28 +550,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
- switch (drm_fb->depth) {
- case 8:
- format = NV50_EVO_CRTC_FB_DEPTH_8;
- break;
- case 15:
- format = NV50_EVO_CRTC_FB_DEPTH_15;
- break;
- case 16:
- format = NV50_EVO_CRTC_FB_DEPTH_16;
- break;
- case 24:
- case 32:
- format = NV50_EVO_CRTC_FB_DEPTH_24;
- break;
- case 30:
- format = NV50_EVO_CRTC_FB_DEPTH_30;
- break;
- default:
- NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
- return -EINVAL;
- }
-
nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@@ -556,14 +559,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
+ OUT_RING (evo, fb->r_dma);
}
ret = RING_SPACE(evo, 12);
@@ -571,45 +567,26 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
- if (!nv_crtc->fb.tile_flags) {
- OUT_RING(evo, drm_fb->pitch | (1 << 20));
- } else {
- u32 tile_mode = fb->nvbo->tile_mode;
- if (dev_priv->card_type >= NV_C0)
- tile_mode >>= 4;
- OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
- }
- if (dev_priv->chipset == 0x50)
- OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
- else
- OUT_RING(evo, format);
+ OUT_RING (evo, nv_crtc->fb.offset >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
+ OUT_RING (evo, fb->r_pitch);
+ OUT_RING (evo, fb->r_format);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING(evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+ OUT_RING (evo, fb->base.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+ OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING(evo, (y << 16) | x);
+ OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
nv_crtc->lut.depth = fb->base.depth;
nv50_crtc_lut_load(crtc);
}
- if (update) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- }
-
return 0;
}
@@ -619,8 +596,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector = NULL;
uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
@@ -700,14 +676,25 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+ if (ret)
+ return ret;
+
+ ret = nv50_crtc_wait_complete(crtc);
+ if (ret)
+ return ret;
+
+ return nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static int
@@ -715,7 +702,14 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
+ if (ret)
+ return ret;
+
+ return nv50_crtc_wait_complete(crtc);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -758,7 +752,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
nv_crtc->lut.depth = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+ 0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -784,7 +778,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
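
nv50_crtc_wait_complete() above is a bounded busy-wait: submit an EVO update that writes the notifier, then spin on the notifier word for at most two seconds of PTIMER time. A compilable userspace model of the same loop shape, with clock_gettime() standing in for ptimer->read() and stub helpers for the register pokes:

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <errno.h>

static volatile uint32_t notifier;

static uint32_t ntfy_read(void) { return notifier; }	/* nv_ro32() stand-in */
static void kick(void) { notifier = 1; }		/* register-poke stand-in */

static uint64_t read_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	uint64_t start = read_ns();
	int ret = -EBUSY;

	do {
		kick();
		if (ntfy_read()) {	/* notifier flipped non-zero: done */
			ret = 0;
			break;
		}
	} while (read_ns() - start < 2000000000ULL);	/* 2s, as in the patch */

	printf("wait: %d\n", ret);
	return 0;
}
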
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 1b9ce3021aa3..9752c35bb84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -36,9 +36,9 @@
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -71,9 +71,9 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 875414b09ade..808f3ec8f827 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -41,8 +41,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -216,8 +215,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7cc94ed9ed95..75a376cc342a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,6 +24,7 @@
*
*/
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
@@ -34,6 +35,7 @@
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
+static void nv50_display_bh(unsigned long);
static inline int
nv50_sor_nr(struct drm_device *dev)
@@ -172,16 +174,16 @@ nv50_display_init(struct drm_device *dev)
ret = nv50_evo_init(dev);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = nv50_display(dev)->master;
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 11);
+ ret = RING_SPACE(evo, 15);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+ OUT_RING(evo, NvEvoSync);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
@@ -190,6 +192,11 @@ nv50_display_init(struct drm_device *dev)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
OUT_RING(evo, 0);
+ /* required to make display sync channels not hate life */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
FIRE_RING(evo);
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
@@ -201,6 +208,8 @@ nv50_display_init(struct drm_device *dev)
static int nv50_display_disable(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
int ret, i;
@@ -212,12 +221,12 @@ static int nv50_display_disable(struct drm_device *dev)
nv50_crtc_blank(crtc, true);
}
- ret = RING_SPACE(dev_priv->evo, 2);
+ ret = RING_SPACE(evo, 2);
if (ret == 0) {
- BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(dev_priv->evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
}
- FIRE_RING(dev_priv->evo);
+ FIRE_RING(evo);
/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
* cleaning up?
@@ -267,10 +276,16 @@ int nv50_display_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_connector *connector, *ct;
+ struct nv50_display *priv;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.display.priv = priv;
+
/* init basic kernel modesetting */
drm_mode_config_init(dev);
@@ -330,7 +345,7 @@ int nv50_display_create(struct drm_device *dev)
}
}
- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
ret = nv50_display_init(dev);
@@ -345,12 +360,131 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
+ struct nv50_display *disp = nv50_display(dev);
+
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
nouveau_irq_unregister(dev, 26);
+ kfree(disp);
+}
+
+void
+nv50_display_flip_stop(struct drm_crtc *crtc)
+{
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 8);
+ if (ret) {
+ WARN_ON(1);
+ return;
+ }
+
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0094, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+}
+
+int
+nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 24);
+ if (unlikely(ret))
+ return ret;
+
+ /* synchronise with the rendering channel, if necessary */
+ if (likely(chan)) {
+ u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
+
+ ret = RING_SPACE(chan, 10);
+ if (ret) {
+ WIND_RING(evo);
+ return ret;
+ }
+
+ if (dev_priv->chipset < 0xc0) {
+ BEGIN_RING(chan, NvSubSw, 0x0060, 2);
+ OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
+ OUT_RING (chan, dispc->sem.offset);
+ BEGIN_RING(chan, NvSubSw, 0x006c, 1);
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ BEGIN_RING(chan, NvSubSw, 0x0064, 2);
+ OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, 0x74b1e000);
+ BEGIN_RING(chan, NvSubSw, 0x0060, 1);
+ if (dev_priv->chipset < 0x84)
+ OUT_RING (chan, NvSema);
+ else
+ OUT_RING (chan, chan->vram_handle);
+ } else {
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0x1002);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x1001);
+ }
+ FIRE_RING (chan);
+ } else {
+ nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
+ 0xf00d0000 | dispc->sem.value);
+ }
+
+ /* queue the flip on the crtc's "display sync" channel */
+ BEGIN_RING(evo, 0, 0x0100, 1);
+ OUT_RING (evo, 0xfffe0000);
+ BEGIN_RING(evo, 0, 0x0084, 5);
+ OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
+ OUT_RING (evo, dispc->sem.offset);
+ OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (evo, 0x74b1e000);
+ OUT_RING (evo, NvEvoSync);
+ BEGIN_RING(evo, 0, 0x00a0, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, nv_fb->r_dma);
+ BEGIN_RING(evo, 0, 0x0110, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0800, 5);
+ OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (fb->height << 16) | fb->width);
+ OUT_RING (evo, nv_fb->r_pitch);
+ OUT_RING (evo, nv_fb->r_format);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+
+ dispc->sem.offset ^= 0x10;
+ dispc->sem.value++;
+ return 0;
}
static u16
@@ -466,11 +600,12 @@ static void
nv50_display_unk10_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
@@ -541,7 +676,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
if (dcb->type == type && (dcb->or & (1 << or))) {
nouveau_bios_run_display_table(dev, dcb, 0, -1);
- dev_priv->evo_irq.dcb = dcb;
+ disp->irq.dcb = dcb;
goto ack;
}
}
@@ -587,15 +722,16 @@ static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
+ struct nv50_display *disp = nv50_display(dev);
+ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dcb = dev_priv->evo_irq.dcb;
+ dcb = disp->irq.dcb;
if (dcb) {
nouveau_bios_run_display_table(dev, dcb, 0, -2);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
}
/* CRTC clock change requested? */
@@ -692,9 +828,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
- dev_priv->evo_irq.dcb = dcb;
- dev_priv->evo_irq.pclk = pclk;
- dev_priv->evo_irq.script = script;
+ disp->irq.dcb = dcb;
+ disp->irq.pclk = pclk;
+ disp->irq.script = script;
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
@@ -735,13 +871,13 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
- u16 script = dev_priv->evo_irq.script;
- u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
+ struct nv50_display *disp = nv50_display(dev);
+ struct dcb_entry *dcb = disp->irq.dcb;
+ u16 script = disp->irq.script;
+ u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
if (!dcb)
goto ack;
@@ -754,12 +890,10 @@ ack:
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
-void
-nv50_display_irq_handler_bh(struct work_struct *work)
+static void
+nv50_display_bh(unsigned long data)
{
- struct drm_nouveau_private *dev_priv =
- container_of(work, struct drm_nouveau_private, irq_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = (struct drm_device *)data;
for (;;) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
@@ -807,7 +941,7 @@ nv50_display_error_handler(struct drm_device *dev)
static void
nv50_display_isr(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
uint32_t delayed = 0;
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
@@ -835,8 +969,7 @@ nv50_display_isr(struct drm_device *dev)
NV50_PDISPLAY_INTR_1_CLK_UNK40));
if (clock) {
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (!work_pending(&dev_priv->irq_work))
- queue_work(dev_priv->wq, &dev_priv->irq_work);
+ tasklet_schedule(&disp->tasklet);
delayed |= clock;
intr1 &= ~clock;
}
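
The page-flip path above rests on a small semaphore protocol: each CRTC owns two 16-byte slots in a notifier buffer, the renderer (or the CPU fallback) releases 0xf00d0000|value into the active slot, the EVO sync channel acquires that before latching the new scanout address, and software then toggles the slot and increments the value for the next frame. A toy model of that bookkeeping, with a plain array standing in for the VRAM-backed bo:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sem[8];		/* two 16-byte slots, as u32 words */
	uint32_t offset = 0x00;		/* dispc->sem.offset */
	uint16_t value = 0;		/* dispc->sem.value */
	int frame;

	for (frame = 0; frame < 3; frame++) {
		/* render channel (or nouveau_bo_wr32()) signals completion */
		sem[offset / 4] = 0xf00d0000 | value;

		/* EVO would acquire this token at `offset`, then flip */
		printf("frame %d: acquire 0x%08x at +0x%02x\n",
		       frame, sem[offset / 4], offset);

		offset ^= 0x10;		/* ping-pong between the two slots */
		value++;
	}
	return 0;
}
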
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index f0e30b78ef6b..c2da503a22aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,7 +35,36 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
-void nv50_display_irq_handler_bh(struct work_struct *work);
+struct nv50_display_crtc {
+ struct nouveau_channel *sync;
+ struct {
+ struct nouveau_bo *bo;
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_display {
+ struct nouveau_channel *master;
+ struct nouveau_gpuobj *ntfy;
+
+ struct nv50_display_crtc crtc[2];
+
+ struct tasklet_struct tasklet;
+ struct {
+ struct dcb_entry *dcb;
+ u16 script;
+ u32 pclk;
+ } irq;
+};
+
+static inline struct nv50_display *
+nv50_display(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->engine.display.priv;
+}
+
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
@@ -44,4 +73,15 @@ void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ struct nouveau_channel *chan);
+void nv50_display_flip_stop(struct drm_crtc *);
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
+ u64 size);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **);
+
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 0ea090f4244a..a2cfaa691e9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -27,20 +27,17 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
+#include "nv50_display.h"
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv;
struct nouveau_channel *evo = *pevo;
if (!evo)
return;
*pevo = NULL;
- dev_priv = evo->dev->dev_private;
- dev_priv->evo_alloc &= ~(1 << evo->id);
-
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -51,42 +48,61 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
kfree(evo);
}
+void
+nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+{
+ struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ u32 flags5;
+
+ if (dev_priv->chipset < 0xc0) {
+ /* not supported on 0x50, specified in format mthd */
+ if (dev_priv->chipset == 0x50)
+ memtype = 0;
+ flags5 = 0x00010000;
+ } else {
+ if (memtype & 0x80000000)
+ flags5 = 0x00000000; /* large pages */
+ else
+ flags5 = 0x00020000;
+ }
+
+ nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
+ NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
+ nv_wo32(obj, 0x14, flags5);
+ dev_priv->engine.instmem.flush(obj->dev);
+}
+
int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
- u32 flags5)
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
- struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
- struct drm_device *dev = evo->dev;
+ struct nv50_display *disp = nv50_display(evo->dev);
struct nouveau_gpuobj *obj = NULL;
int ret;
- ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+ ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
if (ret)
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;
- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(obj, 4, limit);
- nv_wo32(obj, 8, offset);
- nv_wo32(obj, 12, 0x00000000);
- nv_wo32(obj, 16, 0x00000000);
- nv_wo32(obj, 20, flags5);
- dev_priv->engine.instmem.flush(dev);
+ nv50_evo_dmaobj_init(obj, memtype, base, size);
- ret = nouveau_ramht_insert(evo, name, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret) {
- return ret;
- }
+ ret = nouveau_ramht_insert(evo, handle, obj);
+ if (ret)
+ goto out;
- return 0;
+ if (pobj)
+ nouveau_gpuobj_ref(obj, pobj);
+out:
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
}
static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+nv50_evo_channel_new(struct drm_device *dev, int chid,
+ struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo;
int ret;
@@ -95,25 +111,13 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
return -ENOMEM;
*pevo = evo;
- for (evo->id = 0; evo->id < 5; evo->id++) {
- if (dev_priv->evo_alloc & (1 << evo->id))
- continue;
-
- dev_priv->evo_alloc |= (1 << evo->id);
- break;
- }
-
- if (evo->id == 5) {
- kfree(evo);
- return -ENODEV;
- }
-
+ evo->id = chid;
evo->dev = dev;
evo->user_get = 4;
evo->user_put = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
- false, true, &evo->pushbuf_bo);
+ &evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
if (ret) {
@@ -138,8 +142,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
}
/* bind primary evo channel's ramht to the channel */
- if (dev_priv->evo && evo != dev_priv->evo)
- nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+ if (disp->master && evo != disp->master)
+ nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
return 0;
}
@@ -212,21 +216,39 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
+static void
+nv50_evo_destroy(struct drm_device *dev)
+{
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sem.bo) {
+ nouveau_bo_unmap(disp->crtc[i].sem.bo);
+ nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
+ }
+ nv50_evo_channel_del(&disp->crtc[i].sync);
+ }
+ nouveau_gpuobj_ref(NULL, &disp->ntfy);
+ nv50_evo_channel_del(&disp->master);
+}
+
static int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *evo;
- int ret;
+ int ret, i, j;
/* create primary evo channel, the one we use for modesetting
* purposes
*/
- ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ ret = nv50_evo_channel_new(dev, 0, &disp->master);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = disp->master;
/* setup object management on it, any other evo channel will
* use this also as there's no per-channel support on the
@@ -236,109 +258,167 @@ nv50_evo_create(struct drm_device *dev)
NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+ /* not sure exactly what this is..
+ *
+ * the first dword of the structure is used by nvidia to wait on
+ * full completion of an EVO "update" command.
+ *
+ * method 0x8c on the master evo channel will fill a lot more of
+ * this structure with some undefined info
+ */
+ ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
+ disp->ntfy->vinst, disp->ntfy->size, NULL);
+ if (ret)
+ goto err;
/* create some default objects for the scanout memtypes we support */
- if (dev_priv->card_type >= NV_C0) {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
- 0, 0xffffffff, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00020000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
- } else {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ /* create "display sync" channels and other structures we need
+ * to implement page flipping
+ */
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ u64 offset;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
+ if (ret)
+ goto err;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &dispc->sem.bo);
+ if (!ret) {
+ offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(dispc->sem.bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &dispc->sem.bo);
}
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
+ offset, 4096, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ for (j = 0; j < 4096; j += 4)
+ nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
+ dispc->sem.offset = 0;
}
return 0;
+
+err:
+ nv50_evo_destroy(dev);
+ return ret;
}
int
nv50_evo_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
+ struct nv50_display *disp = nv50_display(dev);
+ int ret, i;
- if (!dev_priv->evo) {
+ if (!disp->master) {
ret = nv50_evo_create(dev);
if (ret)
return ret;
}
- return nv50_evo_channel_init(dev_priv->evo);
+ ret = nv50_evo_channel_init(disp->master);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 2; i++) {
+ ret = nv50_evo_channel_init(disp->crtc[i].sync);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
void
nv50_evo_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
- if (dev_priv->evo) {
- nv50_evo_channel_fini(dev_priv->evo);
- nv50_evo_channel_del(&dev_priv->evo);
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sync)
+ nv50_evo_channel_fini(disp->crtc[i].sync);
}
+
+ if (disp->master)
+ nv50_evo_channel_fini(disp->master);
+
+ nv50_evo_destroy(dev);
}
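
nv50_evo_dmaobj_init() folds the chipset differences into two values: pre-NVC0 always uses flags5 0x00010000 (with NV50 zeroing the memtype, since it is specified in the format method there instead), while NVC0+ selects large pages when bit 31 of the caller's memtype is set. A compilable restatement of just that selection logic:

#include <stdio.h>
#include <stdint.h>

/* Same branches as the patch's nv50_evo_dmaobj_init(), isolated so they can
 * be exercised in userspace. */
static uint32_t pick_flags5(int chipset, uint32_t *memtype)
{
	if (chipset < 0xc0) {
		if (chipset == 0x50)
			*memtype = 0;	/* NV50: goes in format mthd instead */
		return 0x00010000;
	}
	return (*memtype & 0x80000000) ? 0x00000000	/* large pages */
				       : 0x00020000;
}

int main(void)
{
	uint32_t memtype = 0x80000000 | 0x7a00;	/* NvEvoFB32 on <0xc0 */
	uint32_t flags5 = pick_flags5(0x50, &memtype);

	printf("flags5 0x%08x memtype 0x%08x\n", flags5, memtype);
	return 0;
}
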
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aa4f0d3cea8e..3860ca62cb19 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -27,12 +27,6 @@
#ifndef __NV50_EVO_H__
#define __NV50_EVO_H__
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags,
- u32 offset, u32 limit);
-
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -119,5 +113,7 @@ int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
/* Both of these are needed, otherwise nothing happens. */
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+#define NV50_EVO_CRTC_UNK900 0x00000900
+#define NV50_EVO_CRTC_UNK904 0x00000904
#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 50290dea0ac4..bdd2afe29205 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -8,31 +8,61 @@ struct nv50_fb_priv {
dma_addr_t r100c08;
};
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nv50_fb_priv *priv = pfb->priv;
+
+ if (drm_mm_initialized(&pfb->tag_heap))
+ drm_mm_takedown(&pfb->tag_heap);
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
static int
nv50_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nv50_fb_priv *priv;
+ u32 tagmem;
+ int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ pfb->priv = priv;
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c08_page) {
- kfree(priv);
+ nv50_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
- __free_page(priv->r100c08_page);
- kfree(priv);
+ nv50_fb_destroy(dev);
return -EFAULT;
}
- dev_priv->engine.fb.priv = priv;
+ tagmem = nv_rd32(dev, 0x100320);
+ NV_DEBUG(dev, "%u tags available\n", tagmem);
+ ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+ if (ret) {
+ nv50_fb_destroy(dev);
+ return ret;
+ }
+
return 0;
}
@@ -81,26 +111,112 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fb_priv *priv;
+ nv50_fb_destroy(dev);
+}
- priv = dev_priv->engine.fb.priv;
- if (!priv)
- return;
- dev_priv->engine.fb.priv = NULL;
+static struct nouveau_enum vm_dispatch_subclients[] = {
+ { 0x00000000, "GRCTX", NULL },
+ { 0x00000001, "NOTIFY", NULL },
+ { 0x00000002, "QUERY", NULL },
+ { 0x00000003, "COND", NULL },
+ { 0x00000004, "M2M_IN", NULL },
+ { 0x00000005, "M2M_OUT", NULL },
+ { 0x00000006, "M2M_NOTIFY", NULL },
+ {}
+};
- pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- kfree(priv);
-}
+static struct nouveau_enum vm_ccache_subclients[] = {
+ { 0x00000000, "CB", NULL },
+ { 0x00000001, "TIC", NULL },
+ { 0x00000002, "TSC", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_prop_subclients[] = {
+ { 0x00000000, "RT0", NULL },
+ { 0x00000001, "RT1", NULL },
+ { 0x00000002, "RT2", NULL },
+ { 0x00000003, "RT3", NULL },
+ { 0x00000004, "RT4", NULL },
+ { 0x00000005, "RT5", NULL },
+ { 0x00000006, "RT6", NULL },
+ { 0x00000007, "RT7", NULL },
+ { 0x00000008, "ZETA", NULL },
+ { 0x00000009, "LOCAL", NULL },
+ { 0x0000000a, "GLOBAL", NULL },
+ { 0x0000000b, "STACK", NULL },
+ { 0x0000000c, "DST2D", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_pfifo_subclients[] = {
+ { 0x00000000, "PUSHBUF", NULL },
+ { 0x00000001, "SEMAPHORE", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_bar_subclients[] = {
+ { 0x00000000, "FB", NULL },
+ { 0x00000001, "IN", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_client[] = {
+ { 0x00000000, "STRMOUT", NULL },
+ { 0x00000003, "DISPATCH", vm_dispatch_subclients },
+ { 0x00000004, "PFIFO_WRITE", NULL },
+ { 0x00000005, "CCACHE", vm_ccache_subclients },
+ { 0x00000006, "PPPP", NULL },
+ { 0x00000007, "CLIPID", NULL },
+ { 0x00000008, "PFIFO_READ", NULL },
+ { 0x00000009, "VFETCH", NULL },
+ { 0x0000000a, "TEXTURE", NULL },
+ { 0x0000000b, "PROP", vm_prop_subclients },
+ { 0x0000000c, "PVP", NULL },
+ { 0x0000000d, "PBSP", NULL },
+ { 0x0000000e, "PCRYPT", NULL },
+ { 0x0000000f, "PCOUNTER", NULL },
+ { 0x00000011, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_engine[] = {
+ { 0x00000000, "PGRAPH", NULL },
+ { 0x00000001, "PVP", NULL },
+ { 0x00000004, "PEEPHOLE", NULL },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients },
+ { 0x00000006, "BAR", vm_bar_subclients },
+ { 0x00000008, "PPPP", NULL },
+ { 0x00000009, "PBSP", NULL },
+ { 0x0000000a, "PCRYPT", NULL },
+ { 0x0000000b, "PCOUNTER", NULL },
+ { 0x0000000c, "SEMAPHORE_BG", NULL },
+ { 0x0000000d, "PCOPY", NULL },
+ { 0x0000000e, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_fault[] = {
+ { 0x00000000, "PT_NOT_PRESENT", NULL },
+ { 0x00000001, "PT_TOO_SHORT", NULL },
+ { 0x00000002, "PAGE_NOT_PRESENT", NULL },
+ { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
+ { 0x00000004, "PAGE_READ_ONLY", NULL },
+ { 0x00000006, "NULL_DMAOBJ", NULL },
+ { 0x00000007, "WRONG_MEMTYPE", NULL },
+ { 0x0000000b, "VRAM_LIMIT", NULL },
+ { 0x0000000f, "DMAOBJ_LIMIT", NULL },
+ {}
+};
void
-nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
+nv50_fb_vm_trap(struct drm_device *dev, int display)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct nouveau_enum *en, *cl;
unsigned long flags;
u32 trap[6], idx, chinst;
+ u8 st0, st1, st2, st3;
int i, ch;
idx = nv_rd32(dev, 0x100c90);
@@ -117,8 +233,8 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
if (!display)
return;
+ /* lookup channel id */
chinst = (trap[2] << 16) | trap[1];
-
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
@@ -131,9 +247,48 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
- "channel %d (0x%08x)\n",
- name, (trap[5] & 0x100 ? "read" : "write"),
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
- trap[0], ch, chinst);
+ /* decode status bits into something more useful */
+ if (dev_priv->chipset < 0xa3 ||
+ dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+ st0 = (trap[0] & 0x0000000f) >> 0;
+ st1 = (trap[0] & 0x000000f0) >> 4;
+ st2 = (trap[0] & 0x00000f00) >> 8;
+ st3 = (trap[0] & 0x0000f000) >> 12;
+ } else {
+ st0 = (trap[0] & 0x000000ff) >> 0;
+ st1 = (trap[0] & 0x0000ff00) >> 8;
+ st2 = (trap[0] & 0x00ff0000) >> 16;
+ st3 = (trap[0] & 0xff000000) >> 24;
+ }
+
+ NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
+ (trap[5] & 0x00000100) ? "read" : "write",
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
+
+ en = nouveau_enum_find(vm_engine, st0);
+ if (en)
+ printk("%s/", en->name);
+ else
+ printk("%02x/", st0);
+
+ cl = nouveau_enum_find(vm_client, st2);
+ if (cl)
+ printk("%s/", cl->name);
+ else
+ printk("%02x/", st2);
+
+ if (cl && cl->data)
+ cl = nouveau_enum_find(cl->data, st3);
+ else if (en && en->data)
+ cl = nouveau_enum_find(en->data, st3);
+ else
+ cl = NULL;
+ if (cl)
+ printk("%s", cl->name);
+ else
+ printk("%02x", st3);
+
+ printk(" reason: ");
+ en = nouveau_enum_find(vm_fault, st1);
+ if (en)
+ printk("%s\n", en->name);
+ else
+ printk("0x%08x\n", st1);
}
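
The trap decode above splits trap[0] into engine/fault/client/subclient fields whose width depends on the chipset: 4 bits each before NVA3 (and on the 0xaa/0xac IGPs), 8 bits each afterwards. A standalone sketch of the unpacking with an invented sample word:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int chipset = 0xa3;
	uint32_t trap0 = 0x0b020400;	/* made-up sample trap[0] */
	uint8_t st0, st1, st2, st3;

	if (chipset < 0xa3 || chipset == 0xaa || chipset == 0xac) {
		st0 = (trap0 & 0x0000000f) >> 0;
		st1 = (trap0 & 0x000000f0) >> 4;
		st2 = (trap0 & 0x00000f00) >> 8;
		st3 = (trap0 & 0x0000f000) >> 12;
	} else {
		st0 = (trap0 & 0x000000ff) >> 0;
		st1 = (trap0 & 0x0000ff00) >> 8;
		st2 = (trap0 & 0x00ff0000) >> 16;
		st3 = (trap0 & 0xff000000) >> 24;
	}

	/* st0/st2/st3 index vm_engine/vm_client/subclient tables; st1 is the
	 * vm_fault reason */
	printf("engine %02x fault %02x client %02x subclient %02x\n",
	       st0, st1, st2, st3);
	return 0;
}
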
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 8dd04c5dac67..c34a074f7ea1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -149,6 +149,7 @@ nv50_fifo_init_regs(struct drm_device *dev)
nv_wr32(dev, 0x3204, 0);
nv_wr32(dev, 0x3210, 0);
nv_wr32(dev, 0x3270, 0);
+ nv_wr32(dev, 0x2044, 0x01003fff);
/* Enable dummy channels setup by nv50_instmem.c */
nv50_fifo_channel_enable(dev, 0);
@@ -273,7 +274,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(ramfc, 0x44, 0x2101ffff);
+ nv_wo32(ramfc, 0x44, 0x01003fff);
nv_wo32(ramfc, 0x60, 0x7fffffff);
nv_wo32(ramfc, 0x40, 0x00000000);
nv_wo32(ramfc, 0x7c, 0x30000001);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 6b149c0cc06d..d4f4206dad7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -137,6 +137,7 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
struct nv50_gpio_priv *priv = pgpio->priv;
struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
+ LIST_HEAD(tofree);
unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
@@ -149,10 +150,14 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
gpioh->handler != handler ||
gpioh->data != data)
continue;
- list_del(&gpioh->head);
- kfree(gpioh);
+ list_move(&gpioh->head, &tofree);
}
spin_unlock_irqrestore(&priv->lock, flags);
+
+ list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
+ flush_work_sync(&gpioh->work);
+ kfree(gpioh);
+ }
}
bool
@@ -205,7 +210,6 @@ nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv;
int ret;
if (!pgpio->priv) {
@@ -213,7 +217,6 @@ nv50_gpio_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -293,7 +296,7 @@ nv50_gpio_isr(struct drm_device *dev)
continue;
gpioh->inhibit = true;
- queue_work(dev_priv->wq, &gpioh->work);
+ schedule_work(&gpioh->work);
}
spin_unlock(&priv->lock);
}
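
The nv50_gpio_irq_unregister() change is the classic deferred-free pattern: unlink matching handlers onto a private list while the spinlock is held, then drop the lock, flush any handler work still in flight, and only then free — so the ISR can never race a kfree()d handler. A toy userspace model of the ordering (a hand-rolled singly-linked list stands in for list_head, and the lock/flush points are marked by comments):

#include <stdio.h>
#include <stdlib.h>

struct handler {
	int tag;
	struct handler *next;
};

int main(void)
{
	struct handler *head = NULL, *tofree = NULL, *h, *tmp;
	int i;

	/* register three handlers */
	for (i = 0; i < 3; i++) {
		h = malloc(sizeof(*h));
		h->tag = i;
		h->next = head;
		head = h;
	}

	/* --- spin_lock_irqsave() would be taken here --- */
	for (h = head, head = NULL; h; h = tmp) {
		tmp = h->next;
		if (h->tag == 1) {	/* matches tag/handler/data */
			h->next = tofree;
			tofree = h;
		} else {
			h->next = head;
			head = h;
		}
	}
	/* --- spin_unlock_irqrestore(); flush_work_sync() happens here --- */

	for (h = tofree; h; h = tmp) {
		tmp = h->next;
		printf("freeing handler %d after flush\n", h->tag);
		free(h);
	}

	for (h = head; h; h = tmp) {	/* tidy up the survivors */
		tmp = h->next;
		free(h);
	}
	return 0;
}
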
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 37e21d2be95b..8675b00caf18 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -95,13 +95,41 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
}
static void
-nv50_graph_init_regs(struct drm_device *dev)
+nv50_graph_init_zcull(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
NV_DEBUG(dev, "\n");
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
- (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
- nv_wr32(dev, 0x402ca8, 0x800);
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ nv_wr32(dev, 0x402ca8, 0x00000800);
+ break;
+ case 0xa0:
+ default:
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ if (dev_priv->chipset == 0xa0 ||
+ dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac) {
+ nv_wr32(dev, 0x402ca8, 0x00000802);
+ } else {
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ nv_wr32(dev, 0x402ca8, 0x00000002);
+ }
+
+ break;
+ }
+
+ /* zero out zcull regions */
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
+ }
}
static int
@@ -136,6 +164,7 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
}
kfree(cp);
+ nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
@@ -151,7 +180,7 @@ nv50_graph_init(struct drm_device *dev)
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
- nv50_graph_init_regs(dev);
+ nv50_graph_init_zcull(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
@@ -409,12 +438,7 @@ static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
- struct nouveau_page_flip_state s;
-
- if (!nouveau_finish_page_flip(chan, &s)) {
- /* XXX - Do something here */
- }
-
+ nouveau_finish_page_flip(chan, NULL);
return 0;
}
@@ -526,11 +550,11 @@ nv86_graph_tlb_flush(struct drm_device *dev)
static struct nouveau_enum nv50_mp_exec_error_names[] =
{
- { 3, "STACK_UNDERFLOW" },
- { 4, "QUADON_ACTIVE" },
- { 8, "TIMEOUT" },
- { 0x10, "INVALID_OPCODE" },
- { 0x40, "BREAKPOINT" },
+ { 3, "STACK_UNDERFLOW", NULL },
+ { 4, "QUADON_ACTIVE", NULL },
+ { 8, "TIMEOUT", NULL },
+ { 0x10, "INVALID_OPCODE", NULL },
+ { 0x40, "BREAKPOINT", NULL },
{}
};
@@ -558,47 +582,47 @@ static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
- { 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
- { 0x00000004, "INVALID_VALUE" },
- { 0x00000005, "INVALID_ENUM" },
- { 0x00000008, "INVALID_OBJECT" },
- { 0x00000009, "READ_ONLY_OBJECT" },
- { 0x0000000a, "SUPERVISOR_OBJECT" },
- { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
- { 0x0000000c, "INVALID_BITFIELD" },
- { 0x0000000d, "BEGIN_END_ACTIVE" },
- { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
- { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
- { 0x00000010, "RT_DOUBLE_BIND" },
- { 0x00000011, "RT_TYPES_MISMATCH" },
- { 0x00000012, "RT_LINEAR_WITH_ZETA" },
- { 0x00000015, "FP_TOO_FEW_REGS" },
- { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
- { 0x00000017, "RT_LINEAR_WITH_MSAA" },
- { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
- { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
- { 0x0000001a, "RT_INVALID_ALIGNMENT" },
- { 0x0000001b, "SAMPLER_OVER_LIMIT" },
- { 0x0000001c, "TEXTURE_OVER_LIMIT" },
- { 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
- { 0x0000001f, "RT_BPP128_WITH_MS8" },
- { 0x00000021, "Z_OUT_OF_BOUNDS" },
- { 0x00000023, "XY_OUT_OF_BOUNDS" },
- { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
- { 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
- { 0x00000029, "CP_NO_REG_SPACE_PACKED" },
- { 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
- { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
- { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
- { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
- { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
- { 0x00000031, "ENG2D_FORMAT_MISMATCH" },
- { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
- { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
- { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
- { 0x00000046, "LAYER_ID_NEEDS_GP" },
- { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
- { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+ { 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
+ { 0x00000004, "INVALID_VALUE", NULL },
+ { 0x00000005, "INVALID_ENUM", NULL },
+ { 0x00000008, "INVALID_OBJECT", NULL },
+ { 0x00000009, "READ_ONLY_OBJECT", NULL },
+ { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
+ { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
+ { 0x0000000c, "INVALID_BITFIELD", NULL },
+ { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
+ { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
+ { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
+ { 0x00000010, "RT_DOUBLE_BIND", NULL },
+ { 0x00000011, "RT_TYPES_MISMATCH", NULL },
+ { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
+ { 0x00000015, "FP_TOO_FEW_REGS", NULL },
+ { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
+ { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
+ { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
+ { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
+ { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
+ { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
+ { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
+ { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
+ { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
+ { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
+ { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+ { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
+ { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
+ { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
+ { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
+ { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
+ { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
+ { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
+ { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
+ { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
+ { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
+ { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
+ { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
+ { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
+ { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
+ { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
{}
};
@@ -678,7 +702,6 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
- nv50_fb_vm_trap(dev, display, name);
if (display) {
NV_ERROR(dev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -701,7 +724,6 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_fb_vm_trap(dev, display, name);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
@@ -912,10 +934,10 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
- nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
- nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
- nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
- nv_rd32(dev, 0x40581c));
+ nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
+ nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
+ nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
+ nv_rd32(dev, 0x40501c));
}
@@ -1044,6 +1066,7 @@ nv50_graph_isr(struct drm_device *dev)
NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
"class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, inst, subc, class, mthd, data);
+ nv50_fb_vm_trap(dev, 1);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index e57caa2a00e3..a6f8aa651fc6 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
nouveau_gpuobj_ref(NULL, &chan->ramfc);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
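
Editor's note: the open-coded peek at ramin_heap.free_stack.next is replaced by the drm_mm_initialized() helper, so callers stop depending on drm_mm internals. A minimal sketch of such a helper, assuming it simply tests whether the allocator's internal list head has been wired up (the member it inspects is a drm_mm implementation detail that has changed across kernel versions):

	/* sketch only: the real helper lives in include/drm/drm_mm.h */
	static inline bool drm_mm_initialized(struct drm_mm *mm)
	{
		return mm->hole_stack.next;	/* internal member; assumption */
	}
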
@@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev)
nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
- if (dev_priv->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&dev_priv->ramin_heap))
drm_mm_takedown(&dev_priv->ramin_heap);
dev_priv->engine.instmem.priv = NULL;
@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
}
struct nv50_gpuobj_node {
- struct nouveau_vram *vram;
+ struct nouveau_mem *vram;
struct nouveau_vma chan_vma;
u32 align;
};
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b4a5ecb199f9..c25c59386420 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -41,8 +41,7 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -184,8 +183,7 @@ static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 6144156f255a..4fd3432b5b8d 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -31,7 +31,6 @@ void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2])
{
- struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
@@ -58,10 +57,9 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
}
static inline u64
-nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u64 phys, u32 memtype, u32 target)
+nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
- struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
phys |= 1; /* present */
phys |= (u64)memtype << 40;
@@ -85,12 +83,13 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
+ u32 comp = (mem->memtype & 0x180) >> 7;
u32 block;
int i;
- phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
pte <<= 3;
cnt <<= 3;
@@ -107,6 +106,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
phys += block << (vma->node->type - 3);
cnt -= block;
+ if (comp) {
+ u32 tag = mem->tag->start + ((delta >> 16) * comp);
+ offset_h |= (tag << 17);
+ delta += block << (vma->node->type - 3);
+ }
while (block) {
nv_wo32(pgt, pte + 0, offset_l);
@@ -119,11 +123,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
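
Editor's note: the new compression path packs a tag into the high PTE word: bits 8:7 of mem->memtype select the compression mode, and the tag index advances by comp for every 64 KiB of mapping offset (delta >> 16). A worked example of the arithmetic, with purely illustrative values:

	/* illustrative values only */
	u32 memtype = 0x1fa;
	u32 comp    = (memtype & 0x180) >> 7;		/* -> 3 */
	u64 delta   = 0x30000;				/* 192 KiB into the map */
	u32 start   = 0x100;				/* hypothetical tag->start */
	u32 tag     = start + ((delta >> 16) * comp);	/* 0x100 + 3 * 3 = 0x109 */
	/* the tag then lands in the high PTE word: offset_h |= tag << 17 */
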
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 58e98ad36347..ffbc3d8cf5be 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -48,42 +48,49 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
}
void
-nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *this;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
- vram = *pvram;
- *pvram = NULL;
- if (unlikely(vram == NULL))
+ mem = *pmem;
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
return;
mutex_lock(&mm->mutex);
- while (!list_empty(&vram->regions)) {
- this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ while (!list_empty(&mem->regions)) {
+ this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
list_del(&this->rl_entry);
nouveau_mm_put(mm, this);
}
+
+ if (mem->tag) {
+ drm_mm_put_block(mem->tag);
+ mem->tag = NULL;
+ }
mutex_unlock(&mm->mutex);
- kfree(vram);
+ kfree(mem);
}
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **pvram)
+ u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
+ int comp = (memtype & 0x300) >> 8;
+ int type = (memtype & 0x07f);
int ret;
if (!types[type])
@@ -92,32 +99,46 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
align >>= 12;
size_nc >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
-
mutex_lock(&mm->mutex);
+ if (comp) {
+ if (align == 16) {
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int n = (size >> 4) * comp;
+
+ mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+ if (mem->tag)
+ mem->tag = drm_mm_get_block(mem->tag, n, 0);
+ }
+
+ if (unlikely(!mem->tag))
+ comp = 0;
+ }
+
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (comp << 7) | type;
+ mem->size = size;
+
do {
ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
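
Editor's note: nv50_vram_new() now decodes its memtype argument instead of taking a raw type: bits 9:8 carry the requested compression mode, bits 6:0 the storage type, and the pair is re-packed as (comp << 7) | type once the tag allocation has succeeded or comp has been forced back to 0. A small worked decode with an illustrative value:

	/* illustrative decode, mirroring the masks above */
	u32 memtype = 0x27a;
	int comp = (memtype & 0x300) >> 8;	/* -> 2 */
	int type = (memtype & 0x07f);		/* -> 0x7a */
	/* on tag-heap failure comp becomes 0, so
	 * mem->memtype = (comp << 7) | type degenerates to plain type */
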
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index ec18ae1c3886..fabc7fd30b1d 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -136,5 +136,5 @@ nv84_crypt_isr(struct drm_device *dev)
nv_wr32(dev, 0x102130, stat);
nv_wr32(dev, 0x10200c, 0x10);
- nv50_fb_vm_trap(dev, show, "PCRYPT");
+ nv50_fb_vm_trap(dev, show);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index e6f92c541dba..2886f2726a9e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -116,7 +116,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
/* allocate vram for control regs, map into polling area */
ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &fifoch->user);
+ 0, 0, &fifoch->user);
if (ret)
goto error;
@@ -418,6 +418,12 @@ nvc0_fifo_isr(struct drm_device *dev)
{
u32 stat = nv_rd32(dev, 0x002100);
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(dev, 0x00259c);
u32 u = units;
@@ -446,10 +452,15 @@ nvc0_fifo_isr(struct drm_device *dev)
stat &= ~0x20000000;
}
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
if (stat) {
NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
}
-
- nv_wr32(dev, 0x2140, 0);
}
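
Editor's note: the reworked ISR follows the usual acknowledge-and-strip idiom: each recognized bit is handled, written back to the status register to ack it, and cleared from the local stat copy; whatever remains at the end is logged and the interrupt mask register is zeroed so an unknown condition cannot storm. A minimal sketch of the idiom, with hypothetical names standing in for the 0x002100/0x002140 offsets and the per-bit handler:

	u32 stat = nv_rd32(dev, STAT_REG);		/* STAT_REG: hypothetical */

	if (stat & KNOWN_BIT) {				/* KNOWN_BIT: hypothetical */
		handle_known_condition(dev);		/* hypothetical handler */
		nv_wr32(dev, STAT_REG, KNOWN_BIT);	/* ack just this bit */
		stat &= ~KNOWN_BIT;			/* strip it locally */
	}

	if (stat) {					/* anything left is unknown */
		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
		nv_wr32(dev, STAT_REG, stat);		/* ack */
		nv_wr32(dev, MASK_REG, 0);		/* MASK_REG: hypothetical */
	}
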
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index eb18a7e89f5b..3de9b721d8db 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -299,6 +299,14 @@ nvc0_graph_takedown(struct drm_device *dev)
}
static int
+nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ nouveau_finish_page_flip(chan, NULL);
+ return 0;
+}
+
+static int
nvc0_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -395,6 +403,7 @@ nvc0_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 25, nvc0_runk140_isr);
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0;
@@ -640,7 +649,6 @@ nvc0_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nvc0_graph_priv *priv;
int ret;
dev_priv->engine.graph.accel_blocked = true;
@@ -665,7 +673,6 @@ nvc0_graph_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgraph->priv;
nvc0_graph_init_obj418880(dev);
nvc0_graph_init_regs(dev);
@@ -730,9 +737,12 @@ nvc0_graph_isr(struct drm_device *dev)
u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
if (stat & 0x00000010) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
nv_wr32(dev, 0x400100, 0x00000010);
stat &= ~0x00000010;
}
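
Editor's note: with the NVOBJ_MTHD registration above, the ILLEGAL_MTHD path first offers the method to a per-class handler via nouveau_gpuobj_mthd_call2() and only logs when no handler consumes it. A minimal sketch of a handler following that pattern (the handler name and method number are hypothetical):

	/* hypothetical handler; returning 0 suppresses the ILLEGAL_MTHD log */
	static int example_mthd_handler(struct nouveau_channel *chan,
					u32 class, u32 mthd, u32 data)
	{
		/* consume the method here */
		return 0;
	}

	/* registered the same way as the page-flip method above */
	NVOBJ_MTHD(dev, 0x9039, 0x0504, example_mthd_handler);
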
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index c09091749054..82357d2df1f4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan)
return;
nouveau_vm_ref(NULL, &chan->vm, NULL);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index e4e83c2caf5b..69af0ba7edd3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u32 next = 1 << (vma->node->type - 8);
@@ -75,11 +75,11 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+ u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 858eda5dedd1..67c6ec6f34ea 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -26,64 +26,78 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+/* 0 = unsupported
+ * 1 = non-compressed
+ * 3 = compressed
+ */
+static const u8 types[256] = {
+ 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+ 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+ 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+};
+
bool
nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
{
- switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
- case 0x0000:
- case 0xfe00:
- case 0xdb00:
- case 0x1100:
- return true;
- default:
- break;
- }
-
- return false;
+ u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+ return likely((types[memtype] == 1));
}
int
nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
- u32 type, struct nouveau_vram **pvram)
+ u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
int ret;
size >>= 12;
align >>= 12;
ncmin >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (type & 0xff);
+ mem->size = size;
mutex_lock(&mm->mutex);
do {
ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
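
Editor's note: on NVC0 the validity check becomes a straight table lookup: the memtype is bits 15:8 of the GEM tile flags and types[] classifies all 256 values (0 unsupported, 1 non-compressed, 3 compressed); only non-compressed types pass this check. A worked lookup:

	u32 tile_flags = 0xfe00;	/* illustrative flags */
	u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;	/* 0xfe */
	/* types[0xfe] == 1 in the table above, so these flags are accepted;
	 * a compressed type (3) or an unsupported one (0) would be rejected */
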
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 18c3c71e41b1..b9e8efd2b754 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -71,10 +71,7 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -89,16 +86,21 @@ int r128_driver_load(struct drm_device *dev, unsigned long flags)
return drm_vblank_init(dev, 1);
}
+static struct pci_driver r128_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &r128_pci_driver);
}
static void __exit r128_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &r128_pci_driver);
}
module_init(r128_init);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index e47eecfc2df4..3896ef811102 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -36,6 +36,9 @@ $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -50,7 +53,7 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
-$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
@@ -66,7 +69,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a4e5e53e0a62..3cd3234ba0af 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -61,8 +61,8 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
} else if (a2 > a1) {
- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
}
break;
case RMX_FULL:
@@ -1026,7 +1026,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1135,7 +1135,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1181,7 +1181,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
}
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1292,7 +1292,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
new file mode 100644
index 000000000000..e148ab04b80b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data, which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 cayman_default_state[] =
+{
+ /* XXX fill in additional blit state */
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+
+ 0xc0026900,
+ 0x000000d9,
+ 0x00000000, /* CP_RINGID */
+ 0x00000000, /* CP_VMID */
+};
+
+const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
new file mode 100644
index 000000000000..33b75e5d0fa4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CAYMAN_BLIT_SHADERS_H
+#define CAYMAN_BLIT_SHADERS_H
+
+extern const u32 cayman_default_state[];
+
+extern const u32 cayman_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 6140ea1de45a..b9427e689cf3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -804,7 +804,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
}
}
-static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
@@ -957,7 +957,7 @@ void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
-static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
@@ -1011,7 +1011,7 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
-static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
@@ -1108,7 +1108,7 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
-static void evergreen_mc_program(struct radeon_device *rdev)
+void evergreen_mc_program(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp;
@@ -2576,7 +2576,7 @@ void evergreen_irq_disable(struct radeon_device *rdev)
evergreen_disable_interrupt_state(rdev);
}
-static void evergreen_irq_suspend(struct radeon_device *rdev)
+void evergreen_irq_suspend(struct radeon_device *rdev)
{
evergreen_irq_disable(rdev);
r600_rlc_stop(rdev);
@@ -2899,7 +2899,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
}
- r = btc_mc_load_microcode(rdev);
+ r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
@@ -2981,7 +2981,7 @@ int evergreen_resume(struct radeon_device *rdev)
r = evergreen_startup(rdev);
if (r) {
- DRM_ERROR("r600 startup failed on resume\n");
+ DRM_ERROR("evergreen startup failed on resume\n");
return r;
}
@@ -3061,7 +3061,7 @@ int evergreen_init(struct radeon_device *rdev)
}
/* Must be an ATOMBIOS */
if (!rdev->is_atom_bios) {
- dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+ dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
return -EINVAL;
}
r = radeon_atombios_init(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2be698e78ff2..ba06a69c6de8 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -579,7 +579,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += evergreen_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 345a75a03c96..edde90b37554 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -29,6 +29,7 @@
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
+#include "cayman_reg_safe.h"
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -292,33 +293,28 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (wait_reg_mem.type != PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -336,8 +332,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -362,12 +357,10 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
-out:
- return r;
+ return 0;
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
@@ -425,18 +418,28 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
- u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+ u32 last_reg;
u32 m, i, tmp, *ib;
int r;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
i = (reg >> 7);
	if (i >= last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
m = 1 << ((reg >> 2) & 31);
- if (!(evergreen_reg_safe_bm[i] & m))
- return 0;
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return 0;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ }
ib = p->ib->ptr;
switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
@@ -468,12 +471,42 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
case SQ_VSTMP_RING_ITEMSIZE:
case VGT_TF_RING_SIZE:
		/* get value to populate the IB, don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
+ case CAYMAN_DB_EQAA:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
+ case CAYMAN_DB_DEPTH_INFO:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
case DB_Z_INFO:
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -559,9 +592,23 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
track->cb_shader_mask = radeon_get_ib_value(p, idx);
break;
case PA_SC_AA_CONFIG:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
track->nsamples = 1 << tmp;
break;
+ case CAYMAN_PA_SC_AA_CONFIG:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
case CB_COLOR0_VIEW:
case CB_COLOR1_VIEW:
case CB_COLOR2_VIEW:
@@ -942,6 +989,37 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
DRM_ERROR("bad CONTEXT_CONTROL\n");
@@ -956,6 +1034,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ return -EINVAL;
+ }
+ break;
case PACKET3_INDEX_BASE:
if (pkt->count != 1) {
DRM_ERROR("bad INDEX_BASE\n");
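
Editor's note: the SET_PREDICATION checker reads the second operand, takes the predicate op from bits 18:16, passes op 0 (clear) through without a relocation, rejects ops above 2, and otherwise folds the relocated 64-bit GPU address into the two operand dwords. A worked decode with an illustrative operand:

	u32 tmp = 0x00020000;			/* illustrative second operand */
	int pred_op = (tmp >> 16) & 0x7;	/* -> 2, a valid predicate op */
	/* pred_op == 0: clear predicate, no reloc needed;
	 * pred_op  > 2: rejected with -EINVAL;
	 * otherwise ib[idx] += gpu_offset & 0xffffffff and
	 * ib[idx+1] += upper_32_bits(gpu_offset) & 0xff */
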
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index eb4acf4528ff..9aaa3f0c9372 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -755,13 +755,21 @@
#define SQ_CONST_MEM_BASE 0x8df8
+#define SQ_ESGS_RING_BASE 0x8c40
#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_BASE 0x8c48
#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_LSTMP_RING_BASE 0x8e10
#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
#define SQ_HSTMP_RING_SIZE 0x8e1c
#define VGT_TF_RING_SIZE 0x8988
@@ -1093,5 +1101,14 @@
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+/* cayman 3D regs */
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
+#define CAYMAN_DB_EQAA 0x28804
+#define CAYMAN_DB_DEPTH_INFO 0x2803C
+#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
+#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
+#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
+/* cayman packet3 addition */
+#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
#endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5e0bef80ad7f..7aade20f63a8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -31,12 +31,25 @@
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
+#include "cayman_blit_shaders.h"
+
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+extern void evergreen_mc_program(struct radeon_device *rdev);
+extern void evergreen_irq_suspend(struct radeon_device *rdev);
+extern int evergreen_mc_init(struct radeon_device *rdev);
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024
+#define CAYMAN_PFP_UCODE_SIZE 2176
+#define CAYMAN_PM4_UCODE_SIZE 2176
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define CAYMAN_MC_UCODE_SIZE 6037
+
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
@@ -48,6 +61,10 @@ MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
#define BTC_IO_MC_REGS_SIZE 29
@@ -147,12 +164,44 @@ static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00916a00}
};
-int btc_mc_load_microcode(struct radeon_device *rdev)
+static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00976b00}
+};
+
+int ni_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 mem_type, running, blackout = 0;
u32 *io_mc_regs;
- int i;
+ int i, ucode_size, regs_size;
if (!rdev->mc_fw)
return -EINVAL;
@@ -160,13 +209,24 @@ int btc_mc_load_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BARTS:
io_mc_regs = (u32 *)&barts_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_TURKS:
io_mc_regs = (u32 *)&turks_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_CAICOS:
default:
io_mc_regs = (u32 *)&caicos_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_CAYMAN:
+ io_mc_regs = (u32 *)&cayman_io_mc_regs;
+ ucode_size = CAYMAN_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
}
@@ -184,13 +244,13 @@ int btc_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
/* load mc io regs */
- for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+ for (i = 0; i < regs_size; i++) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
/* load the MC ucode */
fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+ for (i = 0; i < ucode_size; i++)
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
/* put the engine back into the active state */
@@ -231,23 +291,38 @@ int ni_init_microcode(struct radeon_device *rdev)
case CHIP_BARTS:
chip_name = "BARTS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
break;
case CHIP_TURKS:
chip_name = "TURKS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
break;
case CHIP_CAICOS:
chip_name = "CAICOS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_CAYMAN:
+ chip_name = "CAYMAN";
+ rlc_chip_name = "CAYMAN";
+ pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+ me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+ mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
break;
default: BUG();
}
- pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
- me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
- rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
- mc_req_size = BTC_MC_UCODE_SIZE * 4;
-
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
@@ -314,3 +389,1204 @@ out:
return err;
}
+/*
+ * Core functions
+ */
+static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
+ u32 num_backends_per_asic,
+ u32 *backend_disable_mask_per_asic,
+ u32 num_shader_engines)
+{
+ u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 num_backends_per_se;
+ u32 cur_pipe;
+ u32 swizzle_pipe[CAYMAN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
+ /* force legal values */
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
+ num_tile_pipes = rdev->config.cayman.max_tile_pipes;
+ if (num_shader_engines < 1)
+ num_shader_engines = 1;
+ if (num_shader_engines > rdev->config.cayman.max_shader_engines)
+ num_shader_engines = rdev->config.cayman.max_shader_engines;
+ if (num_backends_per_asic > num_shader_engines)
+ num_backends_per_asic = num_shader_engines;
+ if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
+ num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
+
+ /* make sure we have the same number of backends per se */
+ num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
+ /* set up the number of backends per se */
+ num_backends_per_se = num_backends_per_asic / num_shader_engines;
+ if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
+ num_backends_per_se = rdev->config.cayman.max_backends_per_se;
+ num_backends_per_asic = num_backends_per_se * num_shader_engines;
+ }
+
+ /* create enable mask and count for enabled backends */
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends_per_asic)
+ break;
+ }
+
+ /* force the backends mask to match the current number of backends */
+ if (enabled_backends_count != num_backends_per_asic) {
+ u32 this_backend_enabled;
+ u32 shader_engine;
+ u32 backend_per_se;
+
+ enabled_backends_mask = 0;
+ enabled_backends_count = 0;
+ *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ /* calc the current se */
+ shader_engine = i / rdev->config.cayman.max_backends_per_se;
+ /* calc the backend per se */
+ backend_per_se = i % rdev->config.cayman.max_backends_per_se;
+ /* default to not enabled */
+ this_backend_enabled = 0;
+ if ((shader_engine < num_shader_engines) &&
+ (backend_per_se < num_backends_per_se))
+ this_backend_enabled = 1;
+ if (this_backend_enabled) {
+ enabled_backends_mask |= (1 << i);
+ *backend_disable_mask_per_asic &= ~(1 << i);
+ ++enabled_backends_count;
+ }
+ }
+ }
+
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ force_no_swizzle = true;
+ break;
+ default:
+ force_no_swizzle = false;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
+ }
+
+ return backend_map;
+}
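
Editor's note: backend_map packs one 4-bit backend id per tile pipe, placed at nibble swizzle_pipe[pipe], while the loop skips disabled backends via enabled_backends_mask and round-robins cur_backend across CAYMAN_MAX_BACKENDS. A worked example for two pipes, illustrative only:

	/* num_tile_pipes == 2, swizzle {0, 1}, backends 0 and 1 enabled */
	u32 backend_map = 0;
	backend_map |= (0 & 0xf) << (0 * 4);	/* pipe 0 -> backend 0 */
	backend_map |= (1 & 0xf) << (1 * 4);	/* pipe 1 -> backend 1 */
	/* backend_map == 0x00000010: nibble n names the backend for pipe n */
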
+
+static void cayman_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ }
+
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ default:
+ //tcp_chan_steer_lo = 0x54763210
+ tcp_chan_steer_lo = 0x76543210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ }
+
+ WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+ WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
+static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
+ u32 disable_mask_per_se,
+ u32 max_disable_mask_per_se,
+ u32 num_shader_engines)
+{
+ u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
+ u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
+
+ if (num_shader_engines == 1)
+ return disable_mask_per_asic;
+ else if (num_shader_engines == 2)
+ return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
+ else
+ return 0xffffffff;
+}
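
Editor's note: for two shader engines the per-SE disable mask is replicated into the ASIC-wide mask, shifted left by the field width that r600_count_pipe_bits() derives from the number of set bits in the per-SE mask. A worked example:

	u32 per_se = 0x3;				/* two backends disabled per SE */
	u32 width  = 2;					/* r600_count_pipe_bits(0x3) */
	u32 asic   = per_se | (per_se << width);	/* -> 0xf */
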
+
+static void cayman_gpu_init(struct radeon_device *rdev)
+{
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 cgts_tcc_disable;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 gc_user_shader_pipe_config;
+ u32 gc_user_rb_backend_disable;
+ u32 cgts_user_tcc_disable;
+ u32 cgts_sm_ctrl_reg;
+ u32 hdp_host_path_cntl;
+ u32 tmp;
+ int i, j;
+
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ default:
+ rdev->config.cayman.max_shader_engines = 2;
+ rdev->config.cayman.max_pipes_per_simd = 4;
+ rdev->config.cayman.max_tile_pipes = 8;
+ rdev->config.cayman.max_simds_per_se = 12;
+ rdev->config.cayman.max_backends_per_se = 4;
+ rdev->config.cayman.max_texture_channel_caches = 8;
+ rdev->config.cayman.max_gprs = 256;
+ rdev->config.cayman.max_threads = 256;
+ rdev->config.cayman.max_gs_threads = 32;
+ rdev->config.cayman.max_stack_entries = 512;
+ rdev->config.cayman.sx_num_of_sets = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sq_num_cf_insts = 2;
+
+ rdev->config.cayman.sc_prim_fifo_size = 0x100;
+ rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+ cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE);
+ gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
+ gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
+ cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
+
+ rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
+ tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+ rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
+ rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
+ tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
+ rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
+ tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
+ rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
+ tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
+ rdev->config.cayman.backend_disable_mask_per_asic =
+ cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
+ rdev->config.cayman.num_shader_engines);
+ rdev->config.cayman.backend_map =
+ cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
+ rdev->config.cayman.num_backends_per_se *
+ rdev->config.cayman.num_shader_engines,
+ &rdev->config.cayman.backend_disable_mask_per_asic,
+ rdev->config.cayman.num_shader_engines);
+ tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
+ rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
+ tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
+ rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+ if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
+ rdev->config.cayman.mem_max_burst_length_bytes = 512;
+ tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (rdev->config.cayman.mem_row_size_in_kb > 4)
+ rdev->config.cayman.mem_row_size_in_kb = 4;
+ /* XXX use MC settings? */
+ rdev->config.cayman.shader_engine_tile_size = 32;
+ rdev->config.cayman.num_gpus = 1;
+ rdev->config.cayman.multi_gpu_tile_size = 64;
+
+ //gb_addr_config = 0x02011003
+#if 0
+ gb_addr_config = RREG32(GB_ADDR_CONFIG);
+#else
+ gb_addr_config = 0;
+ switch (rdev->config.cayman.num_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
+ tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
+ switch (rdev->config.cayman.num_gpus) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_GPUS(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_GPUS(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_GPUS(2);
+ break;
+ }
+ switch (rdev->config.cayman.multi_gpu_tile_size) {
+ case 16:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
+ break;
+ case 32:
+ default:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
+ break;
+ case 64:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+ break;
+ case 128:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
+ break;
+ }
+ switch (rdev->config.cayman.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= ROW_SIZE(0);
+ break;
+ case 2:
+ gb_addr_config |= ROW_SIZE(1);
+ break;
+ case 4:
+ gb_addr_config |= ROW_SIZE(2);
+ break;
+ }
+#endif
+
+ tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+ rdev->config.cayman.num_tile_pipes = (1 << tmp);
+ tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+ rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+ tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
+ rdev->config.cayman.num_shader_engines = tmp + 1;
+ tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
+ rdev->config.cayman.num_gpus = tmp + 1;
+ tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
+ rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
+ tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
+
+ //gb_backend_map = 0x76541032;
+#if 0
+ gb_backend_map = RREG32(GB_BACKEND_MAP);
+#else
+ gb_backend_map =
+ cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
+ rdev->config.cayman.num_backends_per_se *
+ rdev->config.cayman.num_shader_engines,
+ &rdev->config.cayman.backend_disable_mask_per_asic,
+ rdev->config.cayman.num_shader_engines);
+#endif
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.cayman.tile_config = 0;
+ switch (rdev->config.cayman.num_tile_pipes) {
+ case 1:
+ default:
+ rdev->config.cayman.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.cayman.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.cayman.tile_config |= (2 << 0);
+ break;
+ case 8:
+ rdev->config.cayman.tile_config |= (3 << 0);
+ break;
+ }
+ rdev->config.cayman.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+	rdev->config.cayman.tile_config |=
+		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+ cayman_program_channel_remap(rdev);
+
+ /* primary versions */
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
+
+ /* user versions */
+ WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+
+ /* reprogram the shader complex */
+ cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
+ for (i = 0; i < 16; i++)
+ WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
+ WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
+
+	/* need to be explicitly zeroed */
+ WREG32(VGT_OFFCHIP_LDS_BASE, 0);
+ WREG32(SQ_LSTMP_RING_BASE, 0);
+ WREG32(SQ_HSTMP_RING_BASE, 0);
+ WREG32(SQ_ESTMP_RING_BASE, 0);
+ WREG32(SQ_GSTMP_RING_BASE, 0);
+ WREG32(SQ_VSTMP_RING_BASE, 0);
+ WREG32(SQ_PSTMP_RING_BASE, 0);
+
+ WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
+
+	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
+
+	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
+
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+
+ WREG32(CP_PERFMON_CNTL, 0);
+
+	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
+ WREG32(SQ_CONFIG, (VC_ENABLE |
+ EXPORT_SRC_C |
+ GFX_PRIO(0) |
+ CS1_PRIO(0) |
+ CS2_PRIO(1)));
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+ /* bits 0-7 are the VM contexts0-7 */
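+ /* writing 1 invalidates only context 0, which backs the GART */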
+ WREG32(VM_INVALIDATE_REQUEST, 1);
+}
+
+int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ /* setup context0 */
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT0_CNTL2, 0);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ /* disable context1-7 */
+ WREG32(VM_CONTEXT1_CNTL2, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+
+ cayman_pcie_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
+}
+
+void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+ cayman_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+/*
+ * CP.
+ */
+static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ WREG32(CP_ME_CNTL, 0);
+ else {
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ }
+}
+
+static int cayman_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ cayman_cp_enable(rdev, false);
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+static int cayman_cp_start(struct radeon_device *rdev)
+{
+ int r, i;
+
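+ /* 7 dwords: ME_INITIALIZE header plus 6 payload dwords */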
+ r = radeon_ring_lock(rdev, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
+
+ cayman_cp_enable(rdev, true);
+
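+ /* 19 extra dwords: preamble begin/end (2+2), clear state (2), SQ_VTX_BASE_VTX_LOC (4), clear consts (5), vertex reuse block (4) */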
+ r = radeon_ring_lock(rdev, cayman_default_size + 19);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* setup clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < cayman_default_size; i++)
+ radeon_ring_write(rdev, cayman_default_state[i]);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+ /* Clear consts */
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+
+ radeon_ring_write(rdev, 0xc0026900);
+ radeon_ring_write(rdev, 0x00000316);
+ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(rdev, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
+
+ radeon_ring_unlock_commit(rdev);
+
+ /* XXX init other rings */
+
+ return 0;
+}
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+ cayman_cp_enable(rdev, false);
+ radeon_ring_fini(rdev);
+}
+
+int cayman_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ WREG32(CP_DEBUG, (1 << 27));
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
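+ /* RB_BUFSZ is the log2 of the ring size in qwords */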
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB0_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB0_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
+ mdelay(1);
+ WREG32(CP_RB0_CNTL, tmp);
+
+ WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB0_WPTR);
+
+ /* ring1 - compute only */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB1_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB1_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB1_CNTL, tmp);
+
+ WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+
+ rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+ rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
+
+ /* ring2 - compute only */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB2_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB2_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB2_CNTL, tmp);
+
+ WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+
+ rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+ rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
+
+ /* start the rings */
+ cayman_cp_start(rdev);
+ rdev->cp.ready = true;
+ rdev->cp1.ready = true;
+ rdev->cp2.ready = true;
+ /* this only tests cp0 */
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ rdev->cp1.ready = false;
+ rdev->cp2.ready = false;
+ return r;
+ }
+
+ return 0;
+}
+
+bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status_se0, grbm_status_se1;
+ struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
+ int r;
+
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ if (!(grbm_status & GUI_ACTIVE)) {
+ r100_gpu_lockup_update(lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
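+ /* emit two NOPs so a live CP will advance its read pointer */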
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ /* XXX deal with CP0,1,2 */
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+}
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 grbm_reset = 0;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset \n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int cayman_asic_reset(struct radeon_device *rdev)
+{
+ return cayman_gpu_soft_reset(rdev);
+}
+
+static int cayman_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+
+ evergreen_mc_program(rdev);
+ r = cayman_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ cayman_gpu_init(rdev);
+
+#if 0
+ r = cayman_blit_init(rdev);
+ if (r) {
+ cayman_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+#endif
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ evergreen_irq_set(rdev);
+
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ if (r)
+ return r;
+ r = cayman_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = cayman_cp_resume(rdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+int cayman_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
+ * posting performs the tasks needed to bring the GPU back into good
+ * shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ r = cayman_startup(rdev);
+ if (r) {
+ DRM_ERROR("cayman startup failed on resume\n");
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+int cayman_suspend(struct radeon_device *rdev)
+{
+ /* int r; */
+
+ /* FIXME: we should wait for ring to be empty */
+ cayman_cp_enable(rdev, false);
+ rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ cayman_pcie_gart_disable(rdev);
+
+#if 0
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+#endif
+ return 0;
+}
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more
+ * than call asic-specific functions. This should also allow
+ * removal of a bunch of callback functions like vram_info.
+ */
+int cayman_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* This doesn't do much */
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = cayman_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ cayman_cp_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+
+ /* Don't start up if the MC ucode is missing.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (!rdev->mc_fw) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+ /* cayman_blit_fini(rdev); */
+ cayman_cp_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index f7b445390e02..0f9a08b53fbd 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -24,7 +24,101 @@
#ifndef NI_H
#define NI_H
+#define CAYMAN_MAX_SH_GPRS 256
+#define CAYMAN_MAX_TEMP_GPRS 16
+#define CAYMAN_MAX_SH_THREADS 256
+#define CAYMAN_MAX_SH_STACK_ENTRIES 4096
+#define CAYMAN_MAX_FRC_EOV_CNT 16384
+#define CAYMAN_MAX_BACKENDS 8
+#define CAYMAN_MAX_BACKENDS_MASK 0xFF
+#define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF
+#define CAYMAN_MAX_SIMDS 16
+#define CAYMAN_MAX_SIMDS_MASK 0xFFFF
+#define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF
+#define CAYMAN_MAX_PIPES 8
+#define CAYMAN_MAX_PIPES_MASK 0xFF
+#define CAYMAN_MAX_LDS_NUM 0xFFFF
+#define CAYMAN_MAX_TCC 16
+#define CAYMAN_MAX_TCC_MASK 0xFF
+
+#define DMIF_ADDR_CONFIG 0xBD4
+#define SRBM_STATUS 0x0E50
+
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 18)
+/* CONTEXT1_IDENTITY_ACCESS_MODE
+ * 0 physical = logical
+ * 1 logical via context1 page table
+ * 2 inside identity aperture use translation, outside physical = logical
+ * 3 inside identity aperture physical = logical, outside use translation
+ */
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
+#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_CNTL2 0x1430
+#define VM_CONTEXT1_CNTL2 0x1434
+#define VM_INVALIDATE_REQUEST 0x1478
+#define VM_INVALIDATE_RESPONSE 0x147c
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
+
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_MX_L1_TLB_CNTL 0x2064
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
+
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
@@ -37,5 +131,406 @@
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define RING2_RQ_PENDING (1 << 4)
+#define SRBM_RQ_PENDING (1 << 5)
+#define RING1_RQ_PENDING (1 << 6)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GDS_DMA_RQ_PENDING (1 << 9)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define GDS_BUSY (1 << 15)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define IA_BUSY_NO_DMA (1 << 18)
+#define IA_BUSY (1 << 19)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_VGT_BUSY (1 << 23)
+#define SE_PA_BUSY (1 << 24)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_GDS (1 << 4)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VGT (1 << 14)
+#define SOFT_RESET_IA (1 << 15)
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_RB2_RPTR 0x86f8
+#define CP_RB1_RPTR 0x86fc
+#define CP_RB0_RPTR 0x8700
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_MEQ_THRESHOLDS 0x8764
+#define MEQ1_START(x) ((x) << 0)
+#define MEQ2_START(x) ((x) << 8)
+#define CP_PERFMON_CNTL 0x87FC
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0xFFFF0000
+#define INACTIVE_SIMDS_SHIFT 16
+
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_TF_RING_SIZE 0x8988
+#define VGT_OFFCHIP_LDS_BASE 0x89b4
+
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define GFX_PRIO(x) ((x) << 2)
+#define CS1_PRIO(x) ((x) << 4)
+#define CS2_PRIO(x) ((x) << 6)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+#define SQ_LSTMP_RING_BASE 0x8e10
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define DYN_GPR_ENABLE (1 << 8)
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define CRC_SIMD_ID_WADDR_DISABLE (1 << 8)
+
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_TCC_DISABLE 0x914C
+#define TCC_DISABLE_MASK 0xFFFF0000
+#define TCC_DISABLE_SHIFT 16
+#define CGTS_SM_CTRL_REG 0x915C
+#define OVERRIDE (1 << 21)
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x00000007
+#define NUM_PIPES_SHIFT 0
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
+#define PIPE_INTERLEAVE_SIZE_SHIFT 4
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define NUM_SHADER_ENGINES_MASK 0x00003000
+#define NUM_SHADER_ENGINES_SHIFT 12
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
+#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
+#define NUM_GPUS(x) ((x) << 20)
+#define NUM_GPUS_MASK 0x00700000
+#define NUM_GPUS_SHIFT 20
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
+#define MULTI_GPU_TILE_SIZE_SHIFT 24
+#define ROW_SIZE(x) ((x) << 28)
+#define ROW_SIZE_MASK 0x30000000
+#define ROW_SIZE_SHIFT 28
+#define NUM_LOWER_PIPES(x) ((x) << 30)
+#define NUM_LOWER_PIPES_MASK 0x40000000
+#define NUM_LOWER_PIPES_SHIFT 30
+#define GB_BACKEND_MAP 0x98FC
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+#define BACKEND_DISABLE_MASK 0x00FF0000
+#define BACKEND_DISABLE_SHIFT 16
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define CP_RB0_BASE 0xC100
+#define CP_RB0_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB0_RPTR_ADDR 0xC10C
+#define CP_RB0_RPTR_ADDR_HI 0xC110
+#define CP_RB0_WPTR 0xC114
+#define CP_RB1_BASE 0xC180
+#define CP_RB1_CNTL 0xC184
+#define CP_RB1_RPTR_ADDR 0xC188
+#define CP_RB1_RPTR_ADDR_HI 0xC18C
+#define CP_RB1_WPTR 0xC190
+#define CP_RB2_BASE 0xC194
+#define CP_RB2_CNTL 0xC198
+#define CP_RB2_RPTR_ADDR 0xC19C
+#define CP_RB2_RPTR_ADDR_HI 0xC1A0
+#define CP_RB2_WPTR 0xC1A4
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_ME_RAM_DATA 0xC160
+#define CP_DEBUG 0xC1FC
+
+/*
+ * PM4
+ */
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DEALLOC_STATE 0x14
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+
#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e372f9e1e5ce..fcc23e4e0b3c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1205,14 +1205,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
DRM_ERROR("vline wait had illegal wait until segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -1230,8 +1228,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -1253,14 +1250,13 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
ib[h_idx] = header;
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
}
-out:
- return r;
+
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9b3fad23b76c..12fdebf9aed8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -47,6 +47,7 @@
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -2739,7 +2740,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->ih.ring_obj);
@@ -2820,13 +2821,20 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_HB_CNTL, 0);
WREG32(RLC_HB_RPTR, 0);
WREG32(RLC_HB_WPTR, 0);
- WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
- WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ if (rdev->family <= CHIP_CAICOS) {
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ }
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_CEDAR) {
+ if (rdev->family >= CHIP_CAYMAN) {
+ for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_CEDAR) {
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b5443fe1c1d1..846fae576399 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon.h"
#include "radeon_reg.h"
+#include "radeon_asic.h"
#include "atom.h"
#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index df68d91e8190..9aa74c3f8cb6 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,7 +512,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 153095fba62f..3324620b2db6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,75 +71,167 @@ struct r600_cs_track {
u64 db_bo_mc;
};
+#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc }
+#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 }
+#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 }
+#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc }
+#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc }
+
+struct gpu_formats {
+ unsigned blockwidth;
+ unsigned blockheight;
+ unsigned blocksize;
+ unsigned valid_color;
+};
+
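+/* indexed by hw format id; entries are { blockwidth, blockheight, blocksize, valid_color } */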
+static const struct gpu_formats color_formats_table[] = {
+ /* 8 bit */
+ FMT_8_BIT(V_038004_COLOR_8, 1),
+ FMT_8_BIT(V_038004_COLOR_4_4, 1),
+ FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+ FMT_8_BIT(V_038004_FMT_1, 0),
+
+ /* 16-bit */
+ FMT_16_BIT(V_038004_COLOR_16, 1),
+ FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+ FMT_16_BIT(V_038004_COLOR_8_8, 1),
+ FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+ FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+ FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+ /* 24-bit */
+ FMT_24_BIT(V_038004_FMT_8_8_8),
+
+ /* 32-bit */
+ FMT_32_BIT(V_038004_COLOR_32, 1),
+ FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+ FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+ FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+ FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+ /* 48-bit */
+ FMT_48_BIT(V_038004_FMT_16_16_16),
+ FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+ /* 64-bit */
+ FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+ FMT_96_BIT(V_038004_FMT_32_32_32),
+ FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+ /* 128-bit */
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+ [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+ [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+ /* block compressed formats */
+ [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC5] = { 4, 4, 16, 0},
+
+};
+
+static inline bool fmt_is_valid_color(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].valid_color)
+ return true;
+
+ return false;
+}
+
+static inline bool fmt_is_valid_texture(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].blockwidth > 0)
+ return true;
+
+ return false;
+}
+
+static inline int fmt_get_blocksize(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ return color_formats_table[format].blocksize;
+}
+
+static inline int fmt_get_nblocksx(u32 format, u32 w)
+{
+ unsigned bw;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bw = color_formats_table[format].blockwidth;
+ if (bw == 0)
+ return 0;
+
+ return (w + bw - 1) / bw;
+}
+
+static inline int fmt_get_nblocksy(u32 format, u32 h)
+{
+ unsigned bh;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bh = color_formats_table[format].blockheight;
+ if (bh == 0)
+ return 0;
+
+ return (h + bh - 1) / bh;
+}
+
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
- switch (format) {
- case V_038004_COLOR_8:
- case V_038004_COLOR_4_4:
- case V_038004_COLOR_3_3_2:
- case V_038004_FMT_1:
- *bpe = 1;
- break;
- case V_038004_COLOR_16:
- case V_038004_COLOR_16_FLOAT:
- case V_038004_COLOR_8_8:
- case V_038004_COLOR_5_6_5:
- case V_038004_COLOR_6_5_5:
- case V_038004_COLOR_1_5_5_5:
- case V_038004_COLOR_4_4_4_4:
- case V_038004_COLOR_5_5_5_1:
- *bpe = 2;
- break;
- case V_038004_FMT_8_8_8:
- *bpe = 3;
- break;
- case V_038004_COLOR_32:
- case V_038004_COLOR_32_FLOAT:
- case V_038004_COLOR_16_16:
- case V_038004_COLOR_16_16_FLOAT:
- case V_038004_COLOR_8_24:
- case V_038004_COLOR_8_24_FLOAT:
- case V_038004_COLOR_24_8:
- case V_038004_COLOR_24_8_FLOAT:
- case V_038004_COLOR_10_11_11:
- case V_038004_COLOR_10_11_11_FLOAT:
- case V_038004_COLOR_11_11_10:
- case V_038004_COLOR_11_11_10_FLOAT:
- case V_038004_COLOR_2_10_10_10:
- case V_038004_COLOR_8_8_8_8:
- case V_038004_COLOR_10_10_10_2:
- case V_038004_FMT_5_9_9_9_SHAREDEXP:
- case V_038004_FMT_32_AS_8:
- case V_038004_FMT_32_AS_8_8:
- *bpe = 4;
- break;
- case V_038004_COLOR_X24_8_32_FLOAT:
- case V_038004_COLOR_32_32:
- case V_038004_COLOR_32_32_FLOAT:
- case V_038004_COLOR_16_16_16_16:
- case V_038004_COLOR_16_16_16_16_FLOAT:
- *bpe = 8;
- break;
- case V_038004_FMT_16_16_16:
- case V_038004_FMT_16_16_16_FLOAT:
- *bpe = 6;
- break;
- case V_038004_FMT_32_32_32:
- case V_038004_FMT_32_32_32_FLOAT:
- *bpe = 12;
- break;
- case V_038004_COLOR_32_32_32_32:
- case V_038004_COLOR_32_32_32_32_FLOAT:
- *bpe = 16;
- break;
- case V_038004_FMT_GB_GR:
- case V_038004_FMT_BG_RG:
- case V_038004_COLOR_INVALID:
- default:
- *bpe = 16;
- return -EINVAL;
- }
+ unsigned res;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ goto fail;
+
+ res = color_formats_table[format].blocksize;
+ if (res == 0)
+ goto fail;
+
+ *bpe = res;
return 0;
+
+fail:
+ *bpe = 16;
+ return -EINVAL;
}
struct array_mode_checker {
@@ -148,7 +240,7 @@ struct array_mode_checker {
u32 nbanks;
u32 npipes;
u32 nsamples;
- u32 bpe;
+ u32 blocksize;
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
@@ -162,7 +254,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
u32 tile_height = 8;
u32 macro_tile_width = values->nbanks;
u32 macro_tile_height = values->npipes;
- u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+ u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
switch (values->array_mode) {
@@ -174,7 +266,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
*base_align = 1;
break;
case ARRAY_LINEAR_ALIGNED:
- *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+ *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -182,7 +274,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_1D_TILED_THIN1:
*pitch_align = max((u32)tile_width,
(u32)(values->group_size /
- (tile_height * values->bpe * values->nsamples)));
+ (tile_height * values->blocksize * values->nsamples)));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -190,12 +282,12 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_2D_TILED_THIN1:
*pitch_align = max((u32)macro_tile_width,
(u32)(((values->group_size / tile_height) /
- (values->bpe * values->nsamples)) *
+ (values->blocksize * values->nsamples)) *
values->nbanks)) * tile_width;
*height_align = macro_tile_height * tile_height;
*depth_align = 1;
*base_align = max(macro_tile_bytes,
- (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+ (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
break;
default:
return -EINVAL;
@@ -234,21 +326,22 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, slice_tile_max, size, tmp;
+ u32 slice_tile_max, size, tmp;
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
volatile u32 *ib = p->ib->ptr;
unsigned array_mode;
-
+ u32 format;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
- if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
+ format = G_0280A0_FORMAT(track->cb_color_info[i]);
+ if (!fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
- __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
+ __func__, __LINE__, format,
i, track->cb_color_info[i]);
return -EINVAL;
}
@@ -267,7 +360,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -311,7 +404,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
- tmp = height * pitch * bpe;
+ tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
@@ -436,7 +529,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = bpe;
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -687,33 +780,28 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (wait_reg_mem.type != PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -732,8 +820,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -756,14 +843,13 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
ib[h_idx] = header;
ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
}
-out:
- return r;
+
+ return 0;
}
static int r600_packet0_check(struct radeon_cs_parser *p,
@@ -1113,39 +1199,61 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return 0;
}
-static inline unsigned minify(unsigned size, unsigned levels)
+static inline unsigned mip_minify(unsigned size, unsigned level)
{
- size = size >> levels;
- if (size < 1)
- size = 1;
- return size;
+ unsigned val;
+
+ val = max(1U, size >> level);
+ if (level > 0)
+ val = roundup_pow_of_two(val);
+ return val;
}
-static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
- unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
- unsigned pitch_align,
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ unsigned w0, unsigned h0, unsigned d0, unsigned format,
+ unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level, face;
- unsigned width, height, depth, rowstride, size;
-
- w0 = minify(w0, 0);
- h0 = minify(h0, 0);
- d0 = minify(d0, 0);
+ unsigned offset, i, level;
+ unsigned width, height, depth, size;
+ unsigned blocksize;
+ unsigned nbx, nby;
+ unsigned nlevels = llevel - blevel + 1;
+
+ *l0_size = -1;
+ blocksize = fmt_get_blocksize(format);
+
+ w0 = mip_minify(w0, 0);
+ h0 = mip_minify(h0, 0);
+ d0 = mip_minify(d0, 0);
for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
- width = minify(w0, i);
- height = minify(h0, i);
- depth = minify(d0, i);
- for(face = 0; face < nfaces; face++) {
- rowstride = ALIGN((width * bpe), pitch_align);
- size = height * rowstride * depth;
- offset += size;
- offset = (offset + 0x1f) & ~0x1f;
- }
+ width = mip_minify(w0, i);
+ nbx = fmt_get_nblocksx(format, width);
+
+ nbx = round_up(nbx, block_align);
+
+ height = mip_minify(h0, i);
+ nby = fmt_get_nblocksy(format, height);
+ nby = round_up(nby, height_align);
+
+ depth = mip_minify(d0, i);
+
+ size = nbx * nby * blocksize;
+ if (nfaces)
+ size *= nfaces;
+ else
+ size *= depth;
+
+ if (i == 0)
+ *l0_size = size;
+
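+ /* the base level and level 1 each start on a base_align boundary */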
+ if (i == 0 || i == 1)
+ offset = round_up(offset, base_align);
+
+ offset += size;
}
- *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
*mipmap_size = offset;
- if (!nlevels)
+ if (llevel == 0)
*mipmap_size = *l0_size;
if (!blevel)
*mipmap_size -= *l0_size;
@@ -1169,11 +1277,13 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
- u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size;
+ u32 nfaces, llevel, blevel, w0, h0, d0;
+ u32 word0, word1, l0_size, mipmap_size, word2, word3;
u32 height_align, pitch, pitch_align, depth_align;
+ u32 array, barray, larray;
u64 base_align;
struct array_mode_checker array_check;
+ u32 format;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@@ -1199,19 +1309,25 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
case V_038000_SQ_TEX_DIM_3D:
break;
case V_038000_SQ_TEX_DIM_CUBEMAP:
- nfaces = 6;
+ if (p->family >= CHIP_RV770)
+ nfaces = 8;
+ else
+ nfaces = 6;
break;
case V_038000_SQ_TEX_DIM_1D_ARRAY:
case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ array = 1;
+ break;
case V_038000_SQ_TEX_DIM_2D_MSAA:
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
default:
dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
- if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
+ format = G_038004_DATA_FORMAT(word1);
+ if (!fmt_is_valid_texture(format)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, G_038004_DATA_FORMAT(word1));
+ __func__, __LINE__, format);
return -EINVAL;
}
@@ -1222,7 +1338,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1248,25 +1364,34 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
+ word2 = radeon_get_ib_value(p, idx + 2) << 8;
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
- nlevels = G_038014_LAST_LEVEL(word1);
- r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
- (pitch_align * bpe),
+ llevel = G_038014_LAST_LEVEL(word1);
+ if (array == 1) {
+ barray = G_038014_BASE_ARRAY(word1);
+ larray = G_038014_LAST_ARRAY(word1);
+
+ nfaces = larray - barray + 1;
+ }
+ r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+ pitch_align, height_align, base_align,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
- word0 = radeon_get_ib_value(p, idx + 2) << 8;
- if ((l0_size + word0) > radeon_bo_size(texture)) {
+ if ((l0_size + word2) > radeon_bo_size(texture)) {
dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
+ w0, h0, format, word2, l0_size, radeon_bo_size(texture));
+ dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word0 = radeon_get_ib_value(p, idx + 3) << 8;
- if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
+ w0, h0, format, blevel, llevel, word3, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
}
@@ -1289,6 +1414,38 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
+
case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) {
DRM_ERROR("bad START_3D\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e6a58ed48dcf..50db6d62eec2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/*
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 04bac0bbd3ec..b2b944bcd05a 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1304,6 +1304,11 @@
#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
#define V_038004_FMT_32_32_32 0x0000002F
#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define V_038004_FMT_BC1 0x00000031
+#define V_038004_FMT_BC2 0x00000032
+#define V_038004_FMT_BC3 0x00000033
+#define V_038004_FMT_BC4 0x00000034
+#define V_038004_FMT_BC5 0x00000035
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 6b3429495118..cfe3af1a7935 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -258,8 +258,9 @@ struct radeon_bo {
int surface_reg;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object *gobj;
+ struct drm_gem_object gem_base;
};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
struct radeon_bo_list {
struct ttm_validate_buffer tv;
@@ -288,6 +289,15 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
/*
* GART structures, functions & helpers
@@ -319,6 +329,7 @@ struct radeon_gart {
union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
+ bool *ttm_alloced;
bool ready;
};
@@ -331,7 +342,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist);
+ int pages, struct page **pagelist,
+ dma_addr_t *dma_addr);
/*
@@ -651,6 +663,8 @@ struct radeon_wb {
#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define RADEON_WB_CP1_RPTR_OFFSET 1280
+#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET 2048
#define R600_WB_EVENT_OFFSET 3072
@@ -1037,12 +1051,52 @@ struct evergreen_asic {
struct r100_gpu_lockup lockup;
};
+struct cayman_asic {
+ unsigned max_shader_engines;
+ unsigned max_pipes_per_simd;
+ unsigned max_tile_pipes;
+ unsigned max_simds_per_se;
+ unsigned max_backends_per_se;
+ unsigned max_texture_channel_caches;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_gs_threads;
+ unsigned max_stack_entries;
+ unsigned sx_num_of_sets;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned max_hw_contexts;
+ unsigned sq_num_cf_insts;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_shader_engines;
+ unsigned num_shader_pipes_per_simd;
+ unsigned num_tile_pipes;
+ unsigned num_simds_per_se;
+ unsigned num_backends_per_se;
+ unsigned backend_disable_mask_per_asic;
+ unsigned backend_map;
+ unsigned num_texture_channel_caches;
+ unsigned mem_max_burst_length_bytes;
+ unsigned mem_row_size_in_kb;
+ unsigned shader_engine_tile_size;
+ unsigned num_gpus;
+ unsigned multi_gpu_tile_size;
+
+ unsigned tile_config;
+ struct r100_gpu_lockup lockup;
+};
+
union radeon_asic_config {
struct r300_asic r300;
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
struct evergreen_asic evergreen;
+ struct cayman_asic cayman;
};
/*
@@ -1133,6 +1187,9 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv;
struct radeon_cp cp;
+ /* cayman compute rings */
+ struct radeon_cp cp1;
+ struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
@@ -1185,19 +1242,6 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-/* evergreen blit */
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
@@ -1449,62 +1493,15 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
-/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern bool r600_card_posted(struct radeon_device *rdev);
-extern void r600_cp_stop(struct radeon_device *rdev);
-extern int r600_cp_start(struct radeon_device *rdev);
-extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_cp_resume(struct radeon_device *rdev);
-extern void r600_cp_fini(struct radeon_device *rdev);
-extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
-extern int r600_pcie_gart_init(struct radeon_device *rdev);
-extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern int r600_ib_test(struct radeon_device *rdev);
-extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_scratch_init(struct radeon_device *rdev);
-extern int r600_blit_init(struct radeon_device *rdev);
-extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_asic_reset(struct radeon_device *rdev);
-/* r600 irq */
-extern int r600_irq_init(struct radeon_device *rdev);
-extern void r600_irq_fini(struct radeon_device *rdev);
-extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_irq_set(struct radeon_device *rdev);
-extern void r600_irq_suspend(struct radeon_device *rdev);
-extern void r600_disable_interrupts(struct radeon_device *rdev);
-extern void r600_rlc_stop(struct radeon_device *rdev);
-/* r600 audio */
-extern int r600_audio_init(struct radeon_device *rdev);
-extern int r600_audio_tmds_index(struct drm_encoder *encoder);
-extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-extern int r600_audio_channels(struct radeon_device *rdev);
-extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
-extern int r600_audio_rate(struct radeon_device *rdev);
-extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
-extern void r600_audio_schedule_polling(struct radeon_device *rdev);
-extern void r600_audio_enable_polling(struct drm_encoder *encoder);
-extern void r600_audio_disable_polling(struct drm_encoder *encoder);
-extern void r600_audio_fini(struct radeon_device *rdev);
-extern void r600_hdmi_init(struct drm_encoder *encoder);
+/*
+ * r600 functions used by radeon_encoder.c
+ */
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-
-extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void r700_cp_stop(struct radeon_device *rdev);
-extern void r700_cp_fini(struct radeon_device *rdev);
-extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-extern int evergreen_irq_set(struct radeon_device *rdev);
-extern int evergreen_blit_init(struct radeon_device *rdev);
-extern void evergreen_blit_fini(struct radeon_device *rdev);
extern int ni_init_microcode(struct radeon_device *rdev);
-extern int btc_mc_load_microcode(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
@@ -1513,14 +1510,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev);
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif
-/* evergreen */
-struct evergreen_mc_save {
- u32 vga_control[6];
- u32 vga_render_control;
- u32 vga_hdp_control;
- u32 crtc_control[6];
-};
-
#include "radeon_object.h"
#endif
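The move from a gobj pointer to an embedded gem_base in struct radeon_bo relies on the standard container_of() layout trick, which the new gem_to_radeon_bo() macro wraps. A self-contained illustration, with stub struct bodies (only the offset arithmetic matters):

#include <stddef.h>

struct drm_gem_object { unsigned long size; };

/* The GEM object is embedded in the bo rather than pointed to, so the
 * owning bo is recovered by subtracting the member offset -- exactly
 * what the kernel's container_of() expands to. */
struct radeon_bo_stub {
	int surface_reg;
	struct drm_gem_object gem_base;
};

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct radeon_bo_stub *gem_to_bo(struct drm_gem_object *gobj)
{
	return my_container_of(gobj, struct radeon_bo_stub, gem_base);
}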
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 793c5e6026ad..eb888ee5f674 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -885,6 +885,52 @@ static struct radeon_asic btc_asic = {
.post_page_flip = &evergreen_post_page_flip,
};
+static struct radeon_asic cayman_asic = {
+ .init = &cayman_init,
+ .fini = &cayman_fini,
+ .suspend = &cayman_suspend,
+ .resume = &cayman_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &cayman_gpu_is_lockup,
+ .asic_reset = &cayman_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = NULL,
+ .copy_dma = NULL,
+ .copy = NULL,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+};
+
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
@@ -977,6 +1023,9 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_CAICOS:
rdev->asic = &btc_asic;
break;
+ case CHIP_CAYMAN:
+ rdev->asic = &cayman_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
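The cayman_asic table above is consumed purely through function pointers, so bringing up the new chip is mostly a matter of filling in the struct and adding the CHIP_CAYMAN case. A simplified caller-side sketch (the struct is reduced to the two hooks needed to show the pattern):

struct radeon_device;

/* Core code dispatches through the table and never calls chip code
 * directly; a NULL hook -- as with Cayman's .copy above -- advertises
 * that the operation is not accelerated yet. */
struct asic_ops {
	int (*init)(struct radeon_device *rdev);
	int (*copy)(struct radeon_device *rdev);
};

static int try_accel_copy(struct radeon_device *rdev,
			  const struct asic_ops *ops)
{
	if (!ops->copy)
		return -1;	/* caller falls back to an unaccelerated path */
	return ops->copy(rdev);
}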
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c59bd98a2029..3d7a0d7c6a9a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev);
void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
@@ -164,8 +162,6 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,7 +204,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
-
/*
* rs600.
*/
@@ -270,8 +265,6 @@ void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
-uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
@@ -307,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-int r600_irq_process(struct radeon_device *rdev);
-int r600_irq_set(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
@@ -333,6 +325,50 @@ extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+int r600_audio_tmds_index(struct drm_encoder *encoder);
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+int r600_audio_channels(struct radeon_device *rdev);
+int r600_audio_bits_per_sample(struct radeon_device *rdev);
+int r600_audio_rate(struct radeon_device *rdev);
+uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+uint8_t r600_audio_category_code(struct radeon_device *rdev);
+void r600_audio_schedule_polling(struct radeon_device *rdev);
+void r600_audio_enable_polling(struct drm_encoder *encoder);
+void r600_audio_disable_polling(struct drm_encoder *encoder);
+void r600_audio_fini(struct radeon_device *rdev);
+void r600_hdmi_init(struct drm_encoder *encoder);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
/*
* rv770,rv730,rv710,rv740
@@ -341,12 +377,21 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-extern void rv770_pm_misc(struct radeon_device *rdev);
-extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
/*
* evergreen
*/
+struct evergreen_mc_save {
+ u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 crtc_control[6];
+};
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -374,5 +419,25 @@ extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+void evergreen_blit_fini(struct radeon_device *rdev);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
+
+/*
+ * cayman
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cayman_init(struct radeon_device *rdev);
+void cayman_fini(struct radeon_device *rdev);
+int cayman_suspend(struct radeon_device *rdev);
+int cayman_resume(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index c558685cc637..10191d9372d8 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 22b7e3dc0eca..3f3c9aac46cc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -972,7 +972,16 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else
+ else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+ if (ASIC_IS_DCE3(rdev)) {
+ /* HDMI 1.3+ supports max clock of 340 MHz */
+ if (mode->clock > 340000)
+ return MODE_CLOCK_HIGH;
+ else
+ return MODE_OK;
+ } else
+ return MODE_CLOCK_HIGH;
+ } else
return MODE_CLOCK_HIGH;
}
return MODE_OK;
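One detail behind the check above: drm_display_mode.clock is kept in kHz, so the 340000 comparison corresponds to the 340 MHz TMDS ceiling of HDMI 1.3+. Condensed into a sketch (clock_khz and is_dce3 mirror mode->clock and ASIC_IS_DCE3()):

#include <stdbool.h>

/* Pre-DCE3 parts reject any HDMI type A mode reaching this path;
 * DCE3 and later accept up to the HDMI 1.3 TMDS limit. */
static bool hdmi_type_a_clock_ok(int clock_khz, bool is_dce3)
{
	return is_dce3 && clock_khz <= 340000;	/* 340 MHz, in kHz */
}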
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index eb6b9eed7349..3d599e33b9cc 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2113,9 +2113,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
break;
}
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 35b5eb8fbe2a..8c1916941871 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -ENOENT;
}
p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].lobj.rdomain = r->read_domains;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4954e2d6ffa2..f0209be7a34b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -85,6 +85,7 @@ static const char radeon_family_name[][16] = {
"BARTS",
"TURKS",
"CAICOS",
+ "CAYMAN",
"LAST",
};
@@ -184,7 +185,7 @@ int radeon_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -860,7 +861,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
- robj = rfb->obj->driver_private;
+ robj = gem_to_radeon_bo(rfb->obj);
/* don't unpin kernel fb objects */
if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3e7e7f9eb781..4be58793dc17 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -371,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -391,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
/* pin the new buffer */
obj = new_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 275b26a708d6..63d2de8771dc 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -49,9 +49,10 @@
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,16 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
@@ -228,11 +239,6 @@ static struct drm_driver driver_old = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -322,6 +328,9 @@ static struct drm_driver kms_driver = {
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
.dma_ioctl = radeon_dma_ioctl_kms,
+ .dumb_create = radeon_mode_dumb_create,
+ .dumb_map_offset = radeon_mode_dumb_mmap,
+ .dumb_destroy = radeon_mode_dumb_destroy,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -336,15 +345,6 @@ static struct drm_driver kms_driver = {
#endif
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
- .suspend = radeon_pci_suspend,
- .resume = radeon_pci_resume,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -354,15 +354,32 @@ static struct drm_driver kms_driver = {
};
static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+static struct pci_driver radeon_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
+static struct pci_driver radeon_kms_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = radeon_pci_probe,
+ .remove = radeon_pci_remove,
+ .suspend = radeon_pci_suspend,
+ .resume = radeon_pci_resume,
+};
static int __init radeon_init(void)
{
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->num_ioctls = radeon_max_ioctl;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && radeon_modeset == -1) {
DRM_INFO("VGACON disable radeon kernel modesetting.\n");
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->driver_features &= ~DRIVER_MODESET;
radeon_modeset = 0;
}
@@ -380,18 +397,19 @@ static int __init radeon_init(void)
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
+ pdriver = &radeon_kms_pci_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
}
/* if the vga console setting is enabled still
* let modprobe override it */
- return drm_init(driver);
+ return drm_pci_init(driver, pdriver);
}
static void __exit radeon_exit(void)
{
- drm_exit(driver);
+ drm_pci_exit(driver, pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 1ca55eb09ad3..6f1d9e563e77 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -84,6 +84,7 @@ enum radeon_family {
CHIP_BARTS,
CHIP_TURKS,
CHIP_CAICOS,
+ CHIP_CAYMAN,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc44bdfec80f..0b7b486c97e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -64,7 +64,7 @@ static struct fb_ops radeonfb_ops = {
};
-static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
@@ -90,7 +90,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
- struct radeon_bo *rbo = gobj->driver_private;
+ struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
int ret;
ret = radeon_bo_reserve(rbo, false);
@@ -131,7 +131,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size);
return -ENOMEM;
}
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -205,7 +205,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
mode_cmd.depth = sizes->surface_depth;
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
@@ -406,14 +406,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
struct radeon_bo *robj;
int size = 0;
- robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
size += radeon_bo_size(robj);
return size;
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
- if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 171b0b2e3a64..9e59868d354e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -60,8 +60,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
- list_del(&fence->list);
- list_add_tail(&fence->list, &rdev->fence_drv.emited);
+ list_move_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
@@ -121,8 +120,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
- list_del(i);
- list_add_tail(i, &rdev->fence_drv.signaled);
+ list_move_tail(i, &rdev->fence_drv.signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
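Both radeon_fence.c hunks are a pure refactor: list_move_tail() is exactly the deleted list_del() + list_add_tail() pair. A minimal stand-in for the kernel's circular doubly-linked list makes the equivalence explicit:

struct list_head { struct list_head *next, *prev; };

static void list_del_stub(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail_stub(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* What list_move_tail() does: unlink, then relink at the tail. */
static void list_move_tail_stub(struct list_head *e, struct list_head *head)
{
	list_del_stub(e);
	list_add_tail_stub(e, head);
}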
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 65016117d95f..f0534ef2f331 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj);
if (r) {
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
- pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (!rdev->gart.ttm_alloced[p])
+ pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
unsigned t;
unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* we need to support large memory configurations */
- /* assume that unbind have already been call on the range */
- rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+ /* On the TTM path, we only use the DMA API when TTM_PAGE_FLAG_DMA32
+ * is requested. */
+ if (dma_addr[i] != DMA_ERROR_CODE) {
+ rdev->gart.ttm_alloced[p] = true;
+ rdev->gart.pages_addr[p] = dma_addr[i];
+ } else {
+ /* we need to support large memory configurations */
+ /* assume that unbind has already been called on the range */
+ rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
- /* FIXME: failed to map page (return -ENOMEM?) */
- radeon_gart_unbind(rdev, offset, pages);
- return -ENOMEM;
+ if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+ /* FIXME: failed to map page (return -ENOMEM?) */
+ radeon_gart_unbind(rdev, offset, pages);
+ return -ENOMEM;
+ }
}
rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+ rdev->gart.num_cpu_pages, GFP_KERNEL);
+ if (rdev->gart.ttm_alloced == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
+ kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
+ rdev->gart.ttm_alloced = NULL;
}
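The GART change above introduces per-page mapping ownership: pages whose DMA address was already established by TTM are flagged in ttm_alloced[] so that radeon_gart_unbind() skips pci_unmap_page() for them. A sketch of the per-slot decision, with DMA_ERROR_CODE standing in for the arch-specific sentinel:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;
#define DMA_ERROR_CODE ((dma_addr_t)~0ull)	/* stand-in; real value is per-arch */

struct gart_slot {
	dma_addr_t addr;
	bool ttm_alloced;	/* true: TTM owns the mapping, don't unmap */
};

static void gart_bind_one(struct gart_slot *slot, dma_addr_t ttm_addr,
			  dma_addr_t self_mapped_addr)
{
	if (ttm_addr != DMA_ERROR_CODE) {
		slot->ttm_alloced = true;	/* adopt TTM's mapping */
		slot->addr = ttm_addr;
	} else {
		slot->ttm_alloced = false;	/* we map it, so we unmap it */
		slot->addr = self_mapped_addr;	/* e.g. from pci_map_page() */
	}
}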
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 1fe95dfe48c9..aa1ca2dea42f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -32,21 +32,18 @@
int radeon_gem_object_init(struct drm_gem_object *obj)
{
- /* we do nothings here */
+ BUG();
+
return 0;
}
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_bo *robj = gobj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(gobj);
- gobj->driver_private = NULL;
if (robj) {
radeon_bo_unref(&robj);
}
-
- drm_gem_object_release(gobj);
- kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
bool discardable, bool kernel,
struct drm_gem_object **obj)
{
- struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
*obj = NULL;
- gobj = drm_gem_object_alloc(rdev->ddev, size);
- if (!gobj) {
- return -ENOMEM;
- }
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
- drm_gem_object_unreference_unlocked(gobj);
return r;
}
- gobj->driver_private = robj;
- *obj = gobj;
+ *obj = &robj->gem_base;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
return 0;
}
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
int r;
/* FIXME: reimplement */
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
/* work out where to validate the buffer to */
domain = wdomain;
if (!domain) {
@@ -231,7 +226,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
@@ -239,23 +234,31 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
- struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(dev, filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
- args->addr_ptr = radeon_bo_mmap_offset(robj);
+ robj = gem_to_radeon_bo(gobj);
+ *offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_mmap *args = data;
+
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -269,7 +272,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
@@ -299,7 +302,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
@@ -320,7 +323,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -338,7 +341,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
goto out;
@@ -348,3 +351,40 @@ out:
drm_gem_object_unreference_unlocked(gobj);
return r;
}
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+
+ args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->size = args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ r = radeon_gem_object_create(rdev, args->size, 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_device,
+ &gobj);
+ if (r)
+ return -ENOMEM;
+
+ r = drm_gem_handle_create(file_priv, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
+ if (r) {
+ return r;
+ }
+ args->handle = handle;
+ return 0;
+}
+
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
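The sizing math in radeon_mode_dumb_create() above deserves a worked example: pitch is the aligned width times bytes per pixel ((bpp + 1) / 8 rounds bits up to whole bytes), and the total is rounded up to a page. A runnable sketch, assuming a flat 64-pixel alignment in place of radeon_align_pitch() (which additionally honours ASIC and tiling rules):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096ull
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint32_t width = 1920, height = 1080, bpp = 32;
	uint32_t pitch = (uint32_t)ALIGN_UP(width, 64) * ((bpp + 1) / 8);
	uint64_t size = ALIGN_UP((uint64_t)pitch * height, PAGE_SZ);

	/* prints: pitch=7680 bytes, size=8294400 bytes (2025 pages) */
	printf("pitch=%u bytes, size=%llu bytes\n",
	       pitch, (unsigned long long)size);
	return 0;
}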
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8387d32caaa7..bf7d4c061451 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -58,9 +58,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)rdev;
/* update BUS flag */
- if (drm_device_is_agp(dev)) {
+ if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
- } else if (drm_device_is_pcie(dev)) {
+ } else if (drm_pci_device_is_pcie(dev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -169,7 +169,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
value = rdev->accel_working;
break;
case RADEON_INFO_TILING_CONFIG:
- if (rdev->family >= CHIP_CEDAR)
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.tile_config;
+ else if (rdev->family >= CHIP_CEDAR)
value = rdev->config.evergreen.tile_config;
else if (rdev->family >= CHIP_RV770)
value = rdev->config.rv770.tile_config;
@@ -205,6 +207,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/* return clock value in KHz */
value = rdev->clock.spll.reference_freq * 10;
break;
+ case RADEON_INFO_NUM_BACKENDS:
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.max_backends;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.max_backends;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.max_backends;
+ else {
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 78968b738e88..66c9af1b3d96 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -521,7 +521,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index a670caaee29e..5067d18d0009 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -676,4 +676,5 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d6b8e88f746..976c3b1b1b6e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
+ drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.num_busy_placement = c;
}
-int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
@@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
unsigned long max_size = 0;
int r;
+ size = ALIGN(size, PAGE_SIZE);
+
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
}
@@ -118,8 +121,13 @@ retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
+ r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+ if (unlikely(r)) {
+ kfree(bo);
+ return r;
+ }
bo->rdev = rdev;
- bo->gobj = gobj;
+ bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
radeon_ttm_placement_from_domain(bo, domain);
@@ -142,12 +150,9 @@ retry:
return r;
}
*bo_ptr = bo;
- if (gobj) {
- mutex_lock(&bo->rdev->gem.mutex);
- list_add_tail(&bo->list, &rdev->gem.objects);
- mutex_unlock(&bo->rdev->gem.mutex);
- }
+
trace_radeon_bo_create(bo);
+
return 0;
}
@@ -260,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
void radeon_bo_force_delete(struct radeon_device *rdev)
{
struct radeon_bo *bo, *n;
- struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
@@ -268,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = bo->gobj;
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
- gobj, bo, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
+ &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+ *((unsigned long *)&bo->gem_base.refcount));
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
- radeon_bo_unref(&bo);
- gobj->driver_private = NULL;
- drm_gem_object_unreference(gobj);
+ /* this should unref the ttm bo */
+ drm_gem_object_unreference(&bo->gem_base);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 22d4c237dea5..7f8e778dba46 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
}
extern int radeon_bo_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj, unsigned long size,
- int byte_align,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 06e79822a2bf..992d99d13fc5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
if (r) {
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5b44f652145c..dee4a0c1b4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8389b4c63d12..60125ddba1e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
@@ -661,6 +661,7 @@ struct radeon_ttm_backend {
unsigned long num_pages;
struct page **pages;
struct page *dummy_read_page;
+ dma_addr_t *dma_addrs;
bool populated;
bool bound;
unsigned offset;
@@ -669,12 +670,14 @@ struct radeon_ttm_backend {
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct radeon_ttm_backend *gtt;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = pages;
+ gtt->dma_addrs = dma_addrs;
gtt->num_pages = num_pages;
gtt->dummy_read_page = dummy_read_page;
gtt->populated = true;
@@ -687,6 +690,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = NULL;
+ gtt->dma_addrs = NULL;
gtt->num_pages = 0;
gtt->dummy_read_page = NULL;
gtt->populated = false;
@@ -707,7 +711,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
gtt->num_pages, bo_mem, backend);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
- gtt->num_pages, gtt->pages);
+ gtt->num_pages, gtt->pages, gtt->dma_addrs);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
gtt->num_pages, gtt->offset);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
new file mode 100644
index 000000000000..6334f8ac1209
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -0,0 +1,619 @@
+cayman 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089A8 VGT_COMPUTE_INDEX
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x000089B0 VGT_HS_OFFCHIP_PARAM
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x0002805C DB_DEPTH_SLICE
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
+0x00028354 SX_SURFACE_SYNC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 SPI_GPR_MGMT
+0x000286FC SPI_LDS_MGMT
+0x00028700 SPI_STACK_MGMT
+0x00028704 SPI_WAVE_MGMT_1
+0x00028708 SPI_WAVE_MGMT_2
+0x00028724 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A70 IA_ENHANCE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AA8 IA_MULTI_VGT_PARAM
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028ABC DB_HTILE_SURFACE
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028BD4 PA_SC_CENTROID_PRIORITY_0
+0x00028BD8 PA_SC_CENTROID_PRIORITY_1
+0x00028BDC PA_SC_LINE_CNTL
+0x00028BE4 PA_SU_VTX_CNTL
+0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
+0x00028BEC PA_CL_GB_VERT_DISC_ADJ
+0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
+0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
+0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
+0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
+0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
+0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
+0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
+0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
+0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
+0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
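The offset/name lists above and below are the radeon command-stream checker's register whitelists (drivers/gpu/drm/radeon/reg_srcs/*): an ASIC header line (e.g. "evergreen 0x9400" below) followed by one safe register per line, which the mkregtable tool compiles into a bitmap consulted when validating userspace command streams. A greatly simplified sketch of such a bitmap lookup; the names and exact indexing here are illustrative, not the generated code:

static int reg_is_safe(const unsigned *safe_bm, unsigned nwords,
		       unsigned reg)
{
	unsigned i = reg >> 7;                /* 32 dword regs per bitmap word */
	unsigned m = 1u << ((reg >> 2) & 31); /* bit for this dword offset */

	return i < nwords && (safe_bm[i] & m);
}

An offset listed twice (such as 0x00028B74 above) just sets the same bit again, so duplicates are harmless to the generated table.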
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 9177f9191837..7e1637176e08 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -1,4 +1,5 @@
evergreen 0x9400
+0x0000802C GRBM_GFX_INDEX
0x00008040 WAIT_UNTIL
0x00008044 WAIT_UNTIL_POLL_CNTL
0x00008048 WAIT_UNTIL_POLL_MASK
@@ -220,6 +221,7 @@ evergreen 0x9400
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028350 SX_MISC
+0x00028354 SX_SURFACE_SYNC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 714ad45757d0..4cc7b717fedd 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1003,7 +1003,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
u64 gpu_addr;
if (rdev->vram_scratch.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->vram_scratch.robj);
if (r) {
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index fa64d25d4248..6464490b240b 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -55,11 +55,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -68,15 +63,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver savage_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &savage_pci_driver);
}
static void __exit savage_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &savage_pci_driver);
}
module_init(savage_init);
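The savage conversion above, and the sis, tdfx and via conversions below, all follow one pattern: the PCI glue moves out of struct drm_driver into a standalone struct pci_driver, and drm_init()/drm_exit() become drm_pci_init()/drm_pci_exit(), which take both objects. A minimal sketch of the pattern, assuming the 2.6.39-era DRM API; "example" is a placeholder driver, not real code:

#include <linux/module.h>
#include <linux/pci.h>
#include "drmP.h"

static struct pci_device_id pciidlist[] = {
	/* a real driver lists its PCI IDs here */
	{ 0, 0, 0, 0, 0, 0, 0 }
};

static struct drm_driver driver = {
	.name = "example",
	/* fops, ioctls and feature flags stay in drm_driver */
};

/* The bus binding now lives in an ordinary pci_driver. */
static struct pci_driver example_pci_driver = {
	.name = "example",
	.id_table = pciidlist,
};

static int __init example_init(void)
{
	return drm_pci_init(&driver, &example_pci_driver);
}

static void __exit example_exit(void)
{
	drm_pci_exit(&driver, &example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);

Separating the two structures lets the DRM core stop assuming every DRM device sits on PCI, which clears the way for platform (non-PCI) DRM drivers.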
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4caf5d01cfd3..46d5be6e97e5 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -82,10 +82,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -95,15 +91,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver sis_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init sis_init(void)
{
driver.num_ioctls = sis_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &sis_pci_driver);
}
static void __exit sis_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &sis_pci_driver);
}
module_init(sis_init);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index b70fa91d761a..8bf98810a8d6 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -52,10 +52,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -65,14 +61,19 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver tdfx_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init tdfx_init(void)
{
- return drm_init(&driver);
+ return drm_pci_init(&driver, &tdfx_pci_driver);
}
static void __exit tdfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &tdfx_pci_driver);
}
module_init(tdfx_init);
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f999e36f30b4..1c4a72f681c1 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend {
static int ttm_agp_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct ttm_agp_backend *agp_be =
container_of(backend, struct ttm_agp_backend, backend);
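Every ttm_backend_func::populate implementation in this series grows a dma_addr_t array argument; backends that bind pages by struct page rather than by bus address, like the AGP backend above and vmwgfx further down, receive it and simply ignore it. A sketch of the adjusted callback shape, with hypothetical names:

/* Sketch only: a backend that has no use for per-page DMA handles. */
static int example_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page,
			    dma_addr_t *dma_addrs)
{
	/* dma_addrs is filled by the DMA32 pool path; a non-DMA
	 * backend can leave it untouched. */
	return 0;
}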
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index af61fc29e843..0b6a55ac2f87 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -406,11 +406,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
mem->mm_node = NULL;
goto moved;
}
-
}
if (bdev->driver->move_notify)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b1e02fffd3cc..737a2a2e46a5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -38,6 +38,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <asm/atomic.h>
@@ -662,7 +663,8 @@ out:
* cached pages.
*/
int ttm_get_pages(struct list_head *pages, int flags,
- enum ttm_caching_state cstate, unsigned count)
+ enum ttm_caching_state cstate, unsigned count,
+ dma_addr_t *dma_address)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
@@ -681,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
- p = alloc_page(gfp_flags);
+ if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+ void *addr;
+ addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dma_address[r],
+ gfp_flags);
+ if (addr == NULL)
+ return -ENOMEM;
+ p = virt_to_page(addr);
+ } else
+ p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}
-
list_add(&p->lru, pages);
}
return 0;
@@ -720,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, 0, flags, cstate);
+ ttm_put_pages(pages, 0, flags, cstate, NULL);
return r;
}
}
@@ -731,17 +741,29 @@ int ttm_get_pages(struct list_head *pages, int flags,
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
- enum ttm_caching_state cstate)
+ enum ttm_caching_state cstate, dma_addr_t *dma_address)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p, *tmp;
+ unsigned r;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
+ r = page_count-1;
list_for_each_entry_safe(p, tmp, pages, lru) {
- __free_page(p);
+ if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+ void *addr = page_address(p);
+ WARN_ON(!addr || !dma_address[r]);
+ if (addr)
+ dma_free_coherent(NULL, PAGE_SIZE,
+ addr,
+ dma_address[r]);
+ dma_address[r] = 0;
+ } else
+ __free_page(p);
+ r--;
}
/* Make the pages list empty */
INIT_LIST_HEAD(pages);
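The new ttm_get_pages()/ttm_put_pages() contract: when TTM_PAGE_FLAG_DMA32 is set and a dma_address array is supplied, pages come from dma_alloc_coherent() and the returned handles must be handed back to ttm_put_pages() so they can be released with dma_free_coherent(). A hedged caller-side sketch; NUM and the caching state are placeholders, not a real TTM user:

#define NUM 16

static int example_alloc_and_free(void)
{
	LIST_HEAD(pages);
	dma_addr_t dma_address[NUM];
	int r;

	r = ttm_get_pages(&pages, TTM_PAGE_FLAG_DMA32, tt_cached,
			  NUM, dma_address);
	if (r)
		return r;
	/* ... map and use the pages ... */
	ttm_put_pages(&pages, NUM, TTM_PAGE_FLAG_DMA32, tt_cached,
		      dma_address);
	return 0;
}

Note the free path above walks dma_address[] from page_count-1 downwards: ttm_get_pages() builds the list with list_add(), which prepends, so the list order is the reverse of the allocation (and hence array) order.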
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index af789dc869b9..86d5b1745a45 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+ ttm->dma_address = drm_calloc_large(ttm->num_pages,
+ sizeof(*ttm->dma_address));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
drm_free_large(ttm->pages);
ttm->pages = NULL;
+ drm_free_large(ttm->dma_address);
+ ttm->dma_address = NULL;
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
INIT_LIST_HEAD(&h);
- ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+ &ttm->dma_address[index]);
if (ret != 0)
return NULL;
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
}
be->func->populate(be, ttm->num_pages, ttm->pages,
- ttm->dummy_read_page);
+ ttm->dummy_read_page, ttm->dma_address);
ttm->state = tt_unbound;
return 0;
}
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
count++;
}
}
- ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+ ttm->dma_address);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
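On the ttm_tt side, the DMA handles live in a directory parallel to the page directory: allocated and freed with the same drm_calloc_large()/drm_free_large() pairing, handed out one slot at a time from __ttm_tt_get_page() (&ttm->dma_address[index]), and returned wholesale at teardown. A sketch of the assumed layout:

/* Sketch: parallel per-page directories (field names as in the diff). */
struct example_tt {
	struct page **pages;     /* one struct page pointer per page */
	dma_addr_t *dma_address; /* matching DMA handle per page */
	unsigned long num_pages; /* length of both arrays */
};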
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e1ff4e7a6eb0..920a55214bcf 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -62,10 +62,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -75,16 +71,21 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver via_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init via_init(void)
{
driver.num_ioctls = via_max_ioctl;
via_init_command_verifier();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &via_pci_driver);
}
static void __exit via_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &via_pci_driver);
}
module_init(via_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 80bc37b274e7..87e43e0733bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -102,7 +102,8 @@ struct vmw_ttm_backend {
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 10ca97ee0206..96949b93d920 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -909,15 +909,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = VMWGFX_DRIVER_NAME,
- .id_table = vmw_pci_id_list,
- .probe = vmw_probe,
- .remove = vmw_remove,
- .driver = {
- .pm = &vmw_pm_ops
- }
- },
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
@@ -926,6 +917,16 @@ static struct drm_driver driver = {
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
+static struct pci_driver vmw_pci_driver = {
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .driver = {
+ .pm = &vmw_pm_ops
+ }
+};
+
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
@@ -934,7 +935,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
- ret = drm_init(&driver);
+ ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
return ret;
@@ -942,7 +943,7 @@ static int __init vmwgfx_init(void)
static void __exit vmwgfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &vmw_pci_driver);
}
module_init(vmwgfx_init);
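vmwgfx is the one conversion in this series whose pci_driver carries more than a name and an ID table: it keeps probe/remove callbacks and PM ops, with probe forwarding to drm_get_pci_dev(). A sketch of that fuller shape, reusing the hypothetical driver and pciidlist objects from the savage sketch above; all names are placeholders:

/* Sketch, assuming the 2.6.39-era DRM PCI helpers. */
static const struct dev_pm_ops example_pm_ops = {
	/* .suspend/.resume hooks would go here */
};

static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static void example_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}

static struct pci_driver example_pci_driver = {
	.name = "example",
	.id_table = pciidlist,
	.probe = example_probe,
	.remove = example_remove,
	.driver = {
		.pm = &example_pm_ops,
	},
};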
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 29113c9b26a8..b3a2cd5118d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -345,7 +345,7 @@ static enum drm_connector_status
return connector_status_disconnected;
}
-static struct drm_display_mode vmw_ldu_connector_builtin[] = {
+static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
@@ -429,7 +429,6 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
- struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -459,6 +458,8 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ const struct drm_display_mode *bmode;
+
bmode = &vmw_ldu_connector_builtin[i];
if (bmode->hdisplay > max_width ||
bmode->vdisplay > max_height)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 297bc9a7d6e6..1bfb4439e4e1 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -467,6 +467,17 @@ config SENSORS_JC42
This driver can also be built as a module. If so, the module
will be called jc42.
+config SENSORS_LINEAGE
+ tristate "Lineage Compact Power Line Power Entry Module"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Lineage Compact Power Line
+ series of DC/DC and AC/DC converters such as CP1800, CP2000AC,
+ CP2000DC, CP2725, and others.
+
+ This driver can also be built as a module. If so, the module
+ will be called lineage-pem.
+
config SENSORS_LM63
tristate "National Semiconductor LM63 and LM64"
depends on I2C
@@ -625,6 +636,17 @@ config SENSORS_LM93
This driver can also be built as a module. If so, the module
will be called lm93.
+config SENSORS_LTC4151
+ tristate "Linear Technology LTC4151"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4151
+ High Voltage I2C Current and Voltage Monitor interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4151.
+
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C && EXPERIMENTAL
@@ -685,6 +707,16 @@ config SENSORS_MAX1619
This driver can also be built as a module. If so, the module
will be called max1619.
+config SENSORS_MAX6639
+ tristate "Maxim MAX6639 sensor chip"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the MAX6639
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6639.
+
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C && EXPERIMENTAL
@@ -735,6 +767,61 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config PMBUS
+ tristate "PMBus support"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ Say yes here if you want to enable PMBus support.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus_core.
+
+if PMBUS
+
+config SENSORS_PMBUS
+ tristate "Generic PMBus devices"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for generic
+ PMBus devices, including but not limited to BMR450, BMR451, BMR453,
+ BMR454, and LTC2978.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus.
+
+config SENSORS_MAX16064
+ tristate "Maxim MAX16064"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16064.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16064.
+
+config SENSORS_MAX34440
+ tristate "Maxim MAX34440/MAX34441"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX34440 and MAX34441.
+
+ This driver can also be built as a module. If so, the module will
+ be called max34440.
+
+config SENSORS_MAX8688
+ tristate "Maxim MAX8688"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX8688.
+
+ This driver can also be built as a module. If so, the module will
+ be called max8688.
+
+endif # PMBUS
+
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
depends on GENERIC_GPIO
@@ -1083,7 +1170,7 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG, W83667HG"
+ tristate "Winbond W83627EHF/EHG/DHG, W83667HG, NCT6775F, NCT6776F"
select HWMON_VID
help
If you say yes here you get support for the hardware
@@ -1094,7 +1181,8 @@ config SENSORS_W83627EHF
chip suited for specific Intel processors that use PECI such as
the Core 2 Duo.
- This driver also supports the W83667HG chip.
+ This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
+ (also known as W83667HG-I), and NCT6776F.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index dde02d99c238..bd0410e4b44f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
+obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o
obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o
@@ -79,11 +80,13 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
obj-$(CONFIG_SENSORS_LM92) += lm92.o
obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
+obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
+obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
@@ -112,6 +115,13 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
+# PMBus drivers
+obj-$(CONFIG_PMBUS) += pmbus_core.o
+obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
+obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
+obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+
ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y)
EXTRA_CFLAGS += -DDEBUG
endif
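The f71882fg diff that follows converts per-chip if/else chains into capability tables indexed by the enum chips value: which of the nine voltage inputs exist, whether in1 has alarm support, whether the chip can beep. New models then only need a table row rather than another conditional. A minimal sketch of the idiom, with placeholder names:

/* Sketch of table-driven chip capabilities (names are illustrative). */
enum chips { chip_a, chip_b, chip_c };

static const char has_beep[3] = {
	0,	/* chip_a */
	1,	/* chip_b */
	1,	/* chip_c */
};

static void update_beep_regs(enum chips type)
{
	if (has_beep[type]) {
		/* touch the beep registers only where they exist */
	}
}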
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6e06019015a5..a4d430ee7e20 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1,6 +1,6 @@
/***************************************************************************
* Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> *
- * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> *
+ * Copyright (C) 2007-2011 Hans de Goede <hdegoede@redhat.com> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
@@ -47,22 +47,23 @@
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
+#define SIO_F71808E_ID 0x0901 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
+#define SIO_F71869_ID 0x0814 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
+#define SIO_F71889E_ID 0x0909 /* Chipset ID */
#define SIO_F8000_ID 0x0581 /* Chipset ID */
#define REGION_LENGTH 8
#define ADDR_REG_OFFSET 5
#define DATA_REG_OFFSET 6
-#define F71882FG_REG_PECI 0x0A
-
-#define F71882FG_REG_IN_STATUS 0x12 /* f71882fg only */
-#define F71882FG_REG_IN_BEEP 0x13 /* f71882fg only */
+#define F71882FG_REG_IN_STATUS 0x12 /* f7188x only */
+#define F71882FG_REG_IN_BEEP 0x13 /* f7188x only */
#define F71882FG_REG_IN(nr) (0x20 + (nr))
-#define F71882FG_REG_IN1_HIGH 0x32 /* f71882fg only */
+#define F71882FG_REG_IN1_HIGH 0x32 /* f7188x only */
#define F71882FG_REG_FAN(nr) (0xA0 + (16 * (nr)))
#define F71882FG_REG_FAN_TARGET(nr) (0xA2 + (16 * (nr)))
@@ -86,28 +87,71 @@
#define F71882FG_REG_FAN_HYST(nr) (0x98 + (nr))
+#define F71882FG_REG_FAN_FAULT_T 0x9F
+#define F71882FG_FAN_NEG_TEMP_EN 0x20
+#define F71882FG_FAN_PROG_SEL 0x80
+
#define F71882FG_REG_POINT_PWM(pwm, point) (0xAA + (point) + (16 * (pwm)))
#define F71882FG_REG_POINT_TEMP(pwm, point) (0xA6 + (point) + (16 * (pwm)))
#define F71882FG_REG_POINT_MAPPING(nr) (0xAF + 16 * (nr))
#define F71882FG_REG_START 0x01
+#define F71882FG_MAX_INS 9
+
#define FAN_MIN_DETECT 366 /* Lowest detectable fanspeed */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
+enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
+ f71889ed, f8000 };
static const char *f71882fg_names[] = {
+ "f71808e",
"f71858fg",
"f71862fg",
+ "f71869", /* Both f71869f and f71869e, reg. compatible and same id */
"f71882fg",
"f71889fg",
+ "f71889ed",
"f8000",
};
+static const char f71882fg_has_in[8][F71882FG_MAX_INS] = {
+ { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, /* f71808e */
+ { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f71858fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71862fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71869 */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71882fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889ed */
+ { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f8000 */
+};
+
+static const char f71882fg_has_in1_alarm[8] = {
+ 0, /* f71808e */
+ 0, /* f71858fg */
+ 0, /* f71862fg */
+ 0, /* f71869 */
+ 1, /* f71882fg */
+ 1, /* f71889fg */
+ 1, /* f71889ed */
+ 0, /* f8000 */
+};
+
+static const char f71882fg_has_beep[8] = {
+ 0, /* f71808e */
+ 0, /* f71858fg */
+ 1, /* f71862fg */
+ 1, /* f71869 */
+ 1, /* f71882fg */
+ 1, /* f71889fg */
+ 1, /* f71889ed */
+ 0, /* f8000 */
+};
+
static struct platform_device *f71882fg_pdev;
/* Super-I/O Function prototypes */
@@ -129,11 +173,12 @@ struct f71882fg_data {
struct mutex update_lock;
int temp_start; /* temp numbering start (0 or 1) */
char valid; /* !=0 if following fields are valid */
+ char auto_point_temp_signed;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
/* Register Values */
- u8 in[9];
+ u8 in[F71882FG_MAX_INS];
u8 in1_max;
u8 in_status;
u8 in_beep;
@@ -142,7 +187,7 @@ struct f71882fg_data {
u16 fan_full_speed[4];
u8 fan_status;
u8 fan_beep;
- /* Note: all models have only 3 temperature channels, but on some
+ /* Note: all models have max 3 temperature channels, but on some
they are addressed as 0-2 and on others as 1-3, so for coding
convenience we reserve space for 4 channels */
u16 temp[4];
@@ -262,13 +307,9 @@ static struct platform_driver f71882fg_driver = {
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-/* Temp and in attr for the f71858fg, the f71858fg is special as it
- has its temperature indexes start at 0 (the others start at 1) and
- it only has 3 voltage inputs */
-static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+/* Temp attr for the f71858fg, the f71858fg is special as it has its
+ temperature indexes start at 0 (the others start at 1) */
+static struct sensor_device_attribute_2 f71858fg_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 0),
@@ -292,7 +333,6 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 1),
SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
@@ -308,17 +348,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
-/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
- SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
- SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
- SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
- SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
- SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
- SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+/* Temp attr for the standard models */
+static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
@@ -328,17 +359,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
the max and crit alarms separately and lm_sensors v2 depends on the
presence of temp#_alarm files. The same goes for temp2/3 _alarm. */
SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
- SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 1),
SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 1),
SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 1),
SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 5),
SENSOR_ATTR_2(temp1_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
+}, {
SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 2),
@@ -346,17 +374,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
store_temp_max_hyst, 0, 2),
/* Should be temp2_max_alarm, see temp1_alarm note */
SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
- SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 2),
SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 2),
SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 2),
SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
- SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 6),
SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
+}, {
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 3),
@@ -364,37 +389,39 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
store_temp_max_hyst, 0, 3),
/* Should be temp3_max_alarm, see temp1_alarm note */
SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 3),
- SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 3),
SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 3),
SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 3),
SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 7),
- SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 7),
SENSOR_ATTR_2(temp3_type, S_IRUGO, show_temp_type, NULL, 0, 3),
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3),
-};
+} };
-/* For models with in1 alarm capability */
-static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
- SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
- 0, 1),
- SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
- 0, 1),
- SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
-};
+/* Temp attr for models which can beep on temp alarm */
+static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { {
+ SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 1),
+ SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 5),
+}, {
+ SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 2),
+ SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 6),
+}, {
+ SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 3),
+ SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 7),
+} };
-/* Temp and in attr for the f8000
+/* Temp attr for the f8000
Note on the f8000 temp_ovt (crit) is used as max, and temp_high (max)
is used as hysteresis value to clear alarms
Also like the f71858fg its temperature indexes start at 0
*/
-static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+static struct sensor_device_attribute_2 f8000_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 0),
@@ -408,7 +435,6 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit,
@@ -419,6 +445,28 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
+/* in attr for all models */
+static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
+ SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+ SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+ SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+ SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
+ SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
+ SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
+ SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
+ SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
+ SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+};
+
+/* For models with in1 alarm capability */
+static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
+ SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
+ 0, 1),
+ SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
+ 0, 1),
+ SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
+};
+
/* Fan / PWM attr common to all models */
static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0),
@@ -479,7 +527,7 @@ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
};
/* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the
- f71858fg / f71882fg / f71889fg */
+ standard models */
static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
@@ -548,7 +596,87 @@ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
};
-/* PWM attr common to the f71858fg, f71882fg and f71889fg */
+/* PWM attr for the f71808e/f71869, almost identical to the f71862fg, but the
+ pwm setting when the temperature is above the pwmX_auto_point1_temp can be
+ programmed instead of being hardcoded to 0xff */
+static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = {
+ SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 0),
+ SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 0),
+
+ SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 1),
+ SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 1),
+
+ SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 2),
+ SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+};
+
+/* PWM attr for the standard models */
static struct sensor_device_attribute_2 fxxxx_auto_pwm_attr[4][14] = { {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
@@ -943,16 +1071,16 @@ static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
static struct f71882fg_data *f71882fg_update_device(struct device *dev)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr, reg = 0, reg2;
+ int nr, reg, point;
int nr_fans = (data->type == f71882fg) ? 4 : 3;
- int nr_ins = (data->type == f71858fg || data->type == f8000) ? 3 : 9;
+ int nr_temps = (data->type == f71808e) ? 2 : 3;
mutex_lock(&data->update_lock);
/* Update once every 60 seconds */
if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
- if (data->type == f71882fg || data->type == f71889fg) {
+ if (f71882fg_has_in1_alarm[data->type]) {
data->in1_max =
f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
data->in_beep =
@@ -960,7 +1088,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
}
/* Get High & boundary temps*/
- for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) {
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++) {
data->temp_ovt[nr] = f71882fg_read8(data,
F71882FG_REG_TEMP_OVT(nr));
data->temp_high[nr] = f71882fg_read8(data,
@@ -973,44 +1102,19 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->temp_hyst[1] = f71882fg_read8(data,
F71882FG_REG_TEMP_HYST(1));
}
+ /* All but the f71858fg / f8000 have this register */
+ if ((data->type != f71858fg) && (data->type != f8000)) {
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+ data->temp_type[1] = (reg & 0x02) ? 2 : 4;
+ data->temp_type[2] = (reg & 0x04) ? 2 : 4;
+ data->temp_type[3] = (reg & 0x08) ? 2 : 4;
+ }
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg) {
+ if (f71882fg_has_beep[data->type]) {
data->fan_beep = f71882fg_read8(data,
F71882FG_REG_FAN_BEEP);
data->temp_beep = f71882fg_read8(data,
F71882FG_REG_TEMP_BEEP);
- /* Have to hardcode type, because temp1 is special */
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
- data->temp_type[2] = (reg & 0x04) ? 2 : 4;
- data->temp_type[3] = (reg & 0x08) ? 2 : 4;
- }
- /* Determine temp index 1 sensor type */
- if (data->type == f71889fg) {
- reg2 = f71882fg_read8(data, F71882FG_REG_START);
- switch ((reg2 & 0x60) >> 5) {
- case 0x00: /* BJT / Thermistor */
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- break;
- case 0x01: /* AMDSI */
- data->temp_type[1] = 5;
- break;
- case 0x02: /* PECI */
- case 0x03: /* Ibex Peak ?? Report as PECI for now */
- data->temp_type[1] = 6;
- break;
- }
- } else {
- reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
- if ((reg2 & 0x03) == 0x01)
- data->temp_type[1] = 6; /* PECI */
- else if ((reg2 & 0x03) == 0x02)
- data->temp_type[1] = 5; /* AMDSI */
- else if (data->type == f71862fg ||
- data->type == f71882fg)
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- else /* f71858fg and f8000 only support BJT */
- data->temp_type[1] = 2;
}
data->pwm_enable = f71882fg_read8(data,
@@ -1025,8 +1129,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
f71882fg_read8(data,
F71882FG_REG_POINT_MAPPING(nr));
- if (data->type != f71862fg) {
- int point;
+ switch (data->type) {
+ default:
for (point = 0; point < 5; point++) {
data->pwm_auto_point_pwm[nr][point] =
f71882fg_read8(data,
@@ -1039,7 +1143,14 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_POINT_TEMP
(nr, point));
}
- } else {
+ break;
+ case f71808e:
+ case f71869:
+ data->pwm_auto_point_pwm[nr][0] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM(nr, 0));
+ /* Fall through */
+ case f71862fg:
data->pwm_auto_point_pwm[nr][1] =
f71882fg_read8(data,
F71882FG_REG_POINT_PWM
@@ -1056,6 +1167,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
f71882fg_read8(data,
F71882FG_REG_POINT_TEMP
(nr, 3));
+ break;
}
}
data->last_limits = jiffies;
@@ -1067,7 +1179,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_TEMP_STATUS);
data->temp_diode_open = f71882fg_read8(data,
F71882FG_REG_TEMP_DIODE_OPEN);
- for (nr = data->temp_start; nr < 3 + data->temp_start; nr++)
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++)
data->temp[nr] = f71882fg_read_temp(data, nr);
data->fan_status = f71882fg_read8(data,
@@ -1083,17 +1196,18 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->pwm[nr] =
f71882fg_read8(data, F71882FG_REG_PWM(nr));
}
-
/* The f8000 can monitor 1 more fan, but has no pwm for it */
if (data->type == f8000)
data->fan[3] = f71882fg_read16(data,
F71882FG_REG_FAN(3));
- if (data->type == f71882fg || data->type == f71889fg)
+
+ if (f71882fg_has_in1_alarm[data->type])
data->in_status = f71882fg_read8(data,
F71882FG_REG_IN_STATUS);
- for (nr = 0; nr < nr_ins; nr++)
- data->in[nr] = f71882fg_read8(data,
- F71882FG_REG_IN(nr));
+ for (nr = 0; nr < F71882FG_MAX_INS; nr++)
+ if (f71882fg_has_in[data->type][nr])
+ data->in[nr] = f71882fg_read8(data,
+ F71882FG_REG_IN(nr));
data->last_updated = jiffies;
data->valid = 1;
@@ -1882,7 +1996,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
val /= 1000;
- if (data->type == f71889fg)
+ if (data->auto_point_temp_signed)
val = SENSORS_LIMIT(val, -128, 127);
else
val = SENSORS_LIMIT(val, 0, 127);
@@ -1929,7 +2043,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
struct f71882fg_data *data;
struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3;
- u8 start_reg;
+ int nr_temps = (sio_data->type == f71808e) ? 2 : 3;
+ u8 start_reg, reg;
data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL);
if (!data)
@@ -1968,37 +2083,72 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
/* The f71858fg temperature alarms behave as
the f8000 alarms in this mode */
err = f71882fg_create_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
else
err = f71882fg_create_sysfs_files(pdev,
- f71858fg_in_temp_attr,
- ARRAY_SIZE(f71858fg_in_temp_attr));
- break;
- case f71882fg:
- case f71889fg:
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- if (err)
- goto exit_unregister_sysfs;
- /* fall through! */
- case f71862fg:
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
break;
+ default:
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
}
if (err)
goto exit_unregister_sysfs;
+
+ if (f71882fg_has_beep[data->type]) {
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0])
+ * nr_temps);
+ if (err)
+ goto exit_unregister_sysfs;
+ }
+
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ err = device_create_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ if (err)
+ goto exit_unregister_sysfs;
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ err = f71882fg_create_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
+ if (err)
+ goto exit_unregister_sysfs;
+ }
}
if (start_reg & 0x02) {
+ switch (data->type) {
+ case f71808e:
+ case f71869:
+ /* These always have signed auto point temps */
+ data->auto_point_temp_signed = 1;
+ /* Fall through to select correct fan/pwm reg bank! */
+ case f71889fg:
+ case f71889ed:
+ reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T);
+ if (reg & F71882FG_FAN_NEG_TEMP_EN)
+ data->auto_point_temp_signed = 1;
+ /* Ensure banked pwm registers point to right bank */
+ reg &= ~F71882FG_FAN_PROG_SEL;
+ f71882fg_write8(data, F71882FG_REG_FAN_FAULT_T, reg);
+ break;
+ default:
+ break;
+ }
+
data->pwm_enable =
f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -2013,8 +2163,11 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71862fg:
err = (data->pwm_enable & 0x15) != 0x15;
break;
+ case f71808e:
+ case f71869:
case f71882fg:
case f71889fg:
+ case f71889ed:
err = 0;
break;
case f8000:
@@ -2034,8 +2187,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
if (err)
goto exit_unregister_sysfs;
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg) {
+ if (f71882fg_has_beep[data->type]) {
err = f71882fg_create_sysfs_files(pdev,
fxxxx_fan_beep_attr, nr_fans);
if (err)
@@ -2043,11 +2195,42 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
}
switch (data->type) {
+ case f71808e:
+ case f71869:
+ case f71889fg:
+ case f71889ed:
+ for (i = 0; i < nr_fans; i++) {
+ data->pwm_auto_point_mapping[i] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_MAPPING(i));
+ if ((data->pwm_auto_point_mapping[i] & 0x80) ||
+ (data->pwm_auto_point_mapping[i] & 3) == 0)
+ break;
+ }
+ if (i != nr_fans) {
+ dev_warn(&pdev->dev,
+ "Auto pwm controlled by raw digital "
+ "data, disabling pwm auto_point "
+ "sysfs attributes\n");
+ goto no_pwm_auto_point;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (data->type) {
case f71862fg:
err = f71882fg_create_sysfs_files(pdev,
f71862fg_auto_pwm_attr,
ARRAY_SIZE(f71862fg_auto_pwm_attr));
break;
+ case f71808e:
+ case f71869:
+ err = f71882fg_create_sysfs_files(pdev,
+ f71869_auto_pwm_attr,
+ ARRAY_SIZE(f71869_auto_pwm_attr));
+ break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
f8000_fan_attr,
@@ -2058,23 +2241,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
- case f71889fg:
- for (i = 0; i < nr_fans; i++) {
- data->pwm_auto_point_mapping[i] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_MAPPING(i));
- if (data->pwm_auto_point_mapping[i] & 0x80)
- break;
- }
- if (i != nr_fans) {
- dev_warn(&pdev->dev,
- "Auto pwm controlled by raw digital "
- "data, disabling pwm auto_point "
- "sysfs attributes\n");
- break;
- }
- /* fall through */
- default: /* f71858fg / f71882fg */
+ default:
err = f71882fg_create_sysfs_files(pdev,
&fxxxx_auto_pwm_attr[0][0],
ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
@@ -2082,6 +2249,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
if (err)
goto exit_unregister_sysfs;
+no_pwm_auto_point:
for (i = 0; i < nr_fans; i++)
dev_info(&pdev->dev, "Fan: %d is in %s mode\n", i + 1,
(data->pwm_enable & (1 << 2 * i)) ?
@@ -2108,7 +2276,8 @@ exit_free:
static int f71882fg_remove(struct platform_device *pdev)
{
struct f71882fg_data *data = platform_get_drvdata(pdev);
- int nr_fans = (data->type == f71882fg) ? 4 : 3;
+ int i, nr_fans = (data->type == f71882fg) ? 4 : 3;
+ int nr_temps = (data->type == f71808e) ? 2 : 3;
u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
if (data->hwmon_dev)
@@ -2121,29 +2290,39 @@ static int f71882fg_remove(struct platform_device *pdev)
case f71858fg:
if (data->temp_config & 0x10)
f71882fg_remove_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
else
f71882fg_remove_sysfs_files(pdev,
- f71858fg_in_temp_attr,
- ARRAY_SIZE(f71858fg_in_temp_attr));
- break;
- case f71882fg:
- case f71889fg:
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- /* fall through! */
- case f71862fg:
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
break;
+ default:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
+ }
+ if (f71882fg_has_beep[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps);
+ }
+
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ device_remove_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
}
}
@@ -2151,10 +2330,10 @@ static int f71882fg_remove(struct platform_device *pdev)
f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg)
+ if (f71882fg_has_beep[data->type]) {
f71882fg_remove_sysfs_files(pdev,
fxxxx_fan_beep_attr, nr_fans);
+ }
switch (data->type) {
case f71862fg:
@@ -2162,6 +2341,12 @@ static int f71882fg_remove(struct platform_device *pdev)
f71862fg_auto_pwm_attr,
ARRAY_SIZE(f71862fg_auto_pwm_attr));
break;
+ case f71808e:
+ case f71869:
+ f71882fg_remove_sysfs_files(pdev,
+ f71869_auto_pwm_attr,
+ ARRAY_SIZE(f71869_auto_pwm_attr));
+ break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
f8000_fan_attr,
@@ -2170,7 +2355,7 @@ static int f71882fg_remove(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
- default: /* f71858fg / f71882fg / f71889fg */
+ default:
f71882fg_remove_sysfs_files(pdev,
&fxxxx_auto_pwm_attr[0][0],
ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
@@ -2200,18 +2385,27 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
switch (devid) {
+ case SIO_F71808E_ID:
+ sio_data->type = f71808e;
+ break;
case SIO_F71858_ID:
sio_data->type = f71858fg;
break;
case SIO_F71862_ID:
sio_data->type = f71862fg;
break;
+ case SIO_F71869_ID:
+ sio_data->type = f71869;
+ break;
case SIO_F71882_ID:
sio_data->type = f71882fg;
break;
case SIO_F71889_ID:
sio_data->type = f71889fg;
break;
+ case SIO_F71889E_ID:
+ sio_data->type = f71889ed;
+ break;
case SIO_F8000_ID:
sio_data->type = f8000;
break;
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
new file mode 100644
index 000000000000..58eded27f385
--- /dev/null
+++ b/drivers/hwmon/lineage-pem.c
@@ -0,0 +1,586 @@
+/*
+ * Driver for Lineage Compact Power Line series of power entry modules.
+ *
+ * Copyright (C) 2010, 2011 Ericsson AB.
+ *
+ * Documentation:
+ * http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/*
+ * This driver supports various Lineage Compact Power Line DC/DC and AC/DC
+ * converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
+ *
+ * The devices are nominally PMBus compliant. However, most standard PMBus
+ * commands are not supported. Specifically, all hardware monitoring and
+ * status reporting commands are non-standard. For this reason, a standard
+ * PMBus driver can not be used.
+ *
+ * All Lineage CPL devices have a built-in I2C bus master selector (PCA9541).
+ * To ensure device access, this driver should only be used as client driver
+ * to the pca9541 I2C master selector driver.
+ */
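As an illustration of that constraint, a board might instantiate this device on the adapter segment registered by the pca9541 driver. A minimal sketch, assuming <linux/i2c.h>; the bus number (2) and slave address (0x40) are placeholders, not values from this patch:

/* Hypothetical board hook-up: bus 2 is assumed to be the pca9541 mux
 * segment and 0x40 an assumed slave address; both depend on the board.
 */
static struct i2c_board_info pem_board_info __initdata = {
	I2C_BOARD_INFO("lineage_pem", 0x40),
};

static int __init board_add_pem(void)
{
	struct i2c_adapter *adap = i2c_get_adapter(2);
	struct i2c_client *client;

	if (!adap)
		return -ENODEV;
	client = i2c_new_device(adap, &pem_board_info);
	i2c_put_adapter(adap);
	return client ? 0 : -ENODEV;
}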
+
+/* Command codes */
+#define PEM_OPERATION 0x01
+#define PEM_CLEAR_INFO_FLAGS 0x03
+#define PEM_VOUT_COMMAND 0x21
+#define PEM_VOUT_OV_FAULT_LIMIT 0x40
+#define PEM_READ_DATA_STRING 0xd0
+#define PEM_READ_INPUT_STRING 0xdc
+#define PEM_READ_FIRMWARE_REV 0xdd
+#define PEM_READ_RUN_TIMER 0xde
+#define PEM_FAN_HI_SPEED 0xdf
+#define PEM_FAN_NORMAL_SPEED 0xe0
+#define PEM_READ_FAN_SPEED 0xe1
+
+/* offsets in data string */
+#define PEM_DATA_STATUS_2 0
+#define PEM_DATA_STATUS_1 1
+#define PEM_DATA_ALARM_2 2
+#define PEM_DATA_ALARM_1 3
+#define PEM_DATA_VOUT_LSB 4
+#define PEM_DATA_VOUT_MSB 5
+#define PEM_DATA_CURRENT 6
+#define PEM_DATA_TEMP 7
+
+/* Virtual entries, to report constants */
+#define PEM_DATA_TEMP_MAX 10
+#define PEM_DATA_TEMP_CRIT 11
+
+/* offsets in input string */
+#define PEM_INPUT_VOLTAGE 0
+#define PEM_INPUT_POWER_LSB 1
+#define PEM_INPUT_POWER_MSB 2
+
+/* offsets in fan data */
+#define PEM_FAN_ADJUSTMENT 0
+#define PEM_FAN_FAN1 1
+#define PEM_FAN_FAN2 2
+#define PEM_FAN_FAN3 3
+
+/* Status register bits */
+#define STS1_OUTPUT_ON (1 << 0)
+#define STS1_LEDS_FLASHING (1 << 1)
+#define STS1_EXT_FAULT (1 << 2)
+#define STS1_SERVICE_LED_ON (1 << 3)
+#define STS1_SHUTDOWN_OCCURRED (1 << 4)
+#define STS1_INT_FAULT (1 << 5)
+#define STS1_ISOLATION_TEST_OK (1 << 6)
+
+#define STS2_ENABLE_PIN_HI (1 << 0)
+#define STS2_DATA_OUT_RANGE (1 << 1)
+#define STS2_RESTARTED_OK (1 << 1)
+#define STS2_ISOLATION_TEST_FAIL (1 << 3)
+#define STS2_HIGH_POWER_CAP (1 << 4)
+#define STS2_INVALID_INSTR (1 << 5)
+#define STS2_WILL_RESTART (1 << 6)
+#define STS2_PEC_ERR (1 << 7)
+
+/* Alarm register bits */
+#define ALRM1_VIN_OUT_LIMIT (1 << 0)
+#define ALRM1_VOUT_OUT_LIMIT (1 << 1)
+#define ALRM1_OV_VOLT_SHUTDOWN (1 << 2)
+#define ALRM1_VIN_OVERCURRENT (1 << 3)
+#define ALRM1_TEMP_WARNING (1 << 4)
+#define ALRM1_TEMP_SHUTDOWN (1 << 5)
+#define ALRM1_PRIMARY_FAULT (1 << 6)
+#define ALRM1_POWER_LIMIT (1 << 7)
+
+#define ALRM2_5V_OUT_LIMIT (1 << 1)
+#define ALRM2_TEMP_FAULT (1 << 2)
+#define ALRM2_OV_LOW (1 << 3)
+#define ALRM2_DCDC_TEMP_HIGH (1 << 4)
+#define ALRM2_PRI_TEMP_HIGH (1 << 5)
+#define ALRM2_NO_PRIMARY (1 << 6)
+#define ALRM2_FAN_FAULT (1 << 7)
+
+#define FIRMWARE_REV_LEN 4
+#define DATA_STRING_LEN 9
+#define INPUT_STRING_LEN 5 /* 4 for most devices */
+#define FAN_SPEED_LEN 5
+
+struct pem_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ bool fans_supported;
+ int input_length;
+ unsigned long last_updated; /* in jiffies */
+
+ u8 firmware_rev[FIRMWARE_REV_LEN];
+ u8 data_string[DATA_STRING_LEN];
+ u8 input_string[INPUT_STRING_LEN];
+ u8 fan_speed[FAN_SPEED_LEN];
+};
+
+static int pem_read_block(struct i2c_client *client, u8 command, u8 *data,
+ int data_len)
+{
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX];
+ int result;
+
+ result = i2c_smbus_read_block_data(client, command, block_buffer);
+ if (unlikely(result < 0))
+ goto abort;
+ if (unlikely(result == 0xff || result != data_len)) {
+ result = -EIO;
+ goto abort;
+ }
+ memcpy(data, block_buffer, data_len);
+ result = 0;
+abort:
+ return result;
+}
+
+static struct pem_data *pem_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pem_data *data = i2c_get_clientdata(client);
+ struct pem_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int result;
+
+ /* Read data string */
+ result = pem_read_block(client, PEM_READ_DATA_STRING,
+ data->data_string,
+ sizeof(data->data_string));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+
+ /* Read input string */
+ if (data->input_length) {
+ result = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ data->input_length);
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ /* Read fan speeds */
+ if (data->fans_supported) {
+ result = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static long pem_get_data(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_DATA_VOUT_LSB:
+ val = (data[index] + (data[index+1] << 8)) * 5 / 2;
+ break;
+ case PEM_DATA_CURRENT:
+ val = data[index] * 200;
+ break;
+ case PEM_DATA_TEMP:
+ val = data[index] * 1000;
+ break;
+ case PEM_DATA_TEMP_MAX:
+ val = 97 * 1000; /* 97 degrees C per datasheet */
+ break;
+ case PEM_DATA_TEMP_CRIT:
+ val = 107 * 1000; /* 107 degrees C per datasheet */
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
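A worked example of the scaling above, with fabricated register contents (the values are illustrative, not from the datasheet):

/* Example: VOUT is a 16-bit little-endian value scaled by 5/2 to mV,
 * current is reported in 200 mA steps, and temperature in whole
 * degrees C (scaled to millidegrees for hwmon).
 */
static void pem_data_example(void)
{
	u8 s[DATA_STRING_LEN] = { 0 };

	s[PEM_DATA_VOUT_LSB] = 0xc0;	/* raw VOUT = 0x12c0 = 4800 */
	s[PEM_DATA_VOUT_MSB] = 0x12;
	s[PEM_DATA_CURRENT] = 25;	/* 25 * 200 = 5000 mA */
	s[PEM_DATA_TEMP] = 42;		/* 42 * 1000 = 42000 mC */

	/* 4800 * 5 / 2 = 12000 mV */
	WARN_ON(pem_get_data(s, sizeof(s), PEM_DATA_VOUT_LSB) != 12000);
	WARN_ON(pem_get_data(s, sizeof(s), PEM_DATA_CURRENT) != 5000);
	WARN_ON(pem_get_data(s, sizeof(s), PEM_DATA_TEMP) != 42000);
}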
+
+static long pem_get_input(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_INPUT_VOLTAGE:
+ if (len == INPUT_STRING_LEN)
+ val = (data[index] + (data[index+1] << 8) - 75) * 1000;
+ else
+ val = (data[index] - 75) * 1000;
+ break;
+ case PEM_INPUT_POWER_LSB:
+ if (len == INPUT_STRING_LEN)
+ index++;
+ val = (data[index] + (data[index+1] << 8)) * 1000000L;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
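For the shorter 4-byte input string, the voltage byte carries a fixed offset of 75 and power is reported in microwatts. Another illustrative sketch; the sample readings are made up:

/* Example: a voltage byte of 123 decodes to (123 - 75) * 1000 =
 * 48000 mV, consistent with a nominal 48 V feed; a raw power word of
 * 500 decodes to 500000000 uW, i.e. 500 W.
 */
static void pem_input_example(void)
{
	u8 s[INPUT_STRING_LEN - 1] = { 0 };

	s[PEM_INPUT_VOLTAGE] = 123;
	s[PEM_INPUT_POWER_LSB] = 0xf4;	/* raw power = 0x01f4 = 500 */
	s[PEM_INPUT_POWER_MSB] = 0x01;

	WARN_ON(pem_get_input(s, sizeof(s), PEM_INPUT_VOLTAGE) != 48000);
	WARN_ON(pem_get_input(s, sizeof(s), PEM_INPUT_POWER_LSB) != 500000000L);
}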
+
+static long pem_get_fan(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_FAN_FAN1:
+ case PEM_FAN_FAN2:
+ case PEM_FAN_FAN3:
+ val = data[index] * 100;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+/*
+ * Show boolean, either a fault or an alarm.
+ * .nr points to the register, .index is the bit mask to check
+ */
+static ssize_t pem_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
+ struct pem_data *data = pem_update_device(dev);
+ u8 status;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ status = data->data_string[attr->nr] & attr->index;
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!status);
+}
+
+static ssize_t pem_show_data(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_data(data->data_string, sizeof(data->data_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_input(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_input(data->input_string, sizeof(data->input_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_fan(data->fan_speed, sizeof(data->fan_speed),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_VOUT_LSB);
+static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VOUT_OUT_LIMIT);
+static SENSOR_DEVICE_ATTR_2(in1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_OV_VOLT_SHUTDOWN);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_VOLTAGE);
+static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1,
+ ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
+
+/* Currents */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_CURRENT);
+static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VIN_OVERCURRENT);
+
+/* Power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_POWER_LSB);
+static SENSOR_DEVICE_ATTR_2(power1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_POWER_LIMIT);
+
+/* Fans */
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN1);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN2);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN3);
+static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_FAN_FAULT);
+
+/* Temperatures */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_CRIT);
+static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_WARNING);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_SHUTDOWN);
+static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_TEMP_FAULT);
+
+static struct attribute *pem_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group pem_group = {
+ .attrs = pem_attributes,
+};
+
+static struct attribute *pem_input_attributes[] = {
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group pem_input_group = {
+ .attrs = pem_input_attributes,
+};
+
+static struct attribute *pem_fan_attributes[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group pem_fan_group = {
+ .attrs = pem_fan_attributes,
+};
+
+static int pem_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct pem_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BLOCK_DATA
+ | I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * We use the next two commands to determine if the device is really
+ * there.
+ */
+ ret = pem_read_block(client, PEM_READ_FIRMWARE_REV,
+ data->firmware_rev, sizeof(data->firmware_rev));
+ if (ret < 0)
+ goto out_kfree;
+
+ ret = i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+ if (ret < 0)
+ goto out_kfree;
+
+ dev_info(&client->dev, "Firmware revision %d.%d.%d\n",
+ data->firmware_rev[0], data->firmware_rev[1],
+ data->firmware_rev[2]);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &pem_group);
+ if (ret)
+ goto out_kfree;
+
+ /*
+ * Check if input readings are supported.
+ * This is the case if we can read input data,
+ * and if the returned data is not all zeros.
+ * Note that input alarms are always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string) - 1);
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2]))
+ data->input_length = sizeof(data->input_string) - 1;
+ else if (ret < 0) {
+ /* Input string is one byte longer for some devices */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string));
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2] || data->input_string[3]))
+ data->input_length = sizeof(data->input_string);
+ }
+ ret = 0;
+ if (data->input_length) {
+ ret = sysfs_create_group(&client->dev.kobj, &pem_input_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ /*
+ * Check if fan speed readings are supported.
+ * This is the case if we can read fan speed data,
+ * and if the returned data is not all zeros.
+ * Note that the fan alarm is always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (!ret && (data->fan_speed[0] || data->fan_speed[1] ||
+ data->fan_speed[2] || data->fan_speed[3])) {
+ data->fans_supported = true;
+ ret = sysfs_create_group(&client->dev.kobj, &pem_fan_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_remove_groups;
+ }
+
+ return 0;
+
+out_remove_groups:
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+out_kfree:
+ kfree(data);
+ return ret;
+}
+
+static int pem_remove(struct i2c_client *client)
+{
+ struct pem_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id pem_id[] = {
+ {"lineage_pem", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pem_id);
+
+static struct i2c_driver pem_driver = {
+ .driver = {
+ .name = "lineage_pem",
+ },
+ .probe = pem_probe,
+ .remove = pem_remove,
+ .id_table = pem_id,
+};
+
+static int __init pem_init(void)
+{
+ return i2c_add_driver(&pem_driver);
+}
+
+static void __exit pem_exit(void)
+{
+ i2c_del_driver(&pem_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
+MODULE_LICENSE("GPL");
+
+module_init(pem_init);
+module_exit(pem_exit);
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 2549de1de4e2..c1f8a8fbf694 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include "lis3lv02d.h"
@@ -88,9 +89,10 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
return lis3lv02d_remove_fs(&lis3_dev);
}
-#ifdef CONFIG_PM
-static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -99,8 +101,9 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
return 0;
}
-static int lis3lv02d_spi_resume(struct spi_device *spi)
+static int lis3lv02d_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -108,21 +111,19 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
return 0;
}
-
-#else
-#define lis3lv02d_spi_suspend NULL
-#define lis3lv02d_spi_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
+ lis3lv02d_spi_resume);
+
static struct spi_driver lis302dl_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &lis3lv02d_spi_pm,
},
.probe = lis302dl_spi_probe,
.remove = __devexit_p(lis302dl_spi_remove),
- .suspend = lis3lv02d_spi_suspend,
- .resume = lis3lv02d_spi_resume,
};
static int __init lis302dl_init(void)
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d2cc28660816..cf47e6e476ed 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102, emc6d103
+ emc6d100, emc6d102, emc6d103, emc6d103s
};
/* The LM85 registers */
@@ -283,10 +283,6 @@ struct lm85_zone {
u8 hyst; /* Low limit hysteresis. (0-15) */
u8 range; /* Temp range, encoded */
s8 critical; /* "All fans ON" temp limit */
- u8 off_desired; /* Actual "off" temperature specified. Preserved
- * to prevent "drift" as other autofan control
- * values change.
- */
u8 max_desired; /* Actual "max" temperature specified. Preserved
* to prevent "drift" as other autofan control
* values change.
@@ -306,6 +302,8 @@ struct lm85_data {
const int *freq_map;
enum chips type;
+ bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */
+
struct mutex update_lock;
int valid; /* !=0 if following fields are valid */
unsigned long last_reading; /* In jiffies */
@@ -352,6 +350,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
{ "emc6d103", emc6d103 },
+ { "emc6d103s", emc6d103s },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -420,8 +419,7 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
struct lm85_data *data = lm85_update_device(dev);
int vid;
- if ((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)) {
+ if (data->has_vid5) {
/* 6-pin VID (VRM 10) */
vid = vid_from_reg(data->vid & 0x3f, data->vrm);
} else {
@@ -891,7 +889,6 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
mutex_lock(&data->update_lock);
min = TEMP_FROM_REG(data->zone[nr].limit);
- data->zone[nr].off_desired = TEMP_TO_REG(val);
data->zone[nr].hyst = HYST_TO_REG(min - val);
if (nr == 0 || nr == 1) {
lm85_write_value(client, LM85_REG_AFAN_HYST1,
@@ -934,18 +931,6 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
((data->zone[nr].range & 0x0f) << 4)
| (data->pwm_freq[nr] & 0x07));
-/* Update temp_auto_hyst and temp_auto_off */
- data->zone[nr].hyst = HYST_TO_REG(TEMP_FROM_REG(
- data->zone[nr].limit) - TEMP_FROM_REG(
- data->zone[nr].off_desired));
- if (nr == 0 || nr == 1) {
- lm85_write_value(client, LM85_REG_AFAN_HYST1,
- (data->zone[0].hyst << 4)
- | data->zone[1].hyst);
- } else {
- lm85_write_value(client, LM85_REG_AFAN_HYST2,
- (data->zone[2].hyst << 4));
- }
mutex_unlock(&data->update_lock);
return count;
}
@@ -1084,13 +1069,7 @@ static struct attribute *lm85_attributes[] = {
&sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
&sensor_dev_attr_temp1_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp2_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp3_auto_temp_min.dev_attr.attr,
@@ -1111,6 +1090,26 @@ static const struct attribute_group lm85_group = {
.attrs = lm85_attributes,
};
+static struct attribute *lm85_attributes_minctl[] = {
+ &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
+};
+
+static const struct attribute_group lm85_group_minctl = {
+ .attrs = lm85_attributes_minctl,
+};
+
+static struct attribute *lm85_attributes_temp_off[] = {
+ &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
+};
+
+static const struct attribute_group lm85_group_temp_off = {
+ .attrs = lm85_attributes_temp_off,
+};
+
static struct attribute *lm85_attributes_in4[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
@@ -1258,16 +1257,9 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D103_A1:
type_name = "emc6d103";
break;
- /*
- * Registers apparently missing in EMC6D103S/EMC6D103:A2
- * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
- * (according to the data sheets), but used unconditionally
- * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
- * So skip EMC6D103S for now.
case LM85_VERSTEP_EMC6D103S:
type_name = "emc6d103s";
break;
- */
}
} else {
dev_dbg(&adapter->dev,
@@ -1280,6 +1272,19 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
+static void lm85_remove_files(struct i2c_client *client, struct lm85_data *data)
+{
+ sysfs_remove_group(&client->dev.kobj, &lm85_group);
+ if (data->type != emc6d103s) {
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_minctl);
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_temp_off);
+ }
+ if (!data->has_vid5)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
+ if (data->type == emc6d100)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+}
+
static int lm85_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1302,6 +1307,7 @@ static int lm85_probe(struct i2c_client *client,
case emc6d100:
case emc6d102:
case emc6d103:
+ case emc6d103s:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1319,11 +1325,26 @@ static int lm85_probe(struct i2c_client *client,
if (err)
goto err_kfree;
+ /* minctl and temp_off exist on all chips except emc6d103s */
+ if (data->type != emc6d103s) {
+ err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl);
+ if (err)
+ goto err_kfree;
+ err = sysfs_create_group(&client->dev.kobj,
+ &lm85_group_temp_off);
+ if (err)
+ goto err_kfree;
+ }
+
/* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used
as a sixth digital VID input rather than an analog input. */
- data->vid = lm85_read_value(client, LM85_REG_VID);
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)))
+ if (data->type == adt7463 || data->type == adt7468) {
+ u8 vid = lm85_read_value(client, LM85_REG_VID);
+ if (vid & 0x80)
+ data->has_vid5 = true;
+ }
+
+ if (!data->has_vid5)
if ((err = sysfs_create_group(&client->dev.kobj,
&lm85_group_in4)))
goto err_remove_files;
@@ -1344,10 +1365,7 @@ static int lm85_probe(struct i2c_client *client,
/* Error out and cleanup code */
err_remove_files:
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
err_kfree:
kfree(data);
return err;
@@ -1357,10 +1375,7 @@ static int lm85_remove(struct i2c_client *client)
{
struct lm85_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
kfree(data);
return 0;
}
@@ -1457,11 +1472,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
- data->in[4] = lm85_read_value(client,
- LM85_REG_IN(4));
- }
+ if (!data->has_vid5)
+ data->in[4] = lm85_read_value(client, LM85_REG_IN(4));
if (data->type == adt7468)
data->cfg5 = lm85_read_value(client, ADT7468_REG_CFG5);
@@ -1487,7 +1499,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102 || data->type == emc6d103) {
+ } else if (data->type == emc6d102 || data->type == emc6d103 ||
+ data->type == emc6d103s) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
@@ -1528,8 +1541,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN_MIN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
+ if (!data->has_vid5) {
data->in_min[4] = lm85_read_value(client,
LM85_REG_IN_MIN(4));
data->in_max[4] = lm85_read_value(client,
@@ -1573,17 +1585,19 @@ static struct lm85_data *lm85_update_device(struct device *dev)
}
}
- i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
- data->autofan[0].min_off = (i & 0x20) != 0;
- data->autofan[1].min_off = (i & 0x40) != 0;
- data->autofan[2].min_off = (i & 0x80) != 0;
+ if (data->type != emc6d103s) {
+ i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
+ data->autofan[0].min_off = (i & 0x20) != 0;
+ data->autofan[1].min_off = (i & 0x40) != 0;
+ data->autofan[2].min_off = (i & 0x80) != 0;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
- data->zone[0].hyst = i >> 4;
- data->zone[1].hyst = i & 0x0f;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
+ data->zone[0].hyst = i >> 4;
+ data->zone[1].hyst = i & 0x0f;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
- data->zone[2].hyst = i >> 4;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
+ data->zone[2].hyst = i >> 4;
+ }
data->last_config = jiffies;
} /* last_config */
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
new file mode 100644
index 000000000000..4ac06b75aa60
--- /dev/null
+++ b/drivers/hwmon/ltc4151.c
@@ -0,0 +1,256 @@
+/*
+ * Driver for Linear Technology LTC4151 High Voltage I2C Current
+ * and Voltage Monitor
+ *
+ * Copyright (C) 2011 AppearTV AS
+ *
+ * Derived from:
+ *
+ * Driver for Linear Technology LTC4261 I2C Negative Voltage Hot
+ * Swap Controller
+ * Copyright (C) 2010 Ericsson AB.
+ *
+ * Datasheet: http://www.linear.com/docs/Datasheet/4151fc.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/* chip registers */
+#define LTC4151_SENSE_H 0x00
+#define LTC4151_SENSE_L 0x01
+#define LTC4151_VIN_H 0x02
+#define LTC4151_VIN_L 0x03
+#define LTC4151_ADIN_H 0x04
+#define LTC4151_ADIN_L 0x05
+
+struct ltc4151_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /* Registers */
+ u8 regs[6];
+};
+
+static struct ltc4151_data *ltc4151_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc4151_data *data = i2c_get_clientdata(client);
+ struct ltc4151_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ /*
+ * The chip's A/D updates 6 times per second
+ * (Conversion Rate 6 - 9 Hz)
+ */
+ if (time_after(jiffies, data->last_updated + HZ / 6) || !data->valid) {
+ int i;
+
+ dev_dbg(&client->dev, "Starting ltc4151 update\n");
+
+ /* Read all registers */
+ for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, i);
+ if (unlikely(val < 0)) {
+ dev_dbg(dev,
+ "Failed to read ADC value: error %d\n",
+ val);
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->regs[i] = val;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/* Return the voltage from the given register in mV */
+static int ltc4151_get_value(struct ltc4151_data *data, u8 reg)
+{
+ u32 val;
+
+ val = (data->regs[reg] << 4) + (data->regs[reg + 1] >> 4);
+
+ switch (reg) {
+ case LTC4151_ADIN_H:
+ /* 500uV resolution. Convert to mV. */
+ val = val * 500 / 1000;
+ break;
+ case LTC4151_SENSE_H:
+ /*
+ * 20uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA.
+ */
+ val = val * 20;
+ break;
+ case LTC4151_VIN_H:
+ /* 25 mV per increment */
+ val = val * 25;
+ break;
+ default:
+ /* If we get here, the developer messed up */
+ WARN_ON_ONCE(1);
+ val = 0;
+ break;
+ }
+
+ return val;
+}
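A quick check of the 12-bit assembly above, with fabricated register contents:

/* Each ADC result is 12 bits: the upper 8 bits live in the _H
 * register, the remaining 4 in the top nibble of the _L register.
 * SENSE_H = 0x12 and SENSE_L = 0x34 give (0x12 << 4) | (0x34 >> 4) =
 * 0x123 = 291, i.e. 291 * 20 = 5820 mA across the 1 mOhm shunt.
 */
static void ltc4151_example(struct ltc4151_data *data)
{
	data->regs[LTC4151_SENSE_H] = 0x12;
	data->regs[LTC4151_SENSE_L] = 0x34;
	WARN_ON(ltc4151_get_value(data, LTC4151_SENSE_H) != 5820);
}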
+
+static ssize_t ltc4151_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ltc4151_data *data = ltc4151_update_device(dev);
+ int value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = ltc4151_get_value(data, attr->index);
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+/*
+ * Input voltages.
+ */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, \
+ ltc4151_show_value, NULL, LTC4151_VIN_H);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, \
+ ltc4151_show_value, NULL, LTC4151_ADIN_H);
+
+/* Currents (via sense resistor) */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, \
+ ltc4151_show_value, NULL, LTC4151_SENSE_H);
+
+/* Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *ltc4151_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group ltc4151_group = {
+ .attrs = ltc4151_attributes,
+};
+
+static int ltc4151_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct ltc4151_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_kzalloc;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &ltc4151_group);
+ if (ret)
+ goto out_sysfs_create_group;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_hwmon_device_register;
+ }
+
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
+out_sysfs_create_group:
+ kfree(data);
+out_kzalloc:
+ return ret;
+}
+
+static int ltc4151_remove(struct i2c_client *client)
+{
+ struct ltc4151_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ltc4151_id[] = {
+ { "ltc4151", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltc4151_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ltc4151_driver = {
+ .driver = {
+ .name = "ltc4151",
+ },
+ .probe = ltc4151_probe,
+ .remove = ltc4151_remove,
+ .id_table = ltc4151_id,
+};
+
+static int __init ltc4151_init(void)
+{
+ return i2c_add_driver(&ltc4151_driver);
+}
+
+static void __exit ltc4151_exit(void)
+{
+ i2c_del_driver(&ltc4151_driver);
+}
+
+MODULE_AUTHOR("Per Dalen <per.dalen@appeartv.com>");
+MODULE_DESCRIPTION("LTC4151 driver");
+MODULE_LICENSE("GPL");
+
+module_init(ltc4151_init);
+module_exit(ltc4151_exit);
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/max16064.c
new file mode 100644
index 000000000000..1d6d717060d3
--- /dev/null
+++ b/drivers/hwmon/max16064.c
@@ -0,0 +1,91 @@
+/*
+ * Hardware monitoring driver for Maxim MAX16064
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info max16064_info = {
+ .pages = 4,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_TEMP,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+};
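These coefficients use the PMBus "direct" data format, in which the device reports Y = (m * X + b) * 10^R and the reader recovers X = (Y * 10^-R - b) / m. A minimal decoding sketch; the helper name is illustrative, and the real conversion lives in the pmbus core:

/* Decode a direct-format reading. With the temperature coefficients
 * above (m = -7612, b = 335, R = -3), a register value of -190
 * decodes to (-190 * 1000 - 335) / -7612 = 25 degrees C.
 */
static long pmbus_direct_decode(long y, long m, long b, int R)
{
	long val = y;

	while (R < 0) {		/* scale by 10^-R */
		val *= 10;
		R++;
	}
	while (R > 0) {
		val /= 10;
		R--;
	}
	return (val - b) / m;
}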
+
+static int max16064_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max16064_info);
+}
+
+static int max16064_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max16064_id[] = {
+ {"max16064", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max16064_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max16064_driver = {
+ .driver = {
+ .name = "max16064",
+ },
+ .probe = max16064_probe,
+ .remove = max16064_remove,
+ .id_table = max16064_id,
+};
+
+static int __init max16064_init(void)
+{
+ return i2c_add_driver(&max16064_driver);
+}
+
+static void __exit max16064_exit(void)
+{
+ i2c_del_driver(&max16064_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX16064");
+MODULE_LICENSE("GPL");
+module_init(max16064_init);
+module_exit(max16064_exit);
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/max34440.c
new file mode 100644
index 000000000000..992b701b4c5e
--- /dev/null
+++ b/drivers/hwmon/max34440.c
@@ -0,0 +1,199 @@
+/*
+ * Hardware monitoring driver for Maxim MAX34440/MAX34441
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum chips { max34440, max34441 };
+
+#define MAX34440_STATUS_OC_WARN (1 << 0)
+#define MAX34440_STATUS_OC_FAULT (1 << 1)
+#define MAX34440_STATUS_OT_FAULT (1 << 5)
+#define MAX34440_STATUS_OT_WARN (1 << 6)
+
+static int max34440_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+ int mfg_status;
+
+ ret = pmbus_set_page(client, page);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OC_WARN)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX34440_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OT_WARN)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX34440_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max34440_info[] = {
+ [max34440] = {
+ .pages = 14,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3, /* R = 0 in datasheet reflects mA */
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+ [max34441] = {
+ .pages = 12,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .direct[PSC_FAN] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+};
+
+static int max34440_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max34440_info[id->driver_data]);
+}
+
+static int max34440_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max34440_id[] = {
+ {"max34440", max34440},
+ {"max34441", max34441},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max34440_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max34440_driver = {
+ .driver = {
+ .name = "max34440",
+ },
+ .probe = max34440_probe,
+ .remove = max34440_remove,
+ .id_table = max34440_id,
+};
+
+static int __init max34440_init(void)
+{
+ return i2c_add_driver(&max34440_driver);
+}
+
+static void __exit max34440_exit(void)
+{
+ i2c_del_driver(&max34440_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX34440/MAX34441");
+MODULE_LICENSE("GPL");
+module_init(max34440_init);
+module_exit(max34440_exit);
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
new file mode 100644
index 000000000000..f20d9978ee78
--- /dev/null
+++ b/drivers/hwmon/max6639.c
@@ -0,0 +1,653 @@
+/*
+ * max6639.c - Support for Maxim MAX6639
+ *
+ * 2-Channel Temperature Monitor with Dual PWM Fan-Speed Controller
+ *
+ * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * based on the initial MAX6639 support from semptian.net
+ * by He Changqing <hechangqing@semptian.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/i2c/max6639.h>
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
+
+/* The MAX6639 registers, valid channel numbers: 0, 1 */
+#define MAX6639_REG_TEMP(ch) (0x00 + (ch))
+#define MAX6639_REG_STATUS 0x02
+#define MAX6639_REG_OUTPUT_MASK 0x03
+#define MAX6639_REG_GCONFIG 0x04
+#define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch))
+#define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch))
+#define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch))
+#define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch))
+#define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4)
+#define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch))
+#define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch))
+#define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch))
+#define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch))
+#define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch))
+#define MAX6639_REG_DEVID 0x3D
+#define MAX6639_REG_MANUID 0x3E
+#define MAX6639_REG_DEVREV 0x3F
+
+/* Register bits */
+#define MAX6639_GCONFIG_STANDBY 0x80
+#define MAX6639_GCONFIG_POR 0x40
+#define MAX6639_GCONFIG_DISABLE_TIMEOUT 0x20
+#define MAX6639_GCONFIG_CH2_LOCAL 0x10
+#define MAX6639_GCONFIG_PWM_FREQ_HI 0x08
+
+#define MAX6639_FAN_CONFIG1_PWM 0x80
+
+#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40
+
+static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
+
+#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
+ (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
+#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
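A worked example of the tach conversion defined above (the values are illustrative):

/* The tach count is inversely proportional to speed within the
 * selected RPM range: with range index 2 (8000 RPM full scale),
 * divider 0 and a count of 120, speed = (8000 * 30) / (1 * 120) =
 * 2000 RPM. A count of 255 means the fan is stopped; 0 means no
 * valid reading yet.
 */
static void max6639_fan_example(void)
{
	WARN_ON(FAN_FROM_REG(120, 0, 2) != 2000);
	WARN_ON(FAN_FROM_REG(255, 0, 2) != 0);
	WARN_ON(FAN_FROM_REG(0, 0, 2) != -1);
}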
+
+/*
+ * Client data (each client gets its own)
+ */
+struct max6639_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ /* Register values sampled regularly */
+ u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */
+ bool temp_fault[2]; /* Detected temperature diode failure */
+ u8 fan[2]; /* Register value: TACH count for fans >=30 */
+ u8 status; /* Detected channel alarms and fan failures */
+
+ /* Register values only written to */
+ u8 pwm[2]; /* Register value: Duty cycle 0..120 */
+ u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */
+ u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */
+ u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */
+
+ /* Register values initialized only once */
+ u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
+ u8 rpm_range; /* Index in above rpm_ranges table */
+};
+
+static struct max6639_data *max6639_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_data *ret = data;
+ int i;
+ int status_reg;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ int res;
+
+ dev_dbg(&client->dev, "Starting max6639 update\n");
+
+ status_reg = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_STATUS);
+ if (status_reg < 0) {
+ ret = ERR_PTR(status_reg);
+ goto abort;
+ }
+
+ data->status = status_reg;
+
+ for (i = 0; i < 2; i++) {
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_FAN_CNT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->fan[i] = res;
+
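+			/*
+			 * Temperature is an 11-bit value in 1/8 degree C
+			 * steps: the extended register carries the three
+			 * fractional bits in bits 7..5 and the diode fault
+			 * flag in bit 0; the main register carries the
+			 * integer part.
+			 */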
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP_EXT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] = res >> 5;
+ data->temp_fault[i] = res & 0x01;
+
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] |= res << 3;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ long temp;
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
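+	/* data->temp[] is in 1/8 degree C steps; sysfs wants millidegrees */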
+ temp = data->temp[attr->index] * 125;
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_temp_fault(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_fault[attr->index]);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
+}
+
+static ssize_t set_temp_max(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(attr->index),
+ data->temp_therm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
+}
+
+static ssize_t set_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(attr->index),
+ data->temp_alert[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
+}
+
+static ssize_t set_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(attr->index),
+ data->temp_ot[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
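+	/* The register holds a 0..120 duty cycle; scale to sysfs 0..255 */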
+ return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
+}
+
+static ssize_t set_pwm(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ val = SENSORS_LIMIT(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm[attr->index] = (u8)(val * 120 / 255);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(attr->index),
+ data->pwm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_fan_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
+ data->ppr, data->rpm_range));
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index)));
+}
+
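+/*
+ * Alarm attribute indices select bits in MAX6639_REG_STATUS: bits 1/0
+ * flag fan 1/2 failure, bits 3/2 THERM (max) events on channel 1/2,
+ * bits 7/6 ALERT (crit) events on channel 1/2, and bits 5/4 OT
+ * (emergency) events on channel 1/2.
+ */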
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 1);
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 0);
+static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
+
+
+static struct attribute *max6639_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_fan2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group max6639_group = {
+ .attrs = max6639_attributes,
+};
+
+/*
+ * Returns the index of the given range in the rpm_ranges table,
+ * or 1 (the 4000 RPM range) if the range is invalid.
+ */
+static int rpm_range_to_reg(int range)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rpm_ranges); i++) {
+ if (rpm_ranges[i] == range)
+ return i;
+ }
+
+ return 1; /* default: 4000 RPM */
+}
+
+static int max6639_init_client(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_platform_data *max6639_info =
+ client->dev.platform_data;
+ int i = 0;
+ int rpm_range = 1; /* default: 4000 RPM */
+ int err = 0;
+
+ /* Reset chip to default values, see below for GCONFIG setup */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_POR);
+ if (err)
+ goto exit;
+
+	/* Pulses per fan revolution default to 2 */
+ if (max6639_info && max6639_info->ppr > 0 &&
+ max6639_info->ppr < 5)
+ data->ppr = max6639_info->ppr;
+ else
+ data->ppr = 2;
+	data->ppr -= 1;
+	/* Write the pulses-per-revolution setting for both fan channels */
+	for (i = 0; i < 2; i++) {
+		err = i2c_smbus_write_byte_data(client,
+				MAX6639_REG_FAN_PPR(i),
+				data->ppr << 5);
+		if (err)
+			goto exit;
+	}
+
+ if (max6639_info)
+ rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
+ data->rpm_range = rpm_range;
+
+ for (i = 0; i < 2; i++) {
+
+ /* Fans config PWM, RPM */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG1(i),
+ MAX6639_FAN_CONFIG1_PWM | rpm_range);
+ if (err)
+ goto exit;
+
+ /* Fans PWM polarity high by default */
+ if (max6639_info && max6639_info->pwm_polarity == 0)
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+ else
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (err)
+ goto exit;
+
+ /*
+ * /THERM full speed enable,
+ * PWM frequency 25kHz, see also GCONFIG below
+ */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG3(i),
+ MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03);
+ if (err)
+ goto exit;
+
+		/* Default limits: THERM 80 C, ALERT 90 C, OT 100 C */
+ data->temp_therm[i] = 80;
+ data->temp_alert[i] = 90;
+ data->temp_ot[i] = 100;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(i),
+ data->temp_therm[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(i),
+ data->temp_alert[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]);
+ if (err)
+ goto exit;
+
+ /* PWM 120/120 (i.e. 100%) */
+ data->pwm[i] = 120;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(i), data->pwm[i]);
+ if (err)
+ goto exit;
+ }
+ /* Start monitoring */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL |
+ MAX6639_GCONFIG_PWM_FREQ_HI);
+exit:
+ return err;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max6639_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int dev_id, manu_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /* Actual detection via device and manufacturer ID */
+ dev_id = i2c_smbus_read_byte_data(client, MAX6639_REG_DEVID);
+ manu_id = i2c_smbus_read_byte_data(client, MAX6639_REG_MANUID);
+ if (dev_id != 0x58 || manu_id != 0x4D)
+ return -ENODEV;
+
+ strlcpy(info->type, "max6639", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int max6639_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max6639_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the max6639 chip */
+ err = max6639_init_client(client);
+ if (err < 0)
+ goto error_free;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &max6639_group);
+ if (err)
+ goto error_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error_remove;
+ }
+
+ dev_info(&client->dev, "temperature sensor and fan control found\n");
+
+ return 0;
+
+error_remove:
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+error_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int max6639_remove(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+
+ kfree(data);
+ return 0;
+}
+
+static int max6639_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
+}
+
+static int max6639_resume(struct i2c_client *client)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
+}
+
+static const struct i2c_device_id max6639_id[] = {
+ {"max6639", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max6639_id);
+
+static struct i2c_driver max6639_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6639",
+ },
+ .probe = max6639_probe,
+ .remove = max6639_remove,
+ .suspend = max6639_suspend,
+ .resume = max6639_resume,
+ .id_table = max6639_id,
+ .detect = max6639_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init max6639_init(void)
+{
+ return i2c_add_driver(&max6639_driver);
+}
+
+static void __exit max6639_exit(void)
+{
+ i2c_del_driver(&max6639_driver);
+}
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("max6639 driver");
+MODULE_LICENSE("GPL");
+
+module_init(max6639_init);
+module_exit(max6639_exit);
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/max8688.c
new file mode 100644
index 000000000000..8ebfef2ecf26
--- /dev/null
+++ b/drivers/hwmon/max8688.c
@@ -0,0 +1,158 @@
+/*
+ * Hardware monitoring driver for Maxim MAX8688
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+#define MAX8688_MFG_STATUS 0xd8
+
+#define MAX8688_STATUS_OC_FAULT (1 << 4)
+#define MAX8688_STATUS_OV_FAULT (1 << 5)
+#define MAX8688_STATUS_OV_WARNING (1 << 8)
+#define MAX8688_STATUS_UV_FAULT (1 << 9)
+#define MAX8688_STATUS_UV_WARNING (1 << 10)
+#define MAX8688_STATUS_UC_FAULT (1 << 11)
+#define MAX8688_STATUS_OC_WARNING (1 << 12)
+#define MAX8688_STATUS_OT_FAULT (1 << 13)
+#define MAX8688_STATUS_OT_WARNING (1 << 14)
+
+static int max8688_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret = 0;
+ int mfg_status;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_STATUS_VOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UV_WARNING)
+ ret |= PB_VOLTAGE_UV_WARNING;
+ if (mfg_status & MAX8688_STATUS_UV_FAULT)
+ ret |= PB_VOLTAGE_UV_FAULT;
+ if (mfg_status & MAX8688_STATUS_OV_WARNING)
+ ret |= PB_VOLTAGE_OV_WARNING;
+ if (mfg_status & MAX8688_STATUS_OV_FAULT)
+ ret |= PB_VOLTAGE_OV_FAULT;
+ break;
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UC_FAULT)
+ ret |= PB_IOUT_UC_FAULT;
+ if (mfg_status & MAX8688_STATUS_OC_WARNING)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX8688_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_OT_WARNING)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX8688_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max8688_info = {
+ .pages = 1,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_CURRENT_OUT] = 23109,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -2,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max8688_get_status,
+};
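+
+/*
+ * The coefficients above follow the PMBus direct data format; readings
+ * decode as X = (Y * 10^-R - b) / m. For example, a raw output voltage
+ * reading of 24000 corresponds to 24000 * 10^1 / 19995 ~= 12.0 V.
+ */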
+
+static int max8688_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max8688_info);
+}
+
+static int max8688_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max8688_id[] = {
+ {"max8688", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max8688_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max8688_driver = {
+ .driver = {
+ .name = "max8688",
+ },
+ .probe = max8688_probe,
+ .remove = max8688_remove,
+ .id_table = max8688_id,
+};
+
+static int __init max8688_init(void)
+{
+ return i2c_add_driver(&max8688_driver);
+}
+
+static void __exit max8688_exit(void)
+{
+ i2c_del_driver(&max8688_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688");
+MODULE_LICENSE("GPL");
+module_init(max8688_init);
+module_exit(max8688_exit);
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c
new file mode 100644
index 000000000000..98e2e28899e2
--- /dev/null
+++ b/drivers/hwmon/pmbus.c
@@ -0,0 +1,203 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+/*
+ * Find sensor groups and status registers on each page.
+ */
+static void pmbus_find_sensor_groups(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int page;
+
+ /* Sensors detected on page 0 only */
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VIN))
+ info->func[0] |= PMBUS_HAVE_VIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VCAP))
+ info->func[0] |= PMBUS_HAVE_VCAP;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_IIN))
+ info->func[0] |= PMBUS_HAVE_IIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_PIN))
+ info->func[0] |= PMBUS_HAVE_PIN;
+ if (info->func[0]
+ && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
+ info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+ info->func[0] |= PMBUS_HAVE_FAN12;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+ info->func[0] |= PMBUS_HAVE_FAN34;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+ info->func[0] |= PMBUS_HAVE_TEMP;
+ if (pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_TEMPERATURE))
+ info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
+ }
+
+ /* Sensors detected on all pages */
+ for (page = 0; page < info->pages; page++) {
+ if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) {
+ info->func[page] |= PMBUS_HAVE_VOUT;
+ if (pmbus_check_byte_register(client, page,
+ PMBUS_STATUS_VOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_VOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) {
+ info->func[page] |= PMBUS_HAVE_IOUT;
+			if (pmbus_check_byte_register(client, page,
+						      PMBUS_STATUS_IOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_IOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_POUT))
+ info->func[page] |= PMBUS_HAVE_POUT;
+ }
+}
+
+/*
+ * Identify chip parameters.
+ */
+static int pmbus_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ if (!info->pages) {
+ /*
+ * Check if the PAGE command is supported. If it is,
+ * keep setting the page number until it fails or until the
+ * maximum number of pages has been reached. Assume that
+ * this is the number of pages supported by the chip.
+ */
+ if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
+ int page;
+
+ for (page = 1; page < PMBUS_PAGES; page++) {
+ if (pmbus_set_page(client, page) < 0)
+ break;
+ }
+ pmbus_set_page(client, 0);
+ info->pages = page;
+ } else {
+ info->pages = 1;
+ }
+ }
+
+ /*
+ * We should check if the COEFFICIENTS register is supported.
+ * If it is, and the chip is configured for direct mode, we can read
+ * the coefficients from the chip, one set per group of sensor
+ * registers.
+ *
+ * To do this, we will need access to a chip which actually supports the
+ * COEFFICIENTS command, since the command is too complex to implement
+ * without testing it.
+ */
+
+ /* Try to find sensor groups */
+ pmbus_find_sensor_groups(client, info);
+
+ return 0;
+}
+
+static int pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+ int ret;
+
+ info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pages = id->driver_data;
+ info->identify = pmbus_identify;
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret < 0)
+ goto out;
+ return 0;
+
+out:
+ kfree(info);
+ return ret;
+}
+
+static int pmbus_remove(struct i2c_client *client)
+{
+ int ret;
+ const struct pmbus_driver_info *info;
+
+ info = pmbus_get_driver_info(client);
+ ret = pmbus_do_remove(client);
+ kfree(info);
+ return ret;
+}
+
+/*
+ * Use driver_data to set the number of pages supported by the chip.
+ */
+static const struct i2c_device_id pmbus_id[] = {
+ {"bmr450", 1},
+ {"bmr451", 1},
+ {"bmr453", 1},
+ {"bmr454", 1},
+ {"ltc2978", 8},
+ {"pmbus", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pmbus_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver pmbus_driver = {
+ .driver = {
+ .name = "pmbus",
+ },
+ .probe = pmbus_probe,
+ .remove = pmbus_remove,
+ .id_table = pmbus_id,
+};
+
+static int __init pmbus_init(void)
+{
+ return i2c_add_driver(&pmbus_driver);
+}
+
+static void __exit pmbus_exit(void)
+{
+ i2c_del_driver(&pmbus_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("Generic PMBus driver");
+MODULE_LICENSE("GPL");
+module_init(pmbus_init);
+module_exit(pmbus_exit);
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus.h
new file mode 100644
index 000000000000..a81f7f228762
--- /dev/null
+++ b/drivers/hwmon/pmbus.h
@@ -0,0 +1,313 @@
+/*
+ * pmbus.h - Common defines and structures for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef PMBUS_H
+#define PMBUS_H
+
+/*
+ * Registers
+ */
+#define PMBUS_PAGE 0x00
+#define PMBUS_OPERATION 0x01
+#define PMBUS_ON_OFF_CONFIG 0x02
+#define PMBUS_CLEAR_FAULTS 0x03
+#define PMBUS_PHASE 0x04
+
+#define PMBUS_CAPABILITY 0x19
+#define PMBUS_QUERY 0x1A
+
+#define PMBUS_VOUT_MODE 0x20
+#define PMBUS_VOUT_COMMAND 0x21
+#define PMBUS_VOUT_TRIM 0x22
+#define PMBUS_VOUT_CAL_OFFSET 0x23
+#define PMBUS_VOUT_MAX 0x24
+#define PMBUS_VOUT_MARGIN_HIGH 0x25
+#define PMBUS_VOUT_MARGIN_LOW 0x26
+#define PMBUS_VOUT_TRANSITION_RATE 0x27
+#define PMBUS_VOUT_DROOP 0x28
+#define PMBUS_VOUT_SCALE_LOOP 0x29
+#define PMBUS_VOUT_SCALE_MONITOR 0x2A
+
+#define PMBUS_COEFFICIENTS 0x30
+#define PMBUS_POUT_MAX 0x31
+
+#define PMBUS_FAN_CONFIG_12 0x3A
+#define PMBUS_FAN_COMMAND_1 0x3B
+#define PMBUS_FAN_COMMAND_2 0x3C
+#define PMBUS_FAN_CONFIG_34 0x3D
+#define PMBUS_FAN_COMMAND_3 0x3E
+#define PMBUS_FAN_COMMAND_4 0x3F
+
+#define PMBUS_VOUT_OV_FAULT_LIMIT 0x40
+#define PMBUS_VOUT_OV_FAULT_RESPONSE 0x41
+#define PMBUS_VOUT_OV_WARN_LIMIT 0x42
+#define PMBUS_VOUT_UV_WARN_LIMIT 0x43
+#define PMBUS_VOUT_UV_FAULT_LIMIT 0x44
+#define PMBUS_VOUT_UV_FAULT_RESPONSE 0x45
+#define PMBUS_IOUT_OC_FAULT_LIMIT 0x46
+#define PMBUS_IOUT_OC_FAULT_RESPONSE 0x47
+#define PMBUS_IOUT_OC_LV_FAULT_LIMIT 0x48
+#define PMBUS_IOUT_OC_LV_FAULT_RESPONSE 0x49
+#define PMBUS_IOUT_OC_WARN_LIMIT 0x4A
+#define PMBUS_IOUT_UC_FAULT_LIMIT 0x4B
+#define PMBUS_IOUT_UC_FAULT_RESPONSE 0x4C
+
+#define PMBUS_OT_FAULT_LIMIT 0x4F
+#define PMBUS_OT_FAULT_RESPONSE 0x50
+#define PMBUS_OT_WARN_LIMIT 0x51
+#define PMBUS_UT_WARN_LIMIT 0x52
+#define PMBUS_UT_FAULT_LIMIT 0x53
+#define PMBUS_UT_FAULT_RESPONSE 0x54
+#define PMBUS_VIN_OV_FAULT_LIMIT 0x55
+#define PMBUS_VIN_OV_FAULT_RESPONSE 0x56
+#define PMBUS_VIN_OV_WARN_LIMIT 0x57
+#define PMBUS_VIN_UV_WARN_LIMIT 0x58
+#define PMBUS_VIN_UV_FAULT_LIMIT 0x59
+
+#define PMBUS_IIN_OC_FAULT_LIMIT 0x5B
+#define PMBUS_IIN_OC_WARN_LIMIT 0x5D
+
+#define PMBUS_POUT_OP_FAULT_LIMIT 0x68
+#define PMBUS_POUT_OP_WARN_LIMIT 0x6A
+#define PMBUS_PIN_OP_WARN_LIMIT 0x6B
+
+#define PMBUS_STATUS_BYTE 0x78
+#define PMBUS_STATUS_WORD 0x79
+#define PMBUS_STATUS_VOUT 0x7A
+#define PMBUS_STATUS_IOUT 0x7B
+#define PMBUS_STATUS_INPUT 0x7C
+#define PMBUS_STATUS_TEMPERATURE 0x7D
+#define PMBUS_STATUS_CML 0x7E
+#define PMBUS_STATUS_OTHER 0x7F
+#define PMBUS_STATUS_MFR_SPECIFIC 0x80
+#define PMBUS_STATUS_FAN_12 0x81
+#define PMBUS_STATUS_FAN_34 0x82
+
+#define PMBUS_READ_VIN 0x88
+#define PMBUS_READ_IIN 0x89
+#define PMBUS_READ_VCAP 0x8A
+#define PMBUS_READ_VOUT 0x8B
+#define PMBUS_READ_IOUT 0x8C
+#define PMBUS_READ_TEMPERATURE_1 0x8D
+#define PMBUS_READ_TEMPERATURE_2 0x8E
+#define PMBUS_READ_TEMPERATURE_3 0x8F
+#define PMBUS_READ_FAN_SPEED_1 0x90
+#define PMBUS_READ_FAN_SPEED_2 0x91
+#define PMBUS_READ_FAN_SPEED_3 0x92
+#define PMBUS_READ_FAN_SPEED_4 0x93
+#define PMBUS_READ_DUTY_CYCLE 0x94
+#define PMBUS_READ_FREQUENCY 0x95
+#define PMBUS_READ_POUT 0x96
+#define PMBUS_READ_PIN 0x97
+
+#define PMBUS_REVISION 0x98
+#define PMBUS_MFR_ID 0x99
+#define PMBUS_MFR_MODEL 0x9A
+#define PMBUS_MFR_REVISION 0x9B
+#define PMBUS_MFR_LOCATION 0x9C
+#define PMBUS_MFR_DATE 0x9D
+#define PMBUS_MFR_SERIAL 0x9E
+
+/*
+ * CAPABILITY
+ */
+#define PB_CAPABILITY_SMBALERT (1<<4)
+#define PB_CAPABILITY_ERROR_CHECK (1<<7)
+
+/*
+ * VOUT_MODE
+ */
+#define PB_VOUT_MODE_MODE_MASK 0xe0
+#define PB_VOUT_MODE_PARAM_MASK 0x1f
+
+#define PB_VOUT_MODE_LINEAR 0x00
+#define PB_VOUT_MODE_VID 0x20
+#define PB_VOUT_MODE_DIRECT 0x40
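+
+/*
+ * In linear mode, the low five bits of VOUT_MODE hold a signed 5-bit
+ * exponent; e.g. a VOUT_MODE value of 0x17 (parameter 10111b = -9)
+ * means output voltages are encoded as mantissa * 2^-9.
+ */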
+
+/*
+ * Fan configuration
+ */
+#define PB_FAN_2_PULSE_MASK ((1 << 0) | (1 << 1))
+#define PB_FAN_2_RPM (1 << 2)
+#define PB_FAN_2_INSTALLED (1 << 3)
+#define PB_FAN_1_PULSE_MASK ((1 << 4) | (1 << 5))
+#define PB_FAN_1_RPM (1 << 6)
+#define PB_FAN_1_INSTALLED (1 << 7)
+
+/*
+ * STATUS_BYTE, STATUS_WORD (lower)
+ */
+#define PB_STATUS_NONE_ABOVE (1<<0)
+#define PB_STATUS_CML (1<<1)
+#define PB_STATUS_TEMPERATURE (1<<2)
+#define PB_STATUS_VIN_UV (1<<3)
+#define PB_STATUS_IOUT_OC (1<<4)
+#define PB_STATUS_VOUT_OV (1<<5)
+#define PB_STATUS_OFF (1<<6)
+#define PB_STATUS_BUSY (1<<7)
+
+/*
+ * STATUS_WORD (upper)
+ */
+#define PB_STATUS_UNKNOWN (1<<8)
+#define PB_STATUS_OTHER (1<<9)
+#define PB_STATUS_FANS (1<<10)
+#define PB_STATUS_POWER_GOOD_N (1<<11)
+#define PB_STATUS_WORD_MFR (1<<12)
+#define PB_STATUS_INPUT (1<<13)
+#define PB_STATUS_IOUT_POUT (1<<14)
+#define PB_STATUS_VOUT (1<<15)
+
+/*
+ * STATUS_IOUT
+ */
+#define PB_POUT_OP_WARNING (1<<0)
+#define PB_POUT_OP_FAULT (1<<1)
+#define PB_POWER_LIMITING (1<<2)
+#define PB_CURRENT_SHARE_FAULT (1<<3)
+#define PB_IOUT_UC_FAULT (1<<4)
+#define PB_IOUT_OC_WARNING (1<<5)
+#define PB_IOUT_OC_LV_FAULT (1<<6)
+#define PB_IOUT_OC_FAULT (1<<7)
+
+/*
+ * STATUS_VOUT, STATUS_INPUT
+ */
+#define PB_VOLTAGE_UV_FAULT (1<<4)
+#define PB_VOLTAGE_UV_WARNING (1<<5)
+#define PB_VOLTAGE_OV_WARNING (1<<6)
+#define PB_VOLTAGE_OV_FAULT (1<<7)
+
+/*
+ * STATUS_INPUT
+ */
+#define PB_PIN_OP_WARNING (1<<0)
+#define PB_IIN_OC_WARNING (1<<1)
+#define PB_IIN_OC_FAULT (1<<2)
+
+/*
+ * STATUS_TEMPERATURE
+ */
+#define PB_TEMP_UT_FAULT (1<<4)
+#define PB_TEMP_UT_WARNING (1<<5)
+#define PB_TEMP_OT_WARNING (1<<6)
+#define PB_TEMP_OT_FAULT (1<<7)
+
+/*
+ * STATUS_FAN
+ */
+#define PB_FAN_AIRFLOW_WARNING (1<<0)
+#define PB_FAN_AIRFLOW_FAULT (1<<1)
+#define PB_FAN_FAN2_SPEED_OVERRIDE (1<<2)
+#define PB_FAN_FAN1_SPEED_OVERRIDE (1<<3)
+#define PB_FAN_FAN2_WARNING (1<<4)
+#define PB_FAN_FAN1_WARNING (1<<5)
+#define PB_FAN_FAN2_FAULT (1<<6)
+#define PB_FAN_FAN1_FAULT (1<<7)
+
+/*
+ * CML_FAULT_STATUS
+ */
+#define PB_CML_FAULT_OTHER_MEM_LOGIC (1<<0)
+#define PB_CML_FAULT_OTHER_COMM (1<<1)
+#define PB_CML_FAULT_PROCESSOR (1<<3)
+#define PB_CML_FAULT_MEMORY (1<<4)
+#define PB_CML_FAULT_PACKET_ERROR (1<<5)
+#define PB_CML_FAULT_INVALID_DATA (1<<6)
+#define PB_CML_FAULT_INVALID_COMMAND (1<<7)
+
+enum pmbus_sensor_classes {
+ PSC_VOLTAGE_IN = 0,
+ PSC_VOLTAGE_OUT,
+ PSC_CURRENT_IN,
+ PSC_CURRENT_OUT,
+ PSC_POWER,
+ PSC_TEMPERATURE,
+ PSC_FAN,
+ PSC_NUM_CLASSES /* Number of power sensor classes */
+};
+
+#define PMBUS_PAGES 32 /* Per PMBus specification */
+
+/* Functionality bit mask */
+#define PMBUS_HAVE_VIN (1 << 0)
+#define PMBUS_HAVE_VCAP (1 << 1)
+#define PMBUS_HAVE_VOUT (1 << 2)
+#define PMBUS_HAVE_IIN (1 << 3)
+#define PMBUS_HAVE_IOUT (1 << 4)
+#define PMBUS_HAVE_PIN (1 << 5)
+#define PMBUS_HAVE_POUT (1 << 6)
+#define PMBUS_HAVE_FAN12 (1 << 7)
+#define PMBUS_HAVE_FAN34 (1 << 8)
+#define PMBUS_HAVE_TEMP (1 << 9)
+#define PMBUS_HAVE_TEMP2 (1 << 10)
+#define PMBUS_HAVE_TEMP3 (1 << 11)
+#define PMBUS_HAVE_STATUS_VOUT (1 << 12)
+#define PMBUS_HAVE_STATUS_IOUT (1 << 13)
+#define PMBUS_HAVE_STATUS_INPUT (1 << 14)
+#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
+#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
+#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+
+struct pmbus_driver_info {
+ int pages; /* Total number of pages */
+ bool direct[PSC_NUM_CLASSES];
+ /* true if device uses direct data format
+ for the given sensor class */
+ /*
+ * Support one set of coefficients for each sensor type
+ * Used for chips providing data in direct mode.
+ */
+ int m[PSC_NUM_CLASSES]; /* mantissa for direct data format */
+ int b[PSC_NUM_CLASSES]; /* offset */
+ int R[PSC_NUM_CLASSES]; /* exponent */
+
+ u32 func[PMBUS_PAGES]; /* Functionality, per page */
+ /*
+ * The get_status function maps manufacturing specific status values
+ * into PMBus standard status values.
+ * This function is optional and only necessary if chip specific status
+ * register values have to be mapped into standard PMBus status register
+ * values.
+ */
+ int (*get_status)(struct i2c_client *client, int page, int reg);
+ /*
+ * The identify function determines supported PMBus functionality.
+ * This function is only necessary if a chip driver supports multiple
+ * chips, and the chip functionality is not pre-determined.
+ */
+ int (*identify)(struct i2c_client *client,
+ struct pmbus_driver_info *info);
+};
+
+/* Function declarations */
+
+int pmbus_set_page(struct i2c_client *client, u8 page);
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
+void pmbus_clear_faults(struct i2c_client *client);
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info);
+int pmbus_do_remove(struct i2c_client *client);
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
+ *client);
+
+#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
new file mode 100644
index 000000000000..6474512f49b0
--- /dev/null
+++ b/drivers/hwmon/pmbus_core.c
@@ -0,0 +1,1658 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/delay.h>
+#include <linux/i2c/pmbus.h>
+#include "pmbus.h"
+
+/*
+ * Constants needed to determine number of sensors, booleans, and labels.
+ */
+#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */
+#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit,
+ crit */
+#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */
+#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
+#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
+#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit,
+ crit */
+
+#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm;
+ c: alarm, crit_alarm;
+ p: crit_alarm */
+#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
+ crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
+#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
+#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+
+#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */
+
+/*
+ * status, status_vout, status_iout, status_fans, status_fan34, and status_temp
+ * are paged. status_input is unpaged.
+ */
+#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
+
+/*
+ * Index into status register array, per status register group
+ */
+#define PB_STATUS_BASE 0
+#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES)
+#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
+#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
+#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+
+struct pmbus_sensor {
+ char name[I2C_NAME_SIZE]; /* sysfs sensor name */
+ struct sensor_device_attribute attribute;
+ u8 page; /* page number */
+ u8 reg; /* register */
+ enum pmbus_sensor_classes class; /* sensor class */
+ bool update; /* runtime sensor update needed */
+ int data; /* Sensor data.
+ Negative if there was a read error */
+};
+
+struct pmbus_boolean {
+ char name[I2C_NAME_SIZE]; /* sysfs boolean name */
+ struct sensor_device_attribute attribute;
+};
+
+struct pmbus_label {
+ char name[I2C_NAME_SIZE]; /* sysfs label name */
+ struct sensor_device_attribute attribute;
+ char label[I2C_NAME_SIZE]; /* label */
+};
+
+struct pmbus_data {
+ struct device *hwmon_dev;
+
+ u32 flags; /* from platform data */
+
+ int exponent; /* linear mode: exponent for output voltages */
+
+ const struct pmbus_driver_info *info;
+
+ int max_attributes;
+ int num_attributes;
+ struct attribute **attributes;
+ struct attribute_group group;
+
+ /*
+ * Sensors cover both sensor and limit registers.
+ */
+ int max_sensors;
+ int num_sensors;
+ struct pmbus_sensor *sensors;
+ /*
+ * Booleans are used for alarms.
+ * Values are determined from status registers.
+ */
+ int max_booleans;
+ int num_booleans;
+ struct pmbus_boolean *booleans;
+ /*
+ * Labels are used to map generic names (e.g., "in1")
+ * to PMBus specific names (e.g., "vin" or "vout1").
+ */
+ int max_labels;
+ int num_labels;
+ struct pmbus_label *labels;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /*
+ * A single status register covers multiple attributes,
+ * so we keep them all together.
+ */
+ u8 status_bits;
+ u8 status[PB_NUM_STATUS_REG];
+
+ u8 currpage;
+};
+
+int pmbus_set_page(struct i2c_client *client, u8 page)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int rv = 0;
+ int newpage;
+
+ if (page != data->currpage) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (newpage != page)
+ rv = -EINVAL;
+ else
+ data->currpage = page;
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(pmbus_set_page);
+
+static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_byte(client, value);
+}
+
+static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
+ u16 word)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_word_data(client, reg, word);
+}
+
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_word_data(client, reg);
+}
+EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+
+static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static void pmbus_clear_fault_page(struct i2c_client *client, int page)
+{
+ pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+}
+
+void pmbus_clear_faults(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < data->info->pages; i++)
+ pmbus_clear_fault_page(client, i);
+}
+EXPORT_SYMBOL_GPL(pmbus_clear_faults);
+
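+/*
+ * Probing a register is not reliable by itself, since some devices ACK
+ * reads of unsupported registers. After a probe read, check the CML
+ * status for an "invalid command" indication to decide whether the
+ * register really exists.
+ */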
+static int pmbus_check_status_cml(struct i2c_client *client, int page)
+{
+ int status, status2;
+
+ status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ if (status < 0 || (status & PB_STATUS_CML)) {
+ status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML);
+ if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_byte_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
+
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_word_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_word_register);
+
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->info;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
+
+static int pmbus_get_status(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->get_status) {
+ status = info->get_status(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_read_byte_data(client, page, reg);
+}
+
+static struct pmbus_data *pmbus_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+
+ mutex_lock(&data->update_lock);
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int i;
+
+ for (i = 0; i < info->pages; i++)
+ data->status[PB_STATUS_BASE + i]
+ = pmbus_read_byte_data(client, i,
+ PMBUS_STATUS_BYTE);
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
+ continue;
+ data->status[PB_STATUS_VOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_VOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT))
+ continue;
+ data->status[PB_STATUS_IOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_IOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
+ continue;
+ data->status[PB_STATUS_TEMP_BASE + i]
+ = pmbus_get_status(client, i,
+ PMBUS_STATUS_TEMPERATURE);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
+ continue;
+ data->status[PB_STATUS_FAN_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_FAN_12);
+ }
+
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34))
+ continue;
+ data->status[PB_STATUS_FAN34_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_FAN_34);
+ }
+
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ data->status[PB_STATUS_INPUT_BASE]
+ = pmbus_get_status(client, 0, PMBUS_STATUS_INPUT);
+
+ for (i = 0; i < data->num_sensors; i++) {
+ struct pmbus_sensor *sensor = &data->sensors[i];
+
+ if (!data->valid || sensor->update)
+ sensor->data
+ = pmbus_read_word_data(client, sensor->page,
+ sensor->reg);
+ }
+ pmbus_clear_faults(client);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+/*
+ * Convert linear sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
+static int pmbus_reg2data_linear(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ s16 exponent;
+ s32 mantissa;
+ long val;
+
+ if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
+ exponent = data->exponent;
+ mantissa = (u16) sensor->data;
+ } else { /* LINEAR11 */
+ exponent = (sensor->data >> 11) & 0x001f;
+ mantissa = sensor->data & 0x07ff;
+
+ if (exponent > 0x0f)
+ exponent |= 0xffe0; /* sign extend exponent */
+ if (mantissa > 0x03ff)
+ mantissa |= 0xfffff800; /* sign extend mantissa */
+ }
+
+ val = mantissa;
+
+ /* scale result to milli-units for all sensors except fans */
+ if (sensor->class != PSC_FAN)
+ val = val * 1000L;
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER)
+ val = val * 1000L;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return (int)val;
+}
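+
+/*
+ * LINEAR11 example: raw word 0xCA80 has exponent 11001b (sign-extended
+ * to -7) and mantissa 0x280 = 640, so for a voltage sensor the result
+ * is 640000 >> 7 = 5000 milli-units, i.e. 5.0 V.
+ */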
+
+/*
+ * Convert direct sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
+static int pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ long val = (s16) sensor->data;
+ long m, b, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
+
+ if (m == 0)
+ return 0;
+
+ /* X = 1/m * (Y * 10^-R - b) */
+ R = -R;
+ /* scale result to milli-units for everything but fans */
+ if (sensor->class != PSC_FAN) {
+ R += 3;
+ b *= 1000;
+ }
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER) {
+ R += 3;
+ b *= 1000;
+ }
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return (int)((val - b) / m);
+}
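+
+/*
+ * Direct mode example, using the MAX8688 output voltage coefficients
+ * (m = 19995, b = 0, R = -1): a raw reading of 24000 decodes to
+ * 24000 * 10^(1+3) / 19995 ~= 12003 milli-units (12.003 V).
+ */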
+
+static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+{
+ int val;
+
+ if (data->info->direct[sensor->class])
+ val = pmbus_reg2data_direct(data, sensor);
+ else
+ val = pmbus_reg2data_linear(data, sensor);
+
+ return val;
+}
+
+#define MAX_MANTISSA (1023 * 1000)
+#define MIN_MANTISSA (511 * 1000)
+
+static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ s16 exponent = 0, mantissa;
+ bool negative = false;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ if (class == PSC_VOLTAGE_OUT) {
+ /* LINEAR16 does not support negative voltages */
+ if (val < 0)
+ return 0;
+
+ /*
+		 * The exponent is static, so we have no choice but to
+		 * adjust the value to it.
+ */
+ if (data->exponent < 0)
+ val <<= -data->exponent;
+ else
+ val >>= data->exponent;
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ return val & 0xffff;
+ }
+
+ if (val < 0) {
+ negative = true;
+ val = -val;
+ }
+
+ /* Power is in uW. Convert to mW before converting. */
+ if (class == PSC_POWER)
+ val = DIV_ROUND_CLOSEST(val, 1000L);
+
+ /*
+ * For simplicity, convert fan data to milli-units
+ * before calculating the exponent.
+ */
+ if (class == PSC_FAN)
+ val = val * 1000;
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+ /* Ensure that resulting number is within range */
+ if (mantissa > 0x3ff)
+ mantissa = 0x3ff;
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
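+
+/*
+ * Encoding example: 5000 milli-units is scaled by 2^7 to 640000 to reach
+ * MIN_MANTISSA, giving exponent -7 and mantissa 640, i.e. the LINEAR11
+ * word 0xCA80 -- the inverse of the pmbus_reg2data_linear() example.
+ */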
+
+static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ long m, b, R;
+
+ m = data->info->m[class];
+ b = data->info->b[class];
+ R = data->info->R[class];
+
+ /* Power is in uW. Adjust R and b. */
+ if (class == PSC_POWER) {
+ R -= 3;
+ b *= 1000;
+ }
+
+ /* Calculate Y = (m * X + b) * 10^R */
+ if (class != PSC_FAN) {
+ R -= 3; /* Adjust R and b for data in milli-units */
+ b *= 1000;
+ }
+ val = val * m + b;
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return val;
+}
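+
+/*
+ * With the same MAX8688 voltage coefficients, 12003 milli-units encodes
+ * back to 12003 * 19995 * 10^(-1-3) ~= 24000, matching the decoding
+ * example above.
+ */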
+
+static u16 pmbus_data2reg(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ u16 regval;
+
+ if (data->info->direct[class])
+ regval = pmbus_data2reg_direct(data, class, val);
+ else
+ regval = pmbus_data2reg_linear(data, class, val);
+
+ return regval;
+}
+
+/*
+ * Return a boolean calculated from converted data.
+ * <index> defines a status register index and mask, and optionally
+ * two sensor indices. The upper half-word references the two optional
+ * sensors, the lower half-word references the status register and mask.
+ *
+ * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits
+ * of index are set; s1 and s2 (the sensor index values) are zero in this
+ * case. The function returns true if (status[reg] & mask) is true.
+ *
+ * If the boolean was created with pmbus_add_boolean_cmp(), a comparison
+ * against a specified limit has to be performed to determine the boolean
+ * result. In this case, the function returns true if (status[reg] & mask)
+ * is true and v1 >= v2 (where v1 and v2 are sensor values referenced by
+ * sensor indices s1 and s2).
+ *
+ * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
+ * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
+ *
+ * If a negative value is stored in any of the referenced registers, this
+ * value reflects an error code which will be returned.
+ */
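+/*
+ * For example, the input undervoltage warning boolean created with
+ * pmbus_add_boolean_reg(..., PB_STATUS_INPUT_BASE, PB_VOLTAGE_UV_WARNING)
+ * encodes to index (160 << 8) | 0x20 = 0xa020, with s1 = s2 = 0.
+ */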
+static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+{
+ u8 s1 = (index >> 24) & 0xff;
+ u8 s2 = (index >> 16) & 0xff;
+ u8 reg = (index >> 8) & 0xff;
+ u8 mask = index & 0xff;
+ int status;
+ u8 regval;
+
+ status = data->status[reg];
+ if (status < 0)
+ return status;
+
+ regval = status & mask;
+ if (!s1 && !s2)
+ *val = !!regval;
+ else {
+ int v1, v2;
+ struct pmbus_sensor *sensor1, *sensor2;
+
+ sensor1 = &data->sensors[s1];
+ if (sensor1->data < 0)
+ return sensor1->data;
+ sensor2 = &data->sensors[s2];
+ if (sensor2->data < 0)
+ return sensor2->data;
+
+ v1 = pmbus_reg2data(data, sensor1);
+ v2 = pmbus_reg2data(data, sensor2);
+ *val = !!(regval && v1 >= v2);
+ }
+ return 0;
+}
+
+static ssize_t pmbus_show_boolean(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ int val;
+ int err;
+
+ err = pmbus_get_boolean(data, attr->index, &val);
+ if (err)
+ return err;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_show_sensor(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ struct pmbus_sensor *sensor;
+
+ sensor = &data->sensors[attr->index];
+ if (sensor->data < 0)
+ return sensor->data;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+}
+
+static ssize_t pmbus_set_sensor(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct pmbus_sensor *sensor = &data->sensors[attr->index];
+ ssize_t rv = count;
+ long val = 0;
+ int ret;
+ u16 regval;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ regval = pmbus_data2reg(data, sensor->class, val);
+ ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
+ if (ret < 0)
+ rv = ret;
+ else
+ data->sensors[attr->index].data = regval;
+ mutex_unlock(&data->update_lock);
+ return rv;
+}
+
+static ssize_t pmbus_show_label(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ data->labels[attr->index].label);
+}
+
+#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
+do { \
+ struct sensor_device_attribute *a \
+ = &data->_type##s[data->num_##_type##s].attribute; \
+ BUG_ON(data->num_attributes >= data->max_attributes); \
+ a->dev_attr.attr.name = _name; \
+ a->dev_attr.attr.mode = _mode; \
+ a->dev_attr.show = _show; \
+ a->dev_attr.store = _set; \
+ a->index = _idx; \
+ data->attributes[data->num_attributes] = &a->dev_attr.attr; \
+ data->num_attributes++; \
+} while (0)
+
+#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \
+ pmbus_show_##_type, NULL)
+
+#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \
+ pmbus_show_##_type, pmbus_set_##_type)
+
+static void pmbus_add_boolean(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int idx)
+{
+ struct pmbus_boolean *boolean;
+
+ BUG_ON(data->num_booleans >= data->max_booleans);
+
+ boolean = &data->booleans[data->num_booleans];
+
+ snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
+ name, seq, type);
+ PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx);
+ data->num_booleans++;
+}
+
+static void pmbus_add_boolean_reg(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int reg, int bit)
+{
+ pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit);
+}
+
+static void pmbus_add_boolean_cmp(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int i1, int i2, int reg, int mask)
+{
+ pmbus_add_boolean(data, name, type, seq,
+ (i1 << 24) | (i2 << 16) | (reg << 8) | mask);
+}
+
+static void pmbus_add_sensor(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int page, int reg, enum pmbus_sensor_classes class,
+ bool update)
+{
+ struct pmbus_sensor *sensor;
+
+ BUG_ON(data->num_sensors >= data->max_sensors);
+
+ sensor = &data->sensors[data->num_sensors];
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ sensor->page = page;
+ sensor->reg = reg;
+ sensor->class = class;
+ sensor->update = update;
+ if (update)
+ PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ else
+ PMBUS_ADD_SET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ data->num_sensors++;
+}
+
+static void pmbus_add_label(struct pmbus_data *data,
+ const char *name, int seq,
+ const char *lstring, int index)
+{
+ struct pmbus_label *label;
+
+ BUG_ON(data->num_labels >= data->max_labels);
+
+ label = &data->labels[data->num_labels];
+ snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
+ if (!index)
+ strncpy(label->label, lstring, sizeof(label->label) - 1);
+ else
+ snprintf(label->label, sizeof(label->label), "%s%d", lstring,
+ index);
+
+ PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels);
+ data->num_labels++;
+}
+
+static const int pmbus_temp_registers[] = {
+ PMBUS_READ_TEMPERATURE_1,
+ PMBUS_READ_TEMPERATURE_2,
+ PMBUS_READ_TEMPERATURE_3
+};
+
+static const int pmbus_temp_flags[] = {
+ PMBUS_HAVE_TEMP,
+ PMBUS_HAVE_TEMP2,
+ PMBUS_HAVE_TEMP3
+};
+
+static const int pmbus_fan_registers[] = {
+ PMBUS_READ_FAN_SPEED_1,
+ PMBUS_READ_FAN_SPEED_2,
+ PMBUS_READ_FAN_SPEED_3,
+ PMBUS_READ_FAN_SPEED_4
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_status_registers[] = {
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_34,
+ PMBUS_STATUS_FAN_34
+};
+
+static const u32 pmbus_fan_flags[] = {
+ PMBUS_HAVE_FAN12,
+ PMBUS_HAVE_FAN12,
+ PMBUS_HAVE_FAN34,
+ PMBUS_HAVE_FAN34
+};
+
+static const u32 pmbus_fan_status_flags[] = {
+ PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN34,
+ PMBUS_HAVE_STATUS_FAN34
+};
+
+/*
+ * Determine maximum number of sensors, booleans, and labels.
+ * To keep things simple, only make a rough high estimate.
+ */
+static void pmbus_find_max_attr(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, max_sensors, max_booleans, max_labels;
+
+ max_sensors = PMBUS_MAX_INPUT_SENSORS;
+ max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
+ max_labels = PMBUS_MAX_INPUT_LABELS;
+
+ for (page = 0; page < info->pages; page++) {
+ if (info->func[page] & PMBUS_HAVE_VOUT) {
+ max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_IOUT) {
+ max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_POUT) {
+ max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_FAN12) {
+ max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
+ }
+ if (info->func[page] & PMBUS_HAVE_FAN34) {
+ max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP2) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP3) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ }
+ data->max_sensors = max_sensors;
+ data->max_booleans = max_booleans;
+ data->max_labels = max_labels;
+ data->max_attributes = max_sensors + max_booleans + max_labels;
+}
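+
+/*
+ * Example: a two-page chip with PMBUS_HAVE_VOUT and PMBUS_HAVE_IOUT set on
+ * both pages gets max_sensors = PMBUS_MAX_INPUT_SENSORS +
+ * 2 * (PMBUS_VOUT_SENSORS_PER_PAGE + PMBUS_IOUT_SENSORS_PER_PAGE) and
+ * max_labels = PMBUS_MAX_INPUT_LABELS + 4. Overestimating is harmless:
+ * the totals only size the allocations in pmbus_do_probe and bound the
+ * BUG_ON sanity checks above.
+ */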
+
+/*
+ * Search for attributes. Allocate sensors, booleans, and labels as needed.
+ */
+static void pmbus_find_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, i0, i1, in_index;
+
+ /*
+ * Input voltage sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_VIN) {
+ bool have_alarm = false;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vin", 0);
+ pmbus_add_sensor(data, "in", "input", in_index,
+ 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index,
+ 0, PMBUS_VIN_UV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index,
+ 0, PMBUS_VIN_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index,
+ 0, PMBUS_VIN_OV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index,
+ 0, PMBUS_VIN_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE,
+ PB_STATUS_VIN_UV);
+ in_index++;
+ }
+ if (info->func[0] & PMBUS_HAVE_VCAP) {
+ pmbus_add_label(data, "in", in_index, "vcap", 0);
+ pmbus_add_sensor(data, "in", "input", in_index, 0,
+ PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true);
+ in_index++;
+ }
+
+ /*
+ * Output voltage sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_VOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vout", page + 1);
+ pmbus_add_sensor(data, "in", "input", in_index, page,
+ PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index, page,
+ PMBUS_VOUT_UV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index, page,
+ PMBUS_VOUT_OV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_VOUT_OV);
+ in_index++;
+ }
+
+ /*
+ * Current sensors
+ */
+
+ /*
+ * Input current sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_IIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iin", 0);
+ pmbus_add_sensor(data, "curr", "input", in_index,
+ 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index,
+ 0, PMBUS_IIN_OC_WARN_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_WARNING);
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index,
+ 0, PMBUS_IIN_OC_FAULT_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output current sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_IOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iout", page + 1);
+ pmbus_add_sensor(data, "curr", "input", in_index, page,
+ PMBUS_READ_IOUT, PSC_CURRENT_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index, page,
+ PMBUS_IOUT_OC_WARN_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "lcrit", in_index, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "lcrit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_UC_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "curr", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_IOUT_OC);
+ in_index++;
+ }
+
+ /*
+ * Power sensors
+ */
+ /*
+ * Input Power sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_PIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pin", 0);
+ pmbus_add_sensor(data, "power", "input", in_index,
+ 0, PMBUS_READ_PIN, PSC_POWER, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_PIN_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index,
+ 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "power",
+ "alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_PIN_OP_WARNING);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output Power sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool need_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_POUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pout", page + 1);
+ pmbus_add_sensor(data, "power", "input", in_index, page,
+ PMBUS_READ_POUT, PSC_POWER, true);
+ /*
+ * Per hwmon sysfs API, power_cap is to be used to limit output
+ * power.
+ * We have two registers related to maximum output power,
+ * PMBUS_POUT_MAX and PMBUS_POUT_OP_WARN_LIMIT.
+ * PMBUS_POUT_MAX matches the powerX_cap attribute definition.
+ * There is no attribute in the API to match
+ * PMBUS_POUT_OP_WARN_LIMIT. We use powerX_max for now.
+ */
+ if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "cap", in_index, page,
+ PMBUS_POUT_MAX, PSC_POWER, false);
+ need_alarm = true;
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index, page,
+ PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ need_alarm = true;
+ }
+ if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT))
+ pmbus_add_boolean_reg(data, "power", "alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE + page,
+ PB_POUT_OP_WARNING
+ | PB_POWER_LIMITING);
+
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "crit", in_index, page,
+ PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER,
+ false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT)
+ pmbus_add_boolean_reg(data, "power",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE
+ + page,
+ PB_POUT_OP_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Temperature sensors
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int t;
+
+ for (t = 0; t < ARRAY_SIZE(pmbus_temp_registers); t++) {
+ bool have_alarm = false;
+
+ /*
+ * A PMBus chip may support any combination of
+ * temperature registers on any page. So we can not
+ * abort after a failure to detect a register, but have
+ * to continue checking for all registers on all pages.
+ */
+ if (!(info->func[page] & pmbus_temp_flags[t]))
+ continue;
+
+ if (!pmbus_check_word_register
+ (client, page, pmbus_temp_registers[t]))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "input", in_index, page,
+ pmbus_temp_registers[t],
+ PSC_TEMPERATURE, true);
+
+ /*
+ * PMBus provides only one status register for TEMP1-3.
+ * Thus, we can not use the status register to determine
+ * which of the three sensors actually caused an alarm.
+ * Always compare current temperature against the limit
+ * registers to determine alarm conditions for a
+ * specific sensor.
+ *
+ * Since there is only one set of limit registers for
+ * up to three temperature sensors, we need to update
+ * all limit registers after the limit was changed for
+ * one of the sensors. This ensures that correct limits
+ * are reported for all temperature sensors.
+ */
+ if (pmbus_check_word_register
+ (client, page, PMBUS_UT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "min", in_index,
+ page, PMBUS_UT_WARN_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "min_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_UT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "lcrit",
+ in_index, page,
+ PMBUS_UT_FAULT_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "lcrit_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register
+ (client, page, PMBUS_OT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "max", in_index,
+ page, PMBUS_OT_WARN_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "max_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_OT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "crit", in_index,
+ page, PMBUS_OT_FAULT_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "crit_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Last resort - we were not able to create any alarm
+ * attributes. Report alarm for all sensors using the
+ * status register temperature alarm bit.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "temp", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_TEMPERATURE);
+ in_index++;
+ }
+ }
+
+ /*
+ * Fans
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int f;
+
+ for (f = 0; f < ARRAY_SIZE(pmbus_fan_registers); f++) {
+ int regval;
+
+ if (!(info->func[page] & pmbus_fan_flags[f]))
+ break;
+
+ if (!pmbus_check_word_register(client, page,
+ pmbus_fan_registers[f])
+ || !pmbus_check_byte_register(client, page,
+ pmbus_fan_config_registers[f]))
+ break;
+
+ /*
+ * Skip fan if not installed.
+ * Each fan configuration register covers multiple fans,
+ * so we have to do some magic.
+ */
+ regval = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[f]);
+ if (regval < 0 ||
+ (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
+ continue;
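+ /*
+ * Example: for f == 1 (fan 2) the shared register is
+ * PMBUS_FAN_CONFIG_12, and the test above checks
+ * PB_FAN_1_INSTALLED >> 4, the installed bit for the
+ * second fan of the pair.
+ */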
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "fan", "input", in_index, page,
+ pmbus_fan_registers[f], PSC_FAN, true);
+
+ /*
+ * Each fan status register covers multiple fans,
+ * so we have to do some magic.
+ */
+ if ((info->func[page] & pmbus_fan_status_flags[f]) &&
+ pmbus_check_byte_register(client,
+ page, pmbus_fan_status_registers[f])) {
+ int base;
+
+ if (f > 1) /* fan 3, 4 */
+ base = PB_STATUS_FAN34_BASE + page;
+ else
+ base = PB_STATUS_FAN_BASE + page;
+ pmbus_add_boolean_reg(data, "fan", "alarm",
+ in_index, base,
+ PB_FAN_FAN1_WARNING >> (f & 1));
+ pmbus_add_boolean_reg(data, "fan", "fault",
+ in_index, base,
+ PB_FAN_FAN1_FAULT >> (f & 1));
+ }
+ in_index++;
+ }
+ }
+}
+
+/*
+ * Identify chip parameters.
+ * This function is called for all chips.
+ */
+static int pmbus_identify_common(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ int vout_mode = -1, exponent;
+
+ if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0 && vout_mode != 0xff) {
+ /*
+ * Not all chips support the VOUT_MODE command,
+ * so a failure to read it is not an error.
+ */
+ switch (vout_mode >> 5) {
+ case 0: /* linear mode */
+ if (data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+
+ exponent = vout_mode & 0x1f;
+ /* and sign-extend it */
+ if (exponent & 0x10)
+ exponent |= ~0x1f;
+ data->exponent = exponent;
+ break;
+ case 2: /* direct mode */
+ if (!data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
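+
+ /*
+ * Example: VOUT_MODE = 0x17 selects linear mode (upper three bits
+ * 000) with the five-bit exponent 0b10111; bit 4 is set, so the
+ * sign extension above yields data->exponent = -9.
+ */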
+
+ /* Determine maximum number of sensors, booleans, and labels */
+ pmbus_find_max_attr(client, data);
+ pmbus_clear_fault_page(client, 0);
+ return 0;
+}
+
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info)
+{
+ const struct pmbus_platform_data *pdata = client->dev.platform_data;
+ struct pmbus_data *data;
+ int ret;
+
+ if (!info) {
+ dev_err(&client->dev, "Missing chip information");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&client->dev, "No memory to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * Bail out if status register or PMBus revision register
+ * does not exist.
+ */
+ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
+ || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
+ dev_err(&client->dev,
+ "Status or revision register not found\n");
+ ret = -ENODEV;
+ goto out_data;
+ }
+
+ if (pdata)
+ data->flags = pdata->flags;
+ data->info = info;
+
+ pmbus_clear_faults(client);
+
+ if (info->identify) {
+ ret = (*info->identify)(client, info);
+ if (ret < 0) {
+ dev_err(&client->dev, "Chip identification failed\n");
+ goto out_data;
+ }
+ }
+
+ if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
+ dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
+ info->pages);
+ ret = -EINVAL;
+ goto out_data;
+ }
+ /*
+ * Bail out if more than one page was configured, but we can not
+ * select the highest page. This is an indication that the wrong
+ * chip type was selected. Better bail out now than keep
+ * returning errors later on.
+ */
+ if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
+ dev_err(&client->dev, "Failed to select page %d\n",
+ info->pages - 1);
+ ret = -EINVAL;
+ goto out_data;
+ }
+
+ ret = pmbus_identify_common(client, data);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to identify chip capabilities\n");
+ goto out_data;
+ }
+
+ ret = -ENOMEM;
+ data->sensors = kzalloc(sizeof(struct pmbus_sensor) * data->max_sensors,
+ GFP_KERNEL);
+ if (!data->sensors) {
+ dev_err(&client->dev, "No memory to allocate sensor data\n");
+ goto out_data;
+ }
+
+ data->booleans = kzalloc(sizeof(struct pmbus_boolean)
+ * data->max_booleans, GFP_KERNEL);
+ if (!data->booleans) {
+ dev_err(&client->dev, "No memory to allocate boolean data\n");
+ goto out_sensors;
+ }
+
+ data->labels = kzalloc(sizeof(struct pmbus_label) * data->max_labels,
+ GFP_KERNEL);
+ if (!data->labels) {
+ dev_err(&client->dev, "No memory to allocate label data\n");
+ goto out_booleans;
+ }
+
+ data->attributes = kzalloc(sizeof(struct attribute *)
+ * data->max_attributes, GFP_KERNEL);
+ if (!data->attributes) {
+ dev_err(&client->dev, "No memory to allocate attribute data\n");
+ goto out_labels;
+ }
+
+ pmbus_find_attributes(client, data);
+
+ /*
+ * If there are no attributes, something is wrong.
+ * Bail out instead of trying to register nothing.
+ */
+ if (!data->num_attributes) {
+ dev_err(&client->dev, "No attributes found\n");
+ ret = -ENODEV;
+ goto out_attributes;
+ }
+
+ /* Register sysfs hooks */
+ data->group.attrs = data->attributes;
+ ret = sysfs_create_group(&client->dev.kobj, &data->group);
+ if (ret) {
+ dev_err(&client->dev, "Failed to create sysfs entries\n");
+ goto out_attributes;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ dev_err(&client->dev, "Failed to register hwmon device\n");
+ goto out_hwmon_device_register;
+ }
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+out_attributes:
+ kfree(data->attributes);
+out_labels:
+ kfree(data->labels);
+out_booleans:
+ kfree(data->booleans);
+out_sensors:
+ kfree(data->sensors);
+out_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_probe);
+
+int pmbus_do_remove(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+ kfree(data->attributes);
+ kfree(data->labels);
+ kfree(data->booleans);
+ kfree(data->sensors);
+ kfree(data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_remove);
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 073eabedc432..f2b377c56a3a 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,11 +1,12 @@
/*
w83627ehf - Driver for the hardware monitoring functionality of
- the Winbond W83627EHF Super-I/O chip
+ the Winbond W83627EHF Super-I/O chip
Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
Copyright (C) 2006 Yuan Mu (Winbond),
- Rudolf Marek <r.marek@assembler.cz>
- David Hubbard <david.c.hubbard@gmail.com>
+ Rudolf Marek <r.marek@assembler.cz>
+ David Hubbard <david.c.hubbard@gmail.com>
Daniel J Blueman <daniel.blueman@gmail.com>
+ Copyright (C) 2010 Sheng-Yuan Huang (Nuvoton) (PS00)
Shamelessly ripped from the w83627hf driver
Copyright (C) 2003 Mark Studebaker
@@ -35,11 +36,13 @@
Chip #vin #fan #pwm #temp chip IDs man ID
w83627ehf 10 5 4 3 0x8850 0x88 0x5ca3
- 0x8860 0xa1
+ 0x8860 0xa1
w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3
w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3
w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
- w83667hg-b 9 5 3 3 0xb350 0xc1 0x5ca3
+ w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
+ nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
+ nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -58,21 +61,28 @@
#include <linux/io.h>
#include "lm75.h"
-enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b };
+enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b, nct6775,
+ nct6776 };
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
-static const char * w83627ehf_device_names[] = {
+static const char * const w83627ehf_device_names[] = {
"w83627ehf",
"w83627dhg",
"w83627dhg",
"w83667hg",
"w83667hg",
+ "nct6775",
+ "nct6776",
};
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
+static unsigned short fan_debounce;
+module_param(fan_debounce, ushort, 0);
+MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
+
#define DRVNAME "w83627ehf"
/*
@@ -80,7 +90,7 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
*/
#define W83627EHF_LD_HWM 0x0b
-#define W83667HG_LD_VID 0x0d
+#define W83667HG_LD_VID 0x0d
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -94,8 +104,10 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
#define SIO_W83627EHG_ID 0x8860
#define SIO_W83627DHG_ID 0xa020
#define SIO_W83627DHG_P_ID 0xb070
-#define SIO_W83667HG_ID 0xa510
+#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
+#define SIO_NCT6775_ID 0xb470
+#define SIO_NCT6776_ID 0xc330
#define SIO_ID_MASK 0xFFF0
static inline void
@@ -138,7 +150,7 @@ superio_exit(int ioreg)
* ISA constants
*/
-#define IOREGION_ALIGNMENT ~7
+#define IOREGION_ALIGNMENT (~7)
#define IOREGION_OFFSET 5
#define IOREGION_LENGTH 2
#define ADDR_REG_OFFSET 0
@@ -164,13 +176,10 @@ static const u16 W83627EHF_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d, 0x3e, 0x55c };
#define W83627EHF_REG_IN(nr) ((nr < 7) ? (0x20 + (nr)) : \
(0x550 + (nr) - 7))
-#define W83627EHF_REG_TEMP1 0x27
-#define W83627EHF_REG_TEMP1_HYST 0x3a
-#define W83627EHF_REG_TEMP1_OVER 0x39
-static const u16 W83627EHF_REG_TEMP[] = { 0x150, 0x250 };
-static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x153, 0x253 };
-static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x155, 0x255 };
-static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
+static const u16 W83627EHF_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x7e };
+static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x3a, 0x153, 0x253, 0 };
+static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x39, 0x155, 0x255, 0 };
+static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
/* Fan clock dividers are spread over the following five registers */
#define W83627EHF_REG_FANDIV1 0x47
@@ -179,6 +188,11 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
#define W83627EHF_REG_DIODE 0x59
#define W83627EHF_REG_SMI_OVT 0x4C
+/* NCT6775F has its own fan divider registers */
+#define NCT6775_REG_FANDIV1 0x506
+#define NCT6775_REG_FANDIV2 0x507
+#define NCT6775_REG_FAN_DEBOUNCE 0xf0
+
#define W83627EHF_REG_ALARM1 0x459
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
@@ -199,22 +213,123 @@ static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 };
static const u8 W83627EHF_PWM_ENABLE_SHIFT[] = { 2, 4, 1, 4 };
/* FAN Duty Cycle, be used to control */
-static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
-static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
+static const u16 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
+static const u16 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 };
/* Advanced Fan control, some values are common for all fans */
-static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
-static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
-static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
+static const u16 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
+static const u16 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
+static const u16 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
-static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[]
+static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[]
= { 0xff, 0x67, 0xff, 0x69 };
-static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[]
+static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[]
= { 0xff, 0x68, 0xff, 0x6a };
-static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b };
-static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] = { 0x68, 0x6a, 0x6c };
+static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b };
+static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
+ = { 0x68, 0x6a, 0x6c };
+
+static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
+static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
+static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
+static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
+static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
+static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
+static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
+static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
+static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
+static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
+
+static const u16 NCT6775_REG_TEMP[]
+ = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
+static const u16 NCT6775_REG_TEMP_CONFIG[]
+ = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
+static const u16 NCT6775_REG_TEMP_HYST[]
+ = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
+static const u16 NCT6775_REG_TEMP_OVER[]
+ = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
+static const u16 NCT6775_REG_TEMP_SOURCE[]
+ = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
+
+static const char *const w83667hg_b_temp_label[] = {
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "AMDTSI",
+ "PECI Agent 1",
+ "PECI Agent 2",
+ "PECI Agent 3",
+ "PECI Agent 4"
+};
+
+static const char *const nct6775_temp_label[] = {
+ "",
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "AMD SB-TSI",
+ "PECI Agent 0",
+ "PECI Agent 1",
+ "PECI Agent 2",
+ "PECI Agent 3",
+ "PECI Agent 4",
+ "PECI Agent 5",
+ "PECI Agent 6",
+ "PECI Agent 7",
+ "PCH_CHIP_CPU_MAX_TEMP",
+ "PCH_CHIP_TEMP",
+ "PCH_CPU_TEMP",
+ "PCH_MCH_TEMP",
+ "PCH_DIM0_TEMP",
+ "PCH_DIM1_TEMP",
+ "PCH_DIM2_TEMP",
+ "PCH_DIM3_TEMP"
+};
+
+static const char *const nct6776_temp_label[] = {
+ "",
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "SMBUSMASTER 0",
+ "SMBUSMASTER 1",
+ "SMBUSMASTER 2",
+ "SMBUSMASTER 3",
+ "SMBUSMASTER 4",
+ "SMBUSMASTER 5",
+ "SMBUSMASTER 6",
+ "SMBUSMASTER 7",
+ "PECI Agent 0",
+ "PECI Agent 1",
+ "PCH_CHIP_CPU_MAX_TEMP",
+ "PCH_CHIP_TEMP",
+ "PCH_CPU_TEMP",
+ "PCH_MCH_TEMP",
+ "PCH_DIM0_TEMP",
+ "PCH_DIM1_TEMP",
+ "PCH_DIM2_TEMP",
+ "PCH_DIM3_TEMP",
+ "BYTE_TEMP"
+};
+
+#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
+
+static inline int is_word_sized(u16 reg)
+{
+ return ((((reg & 0xff00) == 0x100
+ || (reg & 0xff00) == 0x200)
+ && ((reg & 0x00ff) == 0x50
+ || (reg & 0x00ff) == 0x53
+ || (reg & 0x00ff) == 0x55))
+ || (reg & 0xfff0) == 0x630
+ || reg == 0x640 || reg == 0x642
+ || ((reg & 0xfff0) == 0x650
+ && (reg & 0x000f) >= 0x06)
+ || reg == 0x73 || reg == 0x75 || reg == 0x77
+ );
+}
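+
+/*
+ * Example: the banked temperature registers 0x150/0x153/0x155 and
+ * 0x250/0x253/0x255, the 0x63x fan count registers, and the NCT6775F
+ * temperature registers 0x73/0x75/0x77 are word sized; all other
+ * registers are accessed as single bytes.
+ */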
/*
* Conversions
@@ -232,12 +347,36 @@ static inline u8 step_time_to_reg(unsigned int msec, u8 mode)
(msec + 200) / 400), 1, 255);
}
-static inline unsigned int
-fan_from_reg(u8 reg, unsigned int div)
+static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
{
if (reg == 0 || reg == 255)
return 0;
- return 1350000U / (reg * div);
+ return 1350000U / (reg << divreg);
+}
+
+static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
+{
+ if ((reg & 0xff1f) == 0xff1f)
+ return 0;
+
+ reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
+
+ if (reg == 0)
+ return 0;
+
+ return 1350000U / reg;
+}
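+
+/*
+ * Example: the 13-bit count keeps bits 4:0 in the low byte and bits 12:5
+ * in the high byte, so a raw reading of 0x2a15 unpacks to
+ * (0x15 | 0x540) = 1365 and reports 1350000 / 1365 = 989 RPM; 0xff1f
+ * means no valid reading and is returned as 0.
+ */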
+
+static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
+{
+ if (reg == 0 || reg == 0xffff)
+ return 0;
+
+ /*
+ * Even though the registers are 16 bit wide, the fan divisor
+ * still applies.
+ */
+ return 1350000U / (reg << divreg);
}
static inline unsigned int
@@ -247,21 +386,19 @@ div_from_reg(u8 reg)
}
static inline int
-temp1_from_reg(s8 reg)
+temp_from_reg(u16 reg, s16 regval)
{
- return reg * 1000;
+ if (is_word_sized(reg))
+ return LM75_TEMP_FROM_REG(regval);
+ return regval * 1000;
}
-static inline s8
-temp1_to_reg(long temp, int min, int max)
+static inline u16
+temp_to_reg(u16 reg, long temp)
{
- if (temp <= min)
- return min / 1000;
- if (temp >= max)
- return max / 1000;
- if (temp < 0)
- return (temp - 500) / 1000;
- return (temp + 500) / 1000;
+ if (is_word_sized(reg))
+ return LM75_TEMP_TO_REG(temp);
+ return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
}
/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
@@ -275,7 +412,8 @@ static inline long in_from_reg(u8 reg, u8 nr)
static inline u8 in_to_reg(u32 val, u8 nr)
{
- return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0, 255);
+ return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0,
+ 255);
}
/*
@@ -289,38 +427,57 @@ struct w83627ehf_data {
struct device *hwmon_dev;
struct mutex lock;
- const u8 *REG_FAN_START_OUTPUT;
- const u8 *REG_FAN_STOP_OUTPUT;
- const u8 *REG_FAN_MAX_OUTPUT;
- const u8 *REG_FAN_STEP_OUTPUT;
+ u16 reg_temp[NUM_REG_TEMP];
+ u16 reg_temp_over[NUM_REG_TEMP];
+ u16 reg_temp_hyst[NUM_REG_TEMP];
+ u16 reg_temp_config[NUM_REG_TEMP];
+ u8 temp_src[NUM_REG_TEMP];
+ const char * const *temp_label;
+
+ const u16 *REG_PWM;
+ const u16 *REG_TARGET;
+ const u16 *REG_FAN;
+ const u16 *REG_FAN_MIN;
+ const u16 *REG_FAN_START_OUTPUT;
+ const u16 *REG_FAN_STOP_OUTPUT;
+ const u16 *REG_FAN_STOP_TIME;
+ const u16 *REG_FAN_MAX_OUTPUT;
+ const u16 *REG_FAN_STEP_OUTPUT;
+
+ unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
+ unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values */
+ u8 bank; /* current register bank */
u8 in_num; /* number of in inputs we have */
u8 in[10]; /* Register value */
u8 in_max[10]; /* Register value */
u8 in_min[10]; /* Register value */
- u8 fan[5];
- u8 fan_min[5];
+ unsigned int rpm[5];
+ u16 fan_min[5];
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
+ u8 has_fan_min; /* some fans don't have min register */
+ bool has_fan_div;
u8 temp_type[3];
- s8 temp1;
- s8 temp1_max;
- s8 temp1_max_hyst;
- s16 temp[2];
- s16 temp_max[2];
- s16 temp_max_hyst[2];
+ s16 temp[9];
+ s16 temp_max[9];
+ s16 temp_max_hyst[9];
u32 alarms;
u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */
u8 pwm_enable[4]; /* 1->manual
2->thermal cruise mode (also called SmartFan I)
3->fan speed cruise mode
- 4->variable thermal cruise (also called SmartFan III) */
+ 4->variable thermal cruise (also called
+ SmartFan III)
+ 5->enhanced variable thermal cruise (also called
+ SmartFan IV) */
+ u8 pwm_enable_orig[4]; /* original value of pwm_enable */
u8 pwm_num; /* number of pwm */
u8 pwm[4];
u8 target_temp[4];
@@ -335,7 +492,7 @@ struct w83627ehf_data {
u8 vid;
u8 vrm;
- u8 temp3_disable;
+ u16 have_temp;
u8 in6_skip;
};
@@ -344,30 +501,19 @@ struct w83627ehf_sio_data {
enum kinds kind;
};
-static inline int is_word_sized(u16 reg)
-{
- return (((reg & 0xff00) == 0x100
- || (reg & 0xff00) == 0x200)
- && ((reg & 0x00ff) == 0x50
- || (reg & 0x00ff) == 0x53
- || (reg & 0x00ff) == 0x55));
-}
-
-/* Registers 0x50-0x5f are banked */
+/*
+ * On older chips, only registers 0x50-0x5f are banked.
+ * On more recent chips, all registers are banked.
+ * Assume that is the case and set the bank number for each access.
+ * Cache the bank number so it only needs to be set if it changes.
+ */
static inline void w83627ehf_set_bank(struct w83627ehf_data *data, u16 reg)
{
- if ((reg & 0x00f0) == 0x50) {
+ u8 bank = reg >> 8;
+ if (data->bank != bank) {
outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
- outb_p(reg >> 8, data->addr + DATA_REG_OFFSET);
- }
-}
-
-/* Not strictly necessary, but play it safe for now */
-static inline void w83627ehf_reset_bank(struct w83627ehf_data *data, u16 reg)
-{
- if (reg & 0xff00) {
- outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
- outb_p(0, data->addr + DATA_REG_OFFSET);
+ outb_p(bank, data->addr + DATA_REG_OFFSET);
+ data->bank = bank;
}
}
@@ -385,14 +531,13 @@ static u16 w83627ehf_read_value(struct w83627ehf_data *data, u16 reg)
data->addr + ADDR_REG_OFFSET);
res = (res << 8) + inb_p(data->addr + DATA_REG_OFFSET);
}
- w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
-
return res;
}
-static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value)
+static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg,
+ u16 value)
{
int word_sized = is_word_sized(reg);
@@ -406,13 +551,40 @@ static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value
data->addr + ADDR_REG_OFFSET);
}
outb_p(value & 0xff, data->addr + DATA_REG_OFFSET);
- w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
return 0;
}
/* This function assumes that the caller holds data->update_lock */
+static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
+{
+ u8 reg;
+
+ switch (nr) {
+ case 0:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
+ | (data->fan_div[0] & 0x7);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+ break;
+ case 1:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
+ | ((data->fan_div[1] << 4) & 0x70);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+ break;
+ case 2:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
+ | (data->fan_div[2] & 0x7);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
+ break;
+ case 3:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
+ | ((data->fan_div[3] << 4) & 0x70);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
+ break;
+ }
+}
+
+/* This function assumes that the caller holds data->update_lock */
static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
u8 reg;
@@ -463,6 +635,32 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
}
}
+static void w83627ehf_write_fan_div_common(struct device *dev,
+ struct w83627ehf_data *data, int nr)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6776)
+ ; /* no dividers, do nothing */
+ else if (sio_data->kind == nct6775)
+ nct6775_write_fan_div(data, nr);
+ else
+ w83627ehf_write_fan_div(data, nr);
+}
+
+static void nct6775_update_fan_div(struct w83627ehf_data *data)
+{
+ u8 i;
+
+ i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
+ data->fan_div[0] = i & 0x7;
+ data->fan_div[1] = (i & 0x70) >> 4;
+ i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
+ data->fan_div[2] = i & 0x7;
+ if (data->has_fan & (1<<3))
+ data->fan_div[3] = (i & 0x70) >> 4;
+}
+
static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
{
int i;
@@ -488,10 +686,79 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
}
}
+static void w83627ehf_update_fan_div_common(struct device *dev,
+ struct w83627ehf_data *data)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6776)
+ ; /* no dividers, do nothing */
+ else if (sio_data->kind == nct6775)
+ nct6775_update_fan_div(data);
+ else
+ w83627ehf_update_fan_div(data);
+}
+
+static void nct6775_update_pwm(struct w83627ehf_data *data)
+{
+ int i;
+ int pwmcfg, fanmodecfg;
+
+ for (i = 0; i < data->pwm_num; i++) {
+ pwmcfg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[i]);
+ fanmodecfg = w83627ehf_read_value(data,
+ NCT6775_REG_FAN_MODE[i]);
+ data->pwm_mode[i] =
+ ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
+ data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
+ data->tolerance[i] = fanmodecfg & 0x0f;
+ data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+ }
+}
+
+static void w83627ehf_update_pwm(struct w83627ehf_data *data)
+{
+ int i;
+ int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
+
+ for (i = 0; i < data->pwm_num; i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+
+ /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
+ if (i != 1) {
+ pwmcfg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[i]);
+ tolerance = w83627ehf_read_value(data,
+ W83627EHF_REG_TOLERANCE[i]);
+ }
+ data->pwm_mode[i] =
+ ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
+ data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
+ & 3) + 1;
+ data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+
+ data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
+ }
+}
+
+static void w83627ehf_update_pwm_common(struct device *dev,
+ struct w83627ehf_data *data)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
+ nct6775_update_pwm(data);
+ else
+ w83627ehf_update_pwm(data);
+}
+
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
int i;
mutex_lock(&data->update_lock);
@@ -499,7 +766,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- w83627ehf_update_fan_div(data);
+ w83627ehf_update_fan_div_common(dev, data);
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
@@ -513,92 +780,90 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
/* Measured fan speeds and limits */
for (i = 0; i < 5; i++) {
+ u16 reg;
+
if (!(data->has_fan & (1 << i)))
continue;
- data->fan[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN[i]);
- data->fan_min[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_MIN[i]);
+ reg = w83627ehf_read_value(data, data->REG_FAN[i]);
+ data->rpm[i] = data->fan_from_reg(reg,
+ data->fan_div[i]);
+
+ if (data->has_fan_min & (1 << i))
+ data->fan_min[i] = w83627ehf_read_value(data,
+ data->REG_FAN_MIN[i]);
/* If we failed to measure the fan speed and clock
divider can be increased, let's try that for next
time */
- if (data->fan[i] == 0xff
- && data->fan_div[i] < 0x07) {
- dev_dbg(dev, "Increasing fan%d "
+ if (data->has_fan_div
+ && (reg >= 0xff || (sio_data->kind == nct6775
+ && reg == 0x00))
+ && data->fan_div[i] < 0x07) {
+ dev_dbg(dev, "Increasing fan%d "
"clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div(data, i);
+ w83627ehf_write_fan_div_common(dev, data, i);
/* Preserve min limit if possible */
- if (data->fan_min[i] >= 2
+ if ((data->has_fan_min & (1 << i))
+ && data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
w83627ehf_write_value(data,
- W83627EHF_REG_FAN_MIN[i],
+ data->REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
}
+ w83627ehf_update_pwm_common(dev, data);
+
for (i = 0; i < data->pwm_num; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
- if (i != 1) {
- pwmcfg = w83627ehf_read_value(data,
- W83627EHF_REG_PWM_ENABLE[i]);
- tolerance = w83627ehf_read_value(data,
- W83627EHF_REG_TOLERANCE[i]);
- }
- data->pwm_mode[i] =
- ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1)
- ? 0 : 1;
- data->pwm_enable[i] =
- ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
- & 3) + 1;
- data->pwm[i] = w83627ehf_read_value(data,
- W83627EHF_REG_PWM[i]);
- data->fan_start_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_START_OUTPUT[i]);
- data->fan_stop_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_STOP_OUTPUT[i]);
- data->fan_stop_time[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_STOP_TIME[i]);
-
- if (data->REG_FAN_MAX_OUTPUT[i] != 0xff)
+ data->fan_start_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_START_OUTPUT[i]);
+ data->fan_stop_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_STOP_OUTPUT[i]);
+ data->fan_stop_time[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_STOP_TIME[i]);
+
+ if (data->REG_FAN_MAX_OUTPUT &&
+ data->REG_FAN_MAX_OUTPUT[i] != 0xff)
data->fan_max_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_MAX_OUTPUT[i]);
+ data->REG_FAN_MAX_OUTPUT[i]);
- if (data->REG_FAN_STEP_OUTPUT[i] != 0xff)
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[i] != 0xff)
data->fan_step_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STEP_OUTPUT[i]);
+ data->REG_FAN_STEP_OUTPUT[i]);
data->target_temp[i] =
w83627ehf_read_value(data,
- W83627EHF_REG_TARGET[i]) &
+ data->REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
- data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0))
- & 0x0f;
}
/* Measured temperatures and limits */
- data->temp1 = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1);
- data->temp1_max = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1_OVER);
- data->temp1_max_hyst = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1_HYST);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
data->temp[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP[i]);
- data->temp_max[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_OVER[i]);
- data->temp_max_hyst[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_HYST[i]);
+ data->reg_temp[i]);
+ if (data->reg_temp_over[i])
+ data->temp_max[i]
+ = w83627ehf_read_value(data,
+ data->reg_temp_over[i]);
+ if (data->reg_temp_hyst[i])
+ data->temp_max_hyst[i]
+ = w83627ehf_read_value(data,
+ data->reg_temp_hyst[i]);
}
data->alarms = w83627ehf_read_value(data,
@@ -625,7 +890,8 @@ show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr)); \
}
@@ -635,14 +901,18 @@ show_in_reg(in_max)
#define store_in_reg(REG, reg) \
static ssize_t \
-store_in_##reg (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+store_in_##reg(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u32 val = simple_strtoul(buf, NULL, 10); \
- \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
mutex_lock(&data->update_lock); \
data->in_##reg[nr] = in_to_reg(val, nr); \
w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
@@ -654,7 +924,8 @@ store_in_##reg (struct device *dev, struct device_attribute *attr, \
store_in_reg(MIN, min)
store_in_reg(MAX, max)
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
@@ -689,45 +960,50 @@ static struct sensor_device_attribute sda_in_alarm[] = {
};
static struct sensor_device_attribute sda_in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
+ SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
+ SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
+ SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
+ SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
+ SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
+ SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
+ SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
+ SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
+ SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
+ SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
};
static struct sensor_device_attribute sda_in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
+ SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
+ SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
+ SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
+ SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
+ SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
+ SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
+ SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
+ SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
+ SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
+ SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
};
-#define show_fan_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", \
- fan_from_reg(data->reg[nr], \
- div_from_reg(data->fan_div[nr]))); \
+static ssize_t
+show_fan(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%d\n", data->rpm[nr]);
+}
+
+static ssize_t
+show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%d\n",
+ data->fan_from_reg_min(data->fan_min[nr],
+ data->fan_div[nr]));
}
-show_fan_reg(fan);
-show_fan_reg(fan_min);
static ssize_t
show_fan_div(struct device *dev, struct device_attribute *attr,
@@ -746,11 +1022,32 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- unsigned int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
unsigned int reg;
u8 new_div;
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
mutex_lock(&data->update_lock);
+ if (!data->has_fan_div) {
+ /*
+ * Only NCT6776F for now, so we know that this is a 13 bit
+ * register
+ */
+ if (!val) {
+ val = 0xff1f;
+ } else {
+ if (val > 1350000U)
+ val = 135000U;
+ val = 1350000U / val;
+ val = (val & 0x1f) | ((val << 3) & 0xff00);
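+ /*
+ * Example: a requested minimum of 1000 RPM becomes
+ * 1350000 / 1000 = 1350 (0x546), repacked into the
+ * split register layout as 0x2a06.
+ */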
+ }
+ data->fan_min[nr] = val;
+ goto done; /* Leave fan divider alone */
+ }
if (!val) {
/* No min limit, alarm disabled */
data->fan_min[nr] = 255;
@@ -761,15 +1058,17 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
even with the highest divider (128) */
data->fan_min[nr] = 254;
new_div = 7; /* 128 == (1 << 7) */
- dev_warn(dev, "fan%u low limit %u below minimum %u, set to "
- "minimum\n", nr + 1, val, fan_from_reg(254, 128));
+ dev_warn(dev, "fan%u low limit %lu below minimum %u, set to "
+ "minimum\n", nr + 1, val,
+ data->fan_from_reg_min(254, 7));
} else if (!reg) {
/* Speed above this value cannot possibly be represented,
even with the lowest divider (1) */
data->fan_min[nr] = 1;
new_div = 0; /* 1 == (1 << 0) */
- dev_warn(dev, "fan%u low limit %u above maximum %u, set to "
- "maximum\n", nr + 1, val, fan_from_reg(1, 1));
+ dev_warn(dev, "fan%u low limit %lu above maximum %u, set to "
+ "maximum\n", nr + 1, val,
+ data->fan_from_reg_min(1, 0));
} else {
/* Automatically pick the best divider, i.e. the one such
that the min limit will correspond to a register value
@@ -785,25 +1084,16 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
/* Write both the fan clock divider (if it changed) and the new
fan min (unconditionally) */
if (new_div != data->fan_div[nr]) {
- /* Preserve the fan speed reading */
- if (data->fan[nr] != 0xff) {
- if (new_div > data->fan_div[nr])
- data->fan[nr] >>= new_div - data->fan_div[nr];
- else if (data->fan[nr] & 0x80)
- data->fan[nr] = 0xff;
- else
- data->fan[nr] <<= data->fan_div[nr] - new_div;
- }
-
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
nr + 1, div_from_reg(data->fan_div[nr]),
div_from_reg(new_div));
data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div(data, nr);
+ w83627ehf_write_fan_div_common(dev, data, nr);
/* Give the chip time to sample a new speed value */
data->last_updated = jiffies;
}
- w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[nr],
+done:
+ w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
data->fan_min[nr]);
mutex_unlock(&data->update_lock);
@@ -847,70 +1137,54 @@ static struct sensor_device_attribute sda_fan_div[] = {
SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
};
-#define show_temp1_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- return sprintf(buf, "%d\n", temp1_from_reg(data->reg)); \
-}
-show_temp1_reg(temp1);
-show_temp1_reg(temp1_max);
-show_temp1_reg(temp1_max_hyst);
-
-#define store_temp1_reg(REG, reg) \
-static ssize_t \
-store_temp1_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- long val = simple_strtol(buf, NULL, 10); \
- \
- mutex_lock(&data->update_lock); \
- data->temp1_##reg = temp1_to_reg(val, -128000, 127000); \
- w83627ehf_write_value(data, W83627EHF_REG_TEMP1_##REG, \
- data->temp1_##reg); \
- mutex_unlock(&data->update_lock); \
- return count; \
+static ssize_t
+show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
}
-store_temp1_reg(OVER, max);
-store_temp1_reg(HYST, max_hyst);
-#define show_temp_reg(reg) \
+#define show_temp_reg(addr, reg) \
static ssize_t \
show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
- LM75_TEMP_FROM_REG(data->reg[nr])); \
+ temp_from_reg(data->addr[nr], data->reg[nr])); \
}
-show_temp_reg(temp);
-show_temp_reg(temp_max);
-show_temp_reg(temp_max_hyst);
+show_temp_reg(reg_temp, temp);
+show_temp_reg(reg_temp_over, temp_max);
+show_temp_reg(reg_temp_hyst, temp_max_hyst);
-#define store_temp_reg(REG, reg) \
+#define store_temp_reg(addr, reg) \
static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- long val = simple_strtol(buf, NULL, 10); \
- \
+ int err; \
+ long val; \
+ err = strict_strtol(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_value(data, W83627EHF_REG_TEMP_##REG[nr], \
+ data->reg[nr] = temp_to_reg(data->addr[nr], val); \
+ w83627ehf_write_value(data, data->addr[nr], \
data->reg[nr]); \
mutex_unlock(&data->update_lock); \
return count; \
}
-store_temp_reg(OVER, temp_max);
-store_temp_reg(HYST, temp_max_hyst);
+store_temp_reg(reg_temp_over, temp_max);
+store_temp_reg(reg_temp_hyst, temp_max_hyst);
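
[Note: the simple_strtol() to strict_strtol() conversions in this hunk make every store callback reject trailing garbage instead of silently parsing a numeric prefix. A small user-space sketch of the same parse-then-store discipline, with strtol() standing in for strict_strtol(); like the kernel helper, a trailing newline is tolerated.]

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Mimic strict_strtol(): fail unless the whole string is a number. */
static int parse_long(const char *buf, long *val)
{
	char *end;

	errno = 0;
	*val = strtol(buf, &end, 10);
	if (errno || end == buf || (*end != '\0' && *end != '\n'))
		return -EINVAL;
	return 0;
}

int main(void)
{
	long val;

	if (parse_long("1500\n", &val) == 0)
		printf("accepted: %ld\n", val);	/* accepted: 1500 */
	if (parse_long("15abc", &val) < 0)
		printf("rejected trailing garbage\n");
	return 0;
}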
static ssize_t
show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
@@ -922,27 +1196,69 @@ show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static struct sensor_device_attribute sda_temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 1),
+ SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
+ SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
+ SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
+ SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
+ SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
+ SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
+ SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
+ SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
+ SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
+};
+
+static struct sensor_device_attribute sda_temp_label[] = {
+ SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
+ SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
+ SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
+ SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
+ SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
+ SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
+ SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
+ SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
+ SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
};
static struct sensor_device_attribute sda_temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp1_max,
- store_temp1_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
+ SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
store_temp_max, 0),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
+ SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
store_temp_max, 1),
+ SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 2),
+ SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 3),
+ SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 4),
+ SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 5),
+ SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 6),
+ SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 7),
+ SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 8),
};
static struct sensor_device_attribute sda_temp_max_hyst[] = {
- SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1_max_hyst,
- store_temp1_max_hyst, 0),
- SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
store_temp_max_hyst, 0),
- SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
store_temp_max_hyst, 1),
+ SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 2),
+ SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 3),
+ SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 4),
+ SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 5),
+ SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 6),
+ SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 7),
+ SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 8),
};
static struct sensor_device_attribute sda_temp_alarm[] = {
@@ -958,11 +1274,12 @@ static struct sensor_device_attribute sda_temp_type[] = {
};
#define show_pwm_reg(reg) \
-static ssize_t show_##reg (struct device *dev, struct device_attribute *attr, \
- char *buf) \
+static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", data->reg[nr]); \
}
@@ -978,9 +1295,14 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
u16 reg;
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1001,11 +1323,18 @@ store_pwm(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255);
+ unsigned long val;
+ int err;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_PWM[nr], val);
+ w83627ehf_write_value(data, data->REG_PWM[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1015,19 +1344,38 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
u16 reg;
- if (!val || (val > 4))
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
return -EINVAL;
+ /* SmartFan III mode is not supported on NCT6776F */
+ if (sio_data->kind == nct6776 && val == 4)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
data->pwm_enable[nr] = val;
- reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ reg = w83627ehf_read_value(data,
+ NCT6775_REG_FAN_MODE[nr]);
+ reg &= 0x0f;
+ reg |= (val - 1) << 4;
+ w83627ehf_write_value(data,
+ NCT6775_REG_FAN_MODE[nr], reg);
+ } else {
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
+ reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ }
mutex_unlock(&data->update_lock);
return count;
}
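
[Note: on the NCT6775F/NCT6776F the pwm mode occupies the high nibble of the FAN_MODE register, so the read-modify-write above must preserve the low nibble, which holds the tolerance (see store_tolerance() below). A standalone sketch of that packing, using made-up register values.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack pwm_enable (1..4, stored as val-1) into FAN_MODE bits 7:4,
 * keeping the tolerance in bits 3:0 untouched. */
static uint8_t pack_fan_mode(uint8_t reg, unsigned long val)
{
	reg &= 0x0f;		/* keep the tolerance nibble */
	reg |= (val - 1) << 4;	/* mode nibble */
	return reg;
}

int main(void)
{
	/* tolerance 5, switching to mode 3: 0x15 becomes 0x25 */
	assert(pack_fan_mode(0x15, 3) == 0x25);
	printf("0x%02x\n", pack_fan_mode(0x15, 3));
	return 0;
}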
@@ -1038,9 +1386,10 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", temp1_from_reg(data->reg[nr])); \
+ return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
}
show_tol_temp(tolerance)
@@ -1053,11 +1402,18 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 127000);
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
+ w83627ehf_write_value(data, data->REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1067,20 +1423,37 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
/* Limit the temp to 0C - 15C */
- u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 15000);
+ val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
- data->tolerance[nr] = val;
- if (nr == 1)
- reg = (reg & 0x0f) | (val << 4);
- else
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ /* Limit tolerance further for NCT6776F */
+ if (sio_data->kind == nct6776 && val > 7)
+ val = 7;
+ reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
+ w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
+ } else {
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+ if (nr == 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
+ reg = (reg & 0xf0) | val;
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
+ }
+ data->tolerance[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
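
[Note: the tolerance is now taken in millidegrees and rounded to whole degrees before clamping, so 4499 rounds to 4 and 4500 to 5. A self-contained illustration; the two macros are simplified local stand-ins for the kernel's (the kernel's DIV_ROUND_CLOSEST also copes with negative dividends).]

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))
#define SENSORS_LIMIT(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	long inputs[] = { 4499, 4500, 20000, -3000 };
	int i;

	for (i = 0; i < 4; i++) {
		long val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(inputs[i], 1000),
					 0, 15);
		printf("%6ld mC -> %ld C\n", inputs[i], val);
	}
	return 0;	/* prints 4, 5, 15, 0 */
}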
@@ -1143,18 +1516,25 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", data->reg[nr]); \
-}\
+} \
static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
-{\
+{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
+ val = SENSORS_LIMIT(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, data->REG_##REG[nr], val); \
@@ -1172,10 +1552,12 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
- step_time_from_reg(data->reg[nr], data->pwm_mode[nr])); \
+ step_time_from_reg(data->reg[nr], \
+ data->pwm_mode[nr])); \
} \
\
static ssize_t \
@@ -1183,10 +1565,15 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u8 val = step_time_to_reg(simple_strtoul(buf, NULL, 10), \
- data->pwm_mode[nr]); \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
+ val = step_time_to_reg(val, data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
@@ -1283,7 +1670,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
struct sensor_device_attribute *attr =
&sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
device_remove_file(dev, &attr->dev_attr);
}
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
@@ -1309,12 +1697,15 @@ static void w83627ehf_device_remove_files(struct device *dev)
device_remove_file(dev, &sda_target_temp[i].dev_attr);
device_remove_file(dev, &sda_tolerance[i].dev_attr);
}
- for (i = 0; i < 3; i++) {
- if ((i == 2) && data->temp3_disable)
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
continue;
device_remove_file(dev, &sda_temp_input[i].dev_attr);
+ device_remove_file(dev, &sda_temp_label[i].dev_attr);
device_remove_file(dev, &sda_temp_max[i].dev_attr);
device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
+ if (i > 2)
+ continue;
device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
device_remove_file(dev, &sda_temp_type[i].dev_attr);
}
@@ -1335,15 +1726,17 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
w83627ehf_write_value(data, W83627EHF_REG_CONFIG,
tmp | 0x01);
- /* Enable temp2 and temp3 if needed */
- for (i = 0; i < 2; i++) {
- tmp = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_CONFIG[i]);
- if ((i == 1) && data->temp3_disable)
+ /* Enable temperature sensors if needed */
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+ if (!data->reg_temp_config[i])
continue;
+ tmp = w83627ehf_read_value(data,
+ data->reg_temp_config[i]);
if (tmp & 0x01)
w83627ehf_write_value(data,
- W83627EHF_REG_TEMP_CONFIG[i],
+ data->reg_temp_config[i],
tmp & 0xfe);
}
@@ -1362,13 +1755,39 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
}
}
+static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
+ int r1, int r2)
+{
+ u16 tmp;
+
+ tmp = data->temp_src[r1];
+ data->temp_src[r1] = data->temp_src[r2];
+ data->temp_src[r2] = tmp;
+
+ tmp = data->reg_temp[r1];
+ data->reg_temp[r1] = data->reg_temp[r2];
+ data->reg_temp[r2] = tmp;
+
+ tmp = data->reg_temp_over[r1];
+ data->reg_temp_over[r1] = data->reg_temp_over[r2];
+ data->reg_temp_over[r2] = tmp;
+
+ tmp = data->reg_temp_hyst[r1];
+ data->reg_temp_hyst[r1] = data->reg_temp_hyst[r2];
+ data->reg_temp_hyst[r2] = tmp;
+
+ tmp = data->reg_temp_config[r1];
+ data->reg_temp_config[r1] = data->reg_temp_config[r2];
+ data->reg_temp_config[r2] = tmp;
+}
+
static int __devinit w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct w83627ehf_data *data;
struct resource *res;
- u8 fan4pin, fan5pin, en_vrm10;
+ u8 fan3pin, fan4pin, fan4min, fan5pin, en_vrm10;
int i, err = 0;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1380,7 +1799,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
goto exit;
}
- if (!(data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL))) {
+ data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL);
+ if (!data) {
err = -ENOMEM;
goto exit_release;
}
@@ -1393,25 +1813,202 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG has 3 pwms */
+ /* 667HG, NCT6775F, and NCT6776F have 3 pwms */
data->pwm_num = (sio_data->kind == w83667hg
- || sio_data->kind == w83667hg_b) ? 3 : 4;
+ || sio_data->kind == w83667hg_b
+ || sio_data->kind == nct6775
+ || sio_data->kind == nct6776) ? 3 : 4;
+ data->have_temp = 0x07;
/* Check temp3 configuration bit for 667HG */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
- data->temp3_disable = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_CONFIG[1]) & 0x01;
- data->in6_skip = !data->temp3_disable;
+ if (sio_data->kind == w83667hg) {
+ u8 reg;
+
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]);
+ if (reg & 0x01)
+ data->have_temp &= ~(1 << 2);
+ else
+ data->in6_skip = 1; /* either temp3 or in6 */
+ }
+
+ /* Deal with temperature register setup first. */
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ int mask = 0;
+
+ /*
+ * Display temperature sensor output only if it monitors
+ * a source other than one already reported. Always display
+ * first three temperature registers, though.
+ */
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ u8 src;
+
+ data->reg_temp[i] = NCT6775_REG_TEMP[i];
+ data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
+
+ src = w83627ehf_read_value(data,
+ NCT6775_REG_TEMP_SOURCE[i]);
+ src &= 0x1f;
+ if (src && !(mask & (1 << src))) {
+ data->have_temp |= 1 << i;
+ mask |= 1 << src;
+ }
+
+ data->temp_src[i] = src;
+
+ /*
+ * Now do some register swapping if index 0..2 don't
+ * point to SYSTIN(1), CPUIN(2), and AUXIN(3).
+			 * The idea is to have the first three attributes
+ * report SYSTIN, CPUIN, and AUXIN if possible
+ * without overriding the basic system configuration.
+ */
+ if (i > 0 && data->temp_src[0] != 1
+ && data->temp_src[i] == 1)
+ w82627ehf_swap_tempreg(data, 0, i);
+ if (i > 1 && data->temp_src[1] != 2
+ && data->temp_src[i] == 2)
+ w82627ehf_swap_tempreg(data, 1, i);
+ if (i > 2 && data->temp_src[2] != 3
+ && data->temp_src[i] == 3)
+ w82627ehf_swap_tempreg(data, 2, i);
+ }
+ if (sio_data->kind == nct6776) {
+ /*
+ * On NCT6776, AUXTIN and VIN3 pins are shared.
+			 * The only way to detect this is to check whether
+			 * AUXTIN is used as a temperature source, and whether
+			 * that source is enabled.
+ *
+ * If that is the case, disable in6, which reports VIN3.
+ * Otherwise disable temp3.
+ */
+ if (data->temp_src[2] == 3) {
+ u8 reg;
+
+ if (data->reg_temp_config[2])
+ reg = w83627ehf_read_value(data,
+ data->reg_temp_config[2]);
+ else
+ reg = 0; /* Assume AUXTIN is used */
+
+ if (reg & 0x01)
+ data->have_temp &= ~(1 << 2);
+ else
+ data->in6_skip = 1;
+ }
+ data->temp_label = nct6776_temp_label;
+ } else {
+ data->temp_label = nct6775_temp_label;
+ }
+ } else if (sio_data->kind == w83667hg_b) {
+ u8 reg;
+
+ /*
+ * Temperature sources are selected with bank 0, registers 0x49
+ * and 0x4a.
+ */
+ for (i = 0; i < ARRAY_SIZE(W83627EHF_REG_TEMP); i++) {
+ data->reg_temp[i] = W83627EHF_REG_TEMP[i];
+ data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+ }
+ reg = w83627ehf_read_value(data, 0x4a);
+ data->temp_src[0] = reg >> 5;
+ reg = w83627ehf_read_value(data, 0x49);
+ data->temp_src[1] = reg & 0x07;
+ data->temp_src[2] = (reg >> 4) & 0x07;
+
+ /*
+ * W83667HG-B has another temperature register at 0x7e.
+ * The temperature source is selected with register 0x7d.
+ * Support it if the source differs from already reported
+ * sources.
+ */
+ reg = w83627ehf_read_value(data, 0x7d);
+ reg &= 0x07;
+ if (reg != data->temp_src[0] && reg != data->temp_src[1]
+ && reg != data->temp_src[2]) {
+ data->temp_src[3] = reg;
+ data->have_temp |= 1 << 3;
+ }
+
+ /*
+ * Chip supports either AUXTIN or VIN3. Try to find out which
+ * one.
+ */
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]);
+ if (data->temp_src[2] == 2 && (reg & 0x01))
+ data->have_temp &= ~(1 << 2);
+
+ if ((data->temp_src[2] == 2 && (data->have_temp & (1 << 2)))
+ || (data->temp_src[3] == 2 && (data->have_temp & (1 << 3))))
+ data->in6_skip = 1;
+
+ data->temp_label = w83667hg_b_temp_label;
+ } else {
+ /* Temperature sources are fixed */
+ for (i = 0; i < 3; i++) {
+ data->reg_temp[i] = W83627EHF_REG_TEMP[i];
+ data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+ }
}
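
[Note: the NCT6775/6776 branch above keeps a bitmask of temperature sources already claimed, so two monitoring registers wired to the same sensor are exported only once. The idea in isolation, with illustrative temp_src values.]

#include <stdio.h>

#define NUM_REG_TEMP 9

int main(void)
{
	/* e.g. registers 0..3 wired to SYSTIN(1), CPUIN(2), CPUIN(2), AUXIN(3) */
	unsigned char temp_src[NUM_REG_TEMP] = { 1, 2, 2, 3 };
	unsigned int have_temp = 0, mask = 0;
	int i;

	for (i = 0; i < NUM_REG_TEMP; i++) {
		unsigned char src = temp_src[i] & 0x1f;

		if (src && !(mask & (1 << src))) {
			have_temp |= 1 << i;	/* export this channel */
			mask |= 1 << src;	/* source is now taken */
		}
	}
	printf("have_temp = 0x%03x\n", have_temp);	/* 0x00b: temp1,2,4 */
	return 0;
}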
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- if (sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == nct6775) {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg16;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = NCT6775_REG_PWM;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6775_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
+ data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
+ data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
+ } else if (sio_data->kind == nct6776) {
+ data->has_fan_div = false;
+ data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg_min = fan_from_reg13;
+ data->REG_PWM = NCT6775_REG_PWM;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6775_REG_FAN;
+ data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
+ } else if (sio_data->kind == w83667hg_b) {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg8;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = W83627EHF_REG_PWM;
+ data->REG_TARGET = W83627EHF_REG_TARGET;
+ data->REG_FAN = W83627EHF_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
data->REG_FAN_STEP_OUTPUT =
W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
} else {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg8;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = W83627EHF_REG_PWM;
+ data->REG_TARGET = W83627EHF_REG_TARGET;
+ data->REG_FAN = W83627EHF_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
data->REG_FAN_STEP_OUTPUT =
@@ -1424,7 +2021,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
data->vrm = vid_which_vrm();
superio_enter(sio_data->sioreg);
/* Read VID value */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
+ sio_data->kind == nct6775 || sio_data->kind == nct6776) {
/* W83667HG has different pins for VID input and output, so
we can get the VID input values directly at logical device D
0xe3. */
@@ -1475,13 +2073,44 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
- fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
+ if (sio_data->kind == nct6775) {
+ /* On NCT6775, fan4 shares pins with the fdc interface */
+ fan3pin = 1;
+ fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
+ fan4min = 0;
+ fan5pin = 0;
+ } else if (sio_data->kind == nct6776) {
+ fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+ fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
+ fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+ fan4min = fan4pin;
+ } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ fan3pin = 1;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
+ fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
+ fan4min = fan4pin;
} else {
- fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
+ fan3pin = 1;
fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
+ fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
+ fan4min = fan4pin;
}
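
[Note: the pin probing above reduces each Super-I/O strap register to a boolean, mixing active-low tests (!(reg & bit)) with active-high ones (!!(reg & bit)). A compact model with invented register contents for the NCT6776 case.]

#include <stdio.h>

int main(void)
{
	/* pretend Super-I/O regs: 0x24 reads 0x00, 0x1c reads 0x03 */
	unsigned char reg24 = 0x00, reg1c = 0x03;
	int fan3pin, fan4pin, fan5pin;

	fan3pin = !(reg24 & 0x40);	/* active low: pin free for fan3 */
	fan4pin = !!(reg1c & 0x01);	/* active high */
	fan5pin = !!(reg1c & 0x02);	/* active high */

	printf("fan3=%d fan4=%d fan5=%d\n", fan3pin, fan4pin, fan5pin);
	/* fan3=1 fan4=1 fan5=1 */
	return 0;
}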
+
+ if (fan_debounce &&
+ (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
+ u8 tmp;
+
+ superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+ tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
+ if (sio_data->kind == nct6776)
+ superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
+ 0x3e | tmp);
+ else
+ superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
+ 0x1e | tmp);
+ pr_info("Enabled fan debounce for chip %s\n", data->name);
+ }
+
superio_exit(sio_data->sioreg);
/* It looks like fan4 and fan5 pins can be alternatively used
@@ -1490,26 +2119,54 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
	   connected fan5 as input unless they are emitting logic 1, which
is not the default. */
- data->has_fan = 0x07; /* fan1, fan2 and fan3 */
- i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((i & (1 << 2)) && fan4pin)
- data->has_fan |= (1 << 3);
- if (!(i & (1 << 1)) && fan5pin)
- data->has_fan |= (1 << 4);
+ data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
+
+ data->has_fan |= (fan3pin << 2);
+ data->has_fan_min |= (fan3pin << 2);
+
+ /*
+ * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 register
+ */
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
+ data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
+ } else {
+ i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((i & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(i & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+ }
/* Read fan clock dividers immediately */
- w83627ehf_update_fan_div(data);
+ w83627ehf_update_fan_div_common(dev, data);
+
+ /* Read pwm data to save original values */
+ w83627ehf_update_pwm_common(dev, data);
+ for (i = 0; i < data->pwm_num; i++)
+ data->pwm_enable_orig[i] = data->pwm_enable[i];
+
/* Register sysfs hooks */
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
- if ((err = device_create_file(dev,
- &sda_sf3_arrays[i].dev_attr)))
+ for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
+ err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
+ if (err)
goto exit_remove;
+ }
for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
struct sensor_device_attribute *attr =
&sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
err = device_create_file(dev, &attr->dev_attr);
if (err)
goto exit_remove;
@@ -1518,8 +2175,9 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* if fan4 is enabled create the sf3 files for it */
if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
- if ((err = device_create_file(dev,
- &sda_sf3_arrays_fan4[i].dev_attr)))
+ err = device_create_file(dev,
+ &sda_sf3_arrays_fan4[i].dev_attr);
+ if (err)
goto exit_remove;
}
@@ -1541,12 +2199,20 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
if ((err = device_create_file(dev,
&sda_fan_input[i].dev_attr))
|| (err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_div[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_min[i].dev_attr)))
+ &sda_fan_alarm[i].dev_attr)))
goto exit_remove;
+ if (sio_data->kind != nct6776) {
+ err = device_create_file(dev,
+ &sda_fan_div[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->has_fan_min & (1 << i)) {
+ err = device_create_file(dev,
+ &sda_fan_min[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
if (i < data->pwm_num &&
((err = device_create_file(dev,
&sda_pwm[i].dev_attr))
@@ -1562,16 +2228,33 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < 3; i++) {
- if ((i == 2) && data->temp3_disable)
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+ err = device_create_file(dev, &sda_temp_input[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ if (data->temp_label) {
+ err = device_create_file(dev,
+ &sda_temp_label[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->reg_temp_over[i]) {
+ err = device_create_file(dev,
+ &sda_temp_max[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->reg_temp_hyst[i]) {
+ err = device_create_file(dev,
+ &sda_temp_max_hyst[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (i > 2)
continue;
if ((err = device_create_file(dev,
- &sda_temp_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_max[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr))
- || (err = device_create_file(dev,
&sda_temp_alarm[i].dev_attr))
|| (err = device_create_file(dev,
&sda_temp_type[i].dev_attr)))
@@ -1632,6 +2315,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
static const char __initdata sio_name_W83667HG[] = "W83667HG";
static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B";
+ static const char __initdata sio_name_NCT6775[] = "NCT6775F";
+ static const char __initdata sio_name_NCT6776[] = "NCT6776F";
u16 val;
const char *sio_name;
@@ -1668,6 +2353,14 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83667hg_b;
sio_name = sio_name_W83667HG_B;
break;
+ case SIO_NCT6775_ID:
+ sio_data->kind = nct6775;
+ sio_name = sio_name_NCT6775;
+ break;
+ case SIO_NCT6776_ID:
+ sio_data->kind = nct6776;
+ sio_name = sio_name_NCT6776;
+ break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -1689,7 +2382,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
/* Activate logical device if needed */
val = superio_inb(sioaddr, SIO_REG_ENABLE);
if (!(val & 0x01)) {
- pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
+ pr_warn("Forcibly enabling Super-I/O. "
+ "Sensor is probably unusable.\n");
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
@@ -1726,7 +2420,8 @@ static int __init sensors_w83627ehf_init(void)
if (err)
goto exit;
- if (!(pdev = platform_device_alloc(DRVNAME, address))) {
+ pdev = platform_device_alloc(DRVNAME, address);
+ if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
goto exit_unregister;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 113505a6434e..230601e8853f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -433,7 +433,7 @@ config I2C_IXP2000
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
- depends on PPC32
+ depends on PPC
help
If you say yes to this option, support will be included for the
built-in I2C interface on the MPC107, Tsi107, MPC512x, MPC52xx,
@@ -452,6 +452,16 @@ config I2C_MV64XXX
This driver can also be built as a module. If so, the module
will be called i2c-mv64xxx.
+config I2C_MXS
+ tristate "Freescale i.MX28 I2C interface"
+ depends on SOC_IMX28
+ help
+ Say Y here if you want to use the I2C bus controller on
+ the Freescale i.MX28 processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mxs.
+
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
depends on PLAT_NOMADIK
@@ -523,6 +533,17 @@ config I2C_PNX
This driver can also be built as a module. If so, the module
will be called i2c-pnx.
+config I2C_PUV3
+ tristate "PKUnity v3 I2C bus support"
+ depends on UNICORE32 && ARCH_PUV3
+ select I2C_ALGOBIT
+ help
+ This driver supports the I2C IP inside the PKUnity-v3 SoC.
+	  This I2C bus controller is attached to the AMBA/AXI bus.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-puv3.
+
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
depends on ARCH_PXA || ARCH_MMP
@@ -607,6 +628,13 @@ config I2C_STU300
This driver can also be built as a module. If so, the module
will be called i2c-stu300.
+config I2C_TEGRA
+ tristate "NVIDIA Tegra internal I2C controller"
+ depends on ARCH_TEGRA
+ help
+ If you say yes to this option, support will be included for the
+	  I2C controller embedded in NVIDIA Tegra SoCs.
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 9d2d0ec7fb23..3878c959d4fa 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
+obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
+obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
@@ -58,6 +60,7 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
new file mode 100644
index 000000000000..8022e2390a5a
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -0,0 +1,412 @@
+/*
+ * Freescale MXS I2C bus driver
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * based on a (non-working) driver which was:
+ *
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * TODO: add dma-support if platform-support for it is available
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+
+#include <mach/common.h>
+
+#define DRIVER_NAME "mxs-i2c"
+
+#define MXS_I2C_CTRL0 (0x00)
+#define MXS_I2C_CTRL0_SET (0x04)
+
+#define MXS_I2C_CTRL0_SFTRST 0x80000000
+#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
+#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
+#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
+#define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000
+#define MXS_I2C_CTRL0_MASTER_MODE 0x00020000
+#define MXS_I2C_CTRL0_DIRECTION 0x00010000
+#define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF)
+
+#define MXS_I2C_CTRL1 (0x40)
+#define MXS_I2C_CTRL1_SET (0x44)
+#define MXS_I2C_CTRL1_CLR (0x48)
+
+#define MXS_I2C_CTRL1_BUS_FREE_IRQ 0x80
+#define MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ 0x40
+#define MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ 0x20
+#define MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ 0x10
+#define MXS_I2C_CTRL1_EARLY_TERM_IRQ 0x08
+#define MXS_I2C_CTRL1_MASTER_LOSS_IRQ 0x04
+#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02
+#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
+
+#define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \
+ MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \
+ MXS_I2C_CTRL1_EARLY_TERM_IRQ | \
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_IRQ)
+
+#define MXS_I2C_QUEUECTRL (0x60)
+#define MXS_I2C_QUEUECTRL_SET (0x64)
+#define MXS_I2C_QUEUECTRL_CLR (0x68)
+
+#define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20
+#define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04
+
+#define MXS_I2C_QUEUESTAT (0x70)
+#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
+
+#define MXS_I2C_QUEUECMD (0x80)
+
+#define MXS_I2C_QUEUEDATA (0x90)
+
+#define MXS_I2C_DATA (0xa0)
+
+
+#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \
+ MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION | \
+ MXS_I2C_CTRL0_XFER_COUNT(1))
+
+#define MXS_CMD_I2C_WRITE (MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION)
+
+#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
+ MXS_I2C_CTRL0_MASTER_MODE)
+
+/**
+ * struct mxs_i2c_dev - per device, private MXS-I2C data
+ *
+ * @dev: driver model device node
+ * @regs: IO registers pointer
+ * @cmd_complete: completion object for transaction wait
+ * @cmd_err: error code for last transaction
+ * @adapter: i2c subsystem adapter node
+ */
+struct mxs_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct completion cmd_complete;
+ u32 cmd_err;
+ struct i2c_adapter adapter;
+};
+
+/*
+ * TODO: check if calls to here are really needed. If not, we could get rid of
+ * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
+ */
+static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
+{
+ mxs_reset_block(i2c->regs);
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+}
+
+static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len,
+ int flags)
+{
+ u32 data;
+
+ writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD);
+
+ data = (addr << 1) | I2C_SMBUS_READ;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+
+ data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+}
+
+static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c,
+ u8 addr, u8 *buf, int len, int flags)
+{
+ u32 data;
+ int i, shifts_left;
+
+ data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+
+ /*
+ * We have to copy the slave address (u8) and buffer (arbitrary number
+ * of u8) into the data register (u32). To achieve that, the u8 are put
+ * into the MSBs of 'data' which is then shifted for the next u8. When
+	 * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32
+ * looks like this:
+ *
+ * 3 2 1 0
+ * 10987654|32109876|54321098|76543210
+ * --------+--------+--------+--------
+ * buffer+2|buffer+1|buffer+0|slave_addr
+ */
+
+ data = ((addr << 1) | I2C_SMBUS_WRITE) << 24;
+
+ for (i = 0; i < len; i++) {
+ data >>= 8;
+ data |= buf[i] << 24;
+ if ((i & 3) == 2)
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+
+ /* Write out the remaining bytes if any */
+ shifts_left = 24 - (i & 3) * 8;
+ if (shifts_left)
+ writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA);
+}
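
[Note: the packing comment above is easiest to verify with a standalone model. The sketch below replays the shift-in-from-the-top scheme for an assumed 3-byte payload to slave 0x50; with the address byte that makes exactly one FIFO word, so the final partial-word flush is a no-op.]

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr = 0x50, buf[] = { 0xaa, 0xbb, 0xcc };
	int len = 3, i, shifts_left;
	/* I2C_SMBUS_WRITE is 0, so the first byte is just addr << 1 */
	uint32_t data = (uint32_t)(addr << 1) << 24;

	for (i = 0; i < len; i++) {
		data >>= 8;			/* make room at the top */
		data |= (uint32_t)buf[i] << 24;	/* newest byte in the MSB */
		if ((i & 3) == 2)		/* four bytes collected */
			printf("FIFO word: 0x%08" PRIx32 "\n", data);
			/* prints 0xccbbaaa0: buf+2|buf+1|buf+0|addr<<1 */
	}
	shifts_left = 24 - (i & 3) * 8;		/* 0 here: nothing left over */
	if (shifts_left)
		printf("FIFO word: 0x%08" PRIx32 "\n", data >> shifts_left);
	return 0;
}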
+
+/*
+ * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the
+ * rd_threshold to 1). Couldn't get this to work, though.
+ */
+static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (readl(i2c->regs + MXS_I2C_QUEUESTAT)
+ & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if ((i & 3) == 0) {
+ if (mxs_i2c_wait_for_data(i2c))
+ return -ETIMEDOUT;
+ data = readl(i2c->regs + MXS_I2C_QUEUEDATA);
+ }
+ buf[i] = data & 0xff;
+ data >>= 8;
+ }
+
+ return 0;
+}
+
+/*
+ * Low level master read/write transaction.
+ */
+static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int stop)
+{
+ struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ int ret;
+ int flags;
+
+ init_completion(&i2c->cmd_complete);
+
+ dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+ msg->addr, msg->len, msg->flags, stop);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
+
+ if (msg->flags & I2C_M_RD)
+ mxs_i2c_pioq_setup_read(i2c, msg->addr, msg->len, flags);
+ else
+ mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf, msg->len,
+ flags);
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+
+ ret = wait_for_completion_timeout(&i2c->cmd_complete,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ goto timeout;
+
+ if ((!i2c->cmd_err) && (msg->flags & I2C_M_RD)) {
+ ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len);
+ if (ret)
+ goto timeout;
+ }
+
+ if (i2c->cmd_err == -ENXIO)
+ mxs_i2c_reset(i2c);
+
+ dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
+
+ return i2c->cmd_err;
+
+timeout:
+ dev_dbg(i2c->dev, "Timeout!\n");
+ mxs_i2c_reset(i2c);
+ return -ETIMEDOUT;
+}
+
+static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < num; i++) {
+ err = mxs_i2c_xfer_msg(adap, &msgs[i], i == (num - 1));
+ if (err)
+ return err;
+ }
+
+ return num;
+}
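
[Note: mxs_i2c_xfer() is what the core invokes on behalf of i2c_transfer(). As a hedged sketch of generic client-side usage, not code from this patch, a client driver might exercise it for a typical register read as below; the 0x50/0x10 addresses are placeholders.]

#include <linux/errno.h>
#include <linux/i2c.h>

/* Read one byte from register 0x10 of a device at 0x50: a write of the
 * register address followed by a repeated-start read. */
static int example_read_reg(struct i2c_adapter *adap, u8 *out)
{
	u8 reg = 0x10;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = out  },
	};
	int ret = i2c_transfer(adap, msgs, 2);

	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}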
+
+static u32 mxs_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
+{
+ struct mxs_i2c_dev *i2c = dev_id;
+ u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ)
+ i2c->cmd_err = -ENXIO;
+ else if (stat & (MXS_I2C_CTRL1_EARLY_TERM_IRQ |
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ |
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
+ /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
+ i2c->cmd_err = -EIO;
+ else
+ i2c->cmd_err = 0;
+
+ complete(&i2c->cmd_complete);
+
+ writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
+ return IRQ_HANDLED;
+}
+
+static const struct i2c_algorithm mxs_i2c_algo = {
+ .master_xfer = mxs_i2c_xfer,
+ .functionality = mxs_i2c_func,
+};
+
+static int __devinit mxs_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxs_i2c_dev *i2c;
+ struct i2c_adapter *adap;
+ struct resource *res;
+ resource_size_t res_size;
+ int err, irq;
+
+ i2c = devm_kzalloc(dev, sizeof(struct mxs_i2c_dev), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ res_size = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, res_size, res->name))
+ return -EBUSY;
+
+ i2c->regs = devm_ioremap_nocache(dev, res->start, res_size);
+ if (!i2c->regs)
+ return -EBUSY;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c);
+ if (err)
+ return err;
+
+ i2c->dev = dev;
+ platform_set_drvdata(pdev, i2c);
+
+ /* Do reset to enforce correct startup after pinmuxing */
+ mxs_i2c_reset(i2c);
+ writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+
+ adap = &i2c->adapter;
+ strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->algo = &mxs_i2c_algo;
+ adap->dev.parent = dev;
+ adap->nr = pdev->id;
+ i2c_set_adapdata(adap, i2c);
+ err = i2c_add_numbered_adapter(adap);
+ if (err) {
+ dev_err(dev, "Failed to add adapter (%d)\n", err);
+ writel(MXS_I2C_CTRL0_SFTRST,
+ i2c->regs + MXS_I2C_CTRL0_SET);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devexit mxs_i2c_remove(struct platform_device *pdev)
+{
+ struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i2c_del_adapter(&i2c->adapter);
+ if (ret)
+ return -EBUSY;
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_CLR);
+ writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mxs_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(mxs_i2c_remove),
+};
+
+static int __init mxs_i2c_init(void)
+{
+ return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe);
+}
+subsys_initcall(mxs_i2c_init);
+
+static void __exit mxs_i2c_exit(void)
+{
+ platform_driver_unregister(&mxs_i2c_driver);
+}
+module_exit(mxs_i2c_exit);
+
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("MXS I2C Bus Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
new file mode 100644
index 000000000000..fac673940849
--- /dev/null
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -0,0 +1,306 @@
+/*
+ * I2C driver for PKUnity-v3 SoC
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+/*
+ * Poll the i2c status register until the specified bit is set.
+ * Returns 0 if timed out (100 msec).
+ */
+static short poll_status(unsigned long bit)
+{
+ int loop_cntr = 1000;
+
+ if (bit & I2C_STATUS_TFNF) {
+ do {
+ udelay(10);
+ } while (!(readl(I2C_STATUS) & bit) && (--loop_cntr > 0));
+ } else {
+ /* RXRDY handler */
+ do {
+ if (readl(I2C_TAR) == I2C_TAR_EEPROM)
+ msleep(20);
+ else
+ udelay(10);
+ } while (!(readl(I2C_RXFLR) & 0xf) && (--loop_cntr > 0));
+ }
+
+ return (loop_cntr > 0);
+}
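
[Note: poll_status() is a bounded busy-wait. Its shape, reduced to essentials in a runnable sketch; the fake status source and the budget are illustrative.]

#include <stdio.h>

/* Stand-in for a device status read; flips on after a few polls. */
static int read_status(void)
{
	static int polls;
	return ++polls > 7;	/* "bit" comes up on the 8th read */
}

/* Bounded busy-wait, same shape as poll_status(): nonzero on success. */
static int poll_bounded(void)
{
	int loop_cntr = 1000;

	do {
		/* udelay(10) would sit here in the driver */
	} while (!read_status() && --loop_cntr > 0);

	return loop_cntr > 0;
}

int main(void)
{
	printf("ready: %d\n", poll_bounded());	/* ready: 1 */
	return 0;
}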
+
+static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
+{
+ int i2c_reg = *buf;
+
+ /* Read data */
+ while (length--) {
+ if (!poll_status(I2C_STATUS_TFNF)) {
+ dev_dbg(&adap->dev, "Tx FIFO Not Full timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* send addr */
+ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+		/* get ready for the next write */
+ i2c_reg++;
+
+ /* send read CMD */
+ writel(I2C_DATACMD_READ, I2C_DATACMD);
+
+		/* wait until the Rx FIFO has data available */
+ if (!poll_status(I2C_STATUS_RFNE)) {
+ dev_dbg(&adap->dev, "RXRDY timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* read the data to buf */
+ *buf = (readl(I2C_DATACMD) & I2C_DATACMD_DAT_MASK);
+ buf++;
+ }
+
+ return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length)
+{
+ int i2c_reg = *buf;
+
+	/* Bail out on an invalid register number */
+ if (i2c_reg == -1) {
+ printk(KERN_WARNING "Error i2c reg\n");
+ return -ETIMEDOUT;
+ }
+
+ if (length == 1)
+ return 0;
+
+ buf++;
+ length--;
+ while (length--) {
+ /* send addr */
+ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+ /* send write CMD */
+ writel(*buf | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+		/* give the write time to reach the bus */
+		msleep(20);
+
+		/* advance to the next register and source byte */
+ i2c_reg++;
+ buf++;
+ }
+
+ return 0;
+}
+
+/*
+ * Generic i2c master transfer entrypoint.
+ */
+static int puv3_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg,
+ int num)
+{
+ int i, ret;
+ unsigned char swap;
+
+ /* Disable i2c */
+ writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
+
+ /* Set the work mode and speed*/
+ writel(I2C_CON_MASTER | I2C_CON_SPEED_STD | I2C_CON_SLAVEDISABLE, I2C_CON);
+
+ writel(pmsg->addr, I2C_TAR);
+
+ /* Enable i2c */
+ writel(I2C_ENABLE_ENABLE, I2C_ENABLE);
+
+ dev_dbg(&adap->dev, "puv3_i2c_xfer: processing %d messages:\n", num);
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
+ pmsg->flags & I2C_M_RD ? "read" : "writ",
+ pmsg->len, pmsg->len > 1 ? "s" : "",
+ pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
+
+ if (pmsg->len && pmsg->buf) { /* sanity check */
+ if (pmsg->flags & I2C_M_RD)
+ ret = xfer_read(adap, pmsg->buf, pmsg->len);
+ else
+ ret = xfer_write(adap, pmsg->buf, pmsg->len);
+
+ if (ret)
+ return ret;
+
+ }
+ dev_dbg(&adap->dev, "transfer complete\n");
+ pmsg++; /* next message */
+ }
+
+	/* XXX: fixup be16_to_cpu in bq27x00_battery.c. Note that pmsg
+	 * now points one past the last message, so look back at it. */
+	if (num && pmsg[-1].addr == I2C_TAR_PWIC) {
+		swap = pmsg[-1].buf[0];
+		pmsg[-1].buf[0] = pmsg[-1].buf[1];
+		pmsg[-1].buf[1] = swap;
+	}
+
+ return i;
+}
+
+/*
+ * Return list of supported functionality.
+ */
+static u32 puv3_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm puv3_i2c_algorithm = {
+ .master_xfer = puv3_i2c_xfer,
+ .functionality = puv3_i2c_func,
+};
+
+/*
+ * Main initialization routine.
+ */
+static int __devinit puv3_i2c_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ struct resource *mem;
+ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+
+ if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c"))
+ return -EBUSY;
+
+ adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
+ if (adapter == NULL) {
+		dev_err(&pdev->dev, "can't allocate interface!\n");
+ rc = -ENOMEM;
+ goto fail_nomem;
+ }
+ snprintf(adapter->name, sizeof(adapter->name), "PUV3-I2C at 0x%08x",
+ mem->start);
+ adapter->algo = &puv3_i2c_algorithm;
+ adapter->class = I2C_CLASS_HWMON;
+ adapter->dev.parent = &pdev->dev;
+
+ platform_set_drvdata(pdev, adapter);
+
+ adapter->nr = pdev->id;
+ rc = i2c_add_numbered_adapter(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Adapter '%s' registration failed\n",
+ adapter->name);
+ goto fail_add_adapter;
+ }
+
+ dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n");
+ return 0;
+
+fail_add_adapter:
+ platform_set_drvdata(pdev, NULL);
+ kfree(adapter);
+fail_nomem:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return rc;
+}
+
+static int __devexit puv3_i2c_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+ struct resource *mem;
+ int rc;
+
+ rc = i2c_del_adapter(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Adapter '%s' delete fail\n",
+ adapter->name);
+ return rc;
+ }
+
+ put_device(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return rc;
+}
+
+#ifdef CONFIG_PM
+static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int poll_count;
+ /* Disable the IIC */
+ writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
+ for (poll_count = 0; poll_count < 50; poll_count++) {
+ if (readl(I2C_ENSTATUS) & I2C_ENSTATUS_ENABLE)
+ udelay(25);
+ }
+
+ return 0;
+}
+
+static int puv3_i2c_resume(struct platform_device *dev)
+{
+	return 0;
+}
+#else
+#define puv3_i2c_suspend NULL
+#define puv3_i2c_resume NULL
+#endif
+
+MODULE_ALIAS("platform:puv3_i2c");
+
+static struct platform_driver puv3_i2c_driver = {
+ .probe = puv3_i2c_probe,
+ .remove = __devexit_p(puv3_i2c_remove),
+ .suspend = puv3_i2c_suspend,
+ .resume = puv3_i2c_resume,
+ .driver = {
+ .name = "PKUnity-v3-I2C",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init puv3_i2c_init(void)
+{
+ return platform_driver_register(&puv3_i2c_driver);
+}
+
+static void __exit puv3_i2c_exit(void)
+{
+ platform_driver_unregister(&puv3_i2c_driver);
+}
+
+module_init(puv3_i2c_init);
+module_exit(puv3_i2c_exit);
+
+MODULE_DESCRIPTION("PKUnity v3 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
new file mode 100644
index 000000000000..3921f664c9c3
--- /dev/null
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -0,0 +1,700 @@
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c-tegra.h>
+
+#include <asm/unaligned.h>
+
+#include <mach/clk.h>
+
+#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
+#define BYTES_PER_FIFO_WORD 4
+
+#define I2C_CNFG 0x000
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_ADDR1 0x02c
+#define I2C_TX_FIFO 0x050
+#define I2C_RX_FIFO 0x054
+#define I2C_PACKET_TRANSFER_STATUS 0x058
+#define I2C_FIFO_CONTROL 0x05c
+#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
+#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
+#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
+#define I2C_FIFO_STATUS 0x060
+#define I2C_FIFO_STATUS_TX_MASK 0xF0
+#define I2C_FIFO_STATUS_TX_SHIFT 4
+#define I2C_FIFO_STATUS_RX_MASK 0x0F
+#define I2C_FIFO_STATUS_RX_SHIFT 0
+#define I2C_INT_MASK 0x064
+#define I2C_INT_STATUS 0x068
+#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
+#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
+#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
+#define I2C_INT_NO_ACK (1<<3)
+#define I2C_INT_ARBITRATION_LOST (1<<2)
+#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
+#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_CLK_DIVISOR 0x06c
+
+#define DVC_CTRL_REG1 0x000
+#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG2 0x004
+#define DVC_CTRL_REG3 0x008
+#define DVC_CTRL_REG3_SW_PROG (1<<26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_STATUS 0x00c
+#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+
+#define I2C_ERR_NONE 0x00
+#define I2C_ERR_NO_ACK 0x01
+#define I2C_ERR_ARBITRATION_LOST 0x02
+
+#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
+#define PACKET_HEADER0_PACKET_ID_SHIFT 16
+#define PACKET_HEADER0_CONT_ID_SHIFT 12
+#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
+
+#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
+#define I2C_HEADER_CONT_ON_NAK (1<<21)
+#define I2C_HEADER_SEND_START_BYTE (1<<20)
+#define I2C_HEADER_READ (1<<19)
+#define I2C_HEADER_10BIT_ADDR (1<<18)
+#define I2C_HEADER_IE_ENABLE (1<<17)
+#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_MASTER_ADDR_SHIFT 12
+#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+/**
+ * struct tegra_i2c_dev - per device i2c context
+ * @dev: device reference for power management
+ * @adapter: core i2c layer adapter information
+ * @clk: clock reference for i2c controller
+ * @i2c_clk: clock reference for i2c bus
+ * @iomem: memory resource for registers
+ * @base: ioremapped registers cookie
+ * @cont_id: i2c controller id, used for the packet header
+ * @irq: irq number of transfer complete interrupt
+ * @is_dvc: identifies the DVC i2c controller, has a different register layout
+ * @msg_complete: transfer completion notifier
+ * @msg_err: error code for completed message
+ * @msg_buf: pointer to current message data
+ * @msg_buf_remaining: size of unsent data in the message buffer
+ * @msg_read: identifies read transfers
+ * @bus_clk_rate: current i2c bus clock rate
+ * @is_suspended: prevents i2c controller accesses after suspend is called
+ */
+struct tegra_i2c_dev {
+ struct device *dev;
+ struct i2c_adapter adapter;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ struct resource *iomem;
+ void __iomem *base;
+ int cont_id;
+ int irq;
+ int is_dvc;
+ struct completion msg_complete;
+ int msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+ int msg_read;
+ unsigned long bus_clk_rate;
+ bool is_suspended;
+};
+
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + reg);
+}
+
+/*
+ * i2c_writel and i2c_readl will offset the register if necessary to talk
+ * to the I2C block inside the DVC block
+ */
+static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
+ unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ return reg;
+}
+
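+/*
+ * For example, on the DVC variant I2C_CNFG (0x000) is accessed at offset
+ * 0x040, while I2C_TX_FIFO (0x050) and every register above it moves up
+ * by 0x10.
+ */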
+static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
+ unsigned long reg)
+{
+ writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask &= ~mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask |= mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
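+/*
+ * Set both FIFO flush bits and poll until the hardware clears them again,
+ * giving up after roughly one second.
+ */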
+static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long timeout = jiffies + HZ;
+ u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
+ (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int rx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+ I2C_FIFO_STATUS_RX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > rx_fifo_avail)
+ words_to_transfer = rx_fifo_avail;
+
+ i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ rx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent overwriting past the end of buf
+ */
+ if (rx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+ memcpy(buf, &val, buf_remaining);
+ buf_remaining = 0;
+ rx_fifo_avail--;
+ }
+
+ BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int tx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+ I2C_FIFO_STATUS_TX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ tx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent reading past the end of buf, which could cross a page
+ * boundary and fault.
+ */
+ if (tx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ memcpy(&val, buf, buf_remaining);
+ i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+ buf_remaining = 0;
+ tx_fifo_avail--;
+ }
+
+ BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+/*
+ * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
+ * block. This block is identical to the rest of the I2C blocks, except that
+ * it only supports master mode, it has registers moved around, and it needs
+ * some extra init to get it into I2C mode. The register moves are handled
+ * by i2c_readl and i2c_writel
+ */
+static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
+ val |= DVC_CTRL_REG3_SW_PROG;
+ val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG3);
+
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG1);
+ val |= DVC_CTRL_REG1_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
+}
+
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int err = 0;
+
+ clk_enable(i2c_dev->clk);
+
+ tegra_periph_reset_assert(i2c_dev->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(i2c_dev->clk);
+
+ if (i2c_dev->is_dvc)
+ tegra_dvc_init(i2c_dev);
+
+ val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN;
+ i2c_writel(i2c_dev, val, I2C_CNFG);
+ i2c_writel(i2c_dev, 0, I2C_INT_MASK);
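+ /*
+ * The source clock is set to eight times the bus rate; the controller
+ * presumably divides it down internally (note that I2C_CLK_DIVISOR is
+ * left at its reset value here).
+ */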
+ clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
+
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ if (tegra_i2c_flush_fifos(i2c_dev))
+ err = -ETIMEDOUT;
+
+ clk_disable(i2c_dev->clk);
+ return err;
+}
+
+static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+{
+ u32 status;
+ const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ struct tegra_i2c_dev *i2c_dev = dev_id;
+
+ status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+ if (status == 0) {
+ dev_warn(i2c_dev->dev, "interrupt with no status\n");
+ return IRQ_NONE;
+ }
+
+ if (unlikely(status & status_err)) {
+ if (status & I2C_INT_NO_ACK)
+ i2c_dev->msg_err |= I2C_ERR_NO_ACK;
+ if (status & I2C_INT_ARBITRATION_LOST)
+ i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
+ complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+ if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_empty_rx_fifo(i2c_dev);
+ else
+ BUG();
+ }
+
+ if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+ else
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
+ }
+
+ if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
+ !i2c_dev->msg_buf_remaining)
+ complete(&i2c_dev->msg_complete);
+
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
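+ /* DVC controllers also latch completion in DVC_STATUS; ack it there. */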
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ return IRQ_HANDLED;
+err:
+ /* An error occurred, mask all interrupts */
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
+ I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
+ I2C_INT_RX_FIFO_DATA_REQ);
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ struct i2c_msg *msg, int stop)
+{
+ u32 packet_header;
+ u32 int_mask;
+ int ret;
+
+ tegra_i2c_flush_fifos(i2c_dev);
+ i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+ i2c_dev->msg_err = I2C_ERR_NONE;
+ i2c_dev->msg_read = (msg->flags & I2C_M_RD);
+ INIT_COMPLETION(i2c_dev->msg_complete);
+
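+ /*
+ * A packet-mode transfer starts with a three-word header in the TX
+ * FIFO: word 0 carries the protocol, controller and packet ids, word 1
+ * the payload length minus one, and word 2 the slave address and flags.
+ */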
+ packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+ PACKET_HEADER0_PROTOCOL_I2C |
+ (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
+ (1 << PACKET_HEADER0_PACKET_ID_SHIFT);
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->len - 1;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ packet_header |= I2C_HEADER_IE_ENABLE;
+ if (msg->flags & I2C_M_TEN)
+ packet_header |= I2C_HEADER_10BIT_ADDR;
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ packet_header |= I2C_HEADER_CONT_ON_NAK;
+ if (msg->flags & I2C_M_NOSTART)
+ packet_header |= I2C_HEADER_REPEAT_START;
+ if (msg->flags & I2C_M_RD)
+ packet_header |= I2C_HEADER_READ;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ if (!(msg->flags & I2C_M_RD))
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ if (msg->flags & I2C_M_RD)
+ int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
+ else if (i2c_dev->msg_buf_remaining)
+ int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+ tegra_i2c_unmask_irq(i2c_dev, int_mask);
+ dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
+ i2c_readl(i2c_dev, I2C_INT_MASK));
+
+ ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+ tegra_i2c_mask_irq(i2c_dev, int_mask);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+
+ tegra_i2c_init(i2c_dev);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(i2c_dev->dev, "transfer complete: %d %d %d\n",
+ ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err);
+
+ if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
+ return 0;
+
+ tegra_i2c_init(i2c_dev);
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ return 0;
+ return -EREMOTEIO;
+ }
+
+ return -EIO;
+}
+
+static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ int ret = 0;
+
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
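+ /* The controller clock is only enabled for the duration of a transfer. */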
+ clk_enable(i2c_dev->clk);
+ for (i = 0; i < num; i++) {
+ int stop = (i == (num - 1)) ? 1 : 0;
+ ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+ if (ret)
+ break;
+ }
+ clk_disable(i2c_dev->clk);
+ return ret ?: i;
+}
+
+static u32 tegra_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm tegra_i2c_algo = {
+ .master_xfer = tegra_i2c_xfer,
+ .functionality = tegra_i2c_func,
+};
+
+static int tegra_i2c_probe(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev;
+ struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct resource *iomem;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ void __iomem *base;
+ int irq;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource\n");
+ return -EINVAL;
+ }
+ iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!iomem) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ base = ioremap(iomem->start, resource_size(iomem));
+ if (!base) {
+ dev_err(&pdev->dev, "Cannot ioremap I2C region\n");
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ irq = res->start;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "missing controller clock");
+ ret = PTR_ERR(clk);
+ goto err_iounmap;
+ }
+
+ i2c_clk = clk_get(&pdev->dev, "i2c");
+ if (IS_ERR(i2c_clk)) {
+ dev_err(&pdev->dev, "missing bus clock");
+ ret = PTR_ERR(i2c_clk);
+ goto err_clk_put;
+ }
+
+ i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL);
+ if (!i2c_dev) {
+ ret = -ENOMEM;
+ goto err_i2c_clk_put;
+ }
+
+ i2c_dev->base = base;
+ i2c_dev->clk = clk;
+ i2c_dev->i2c_clk = i2c_clk;
+ i2c_dev->iomem = iomem;
+ i2c_dev->irq = irq;
+ i2c_dev->cont_id = pdev->id;
+ i2c_dev->dev = &pdev->dev;
+ i2c_dev->bus_clk_rate = pdata ? pdata->bus_clk_rate : 100000;
+
+ if (pdev->id == 3)
+ i2c_dev->is_dvc = 1;
+ init_completion(&i2c_dev->msg_complete);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = tegra_i2c_init(i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize i2c controller");
+ goto err_free;
+ }
+
+ ret = request_irq(i2c_dev->irq, tegra_i2c_isr, 0, pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+ goto err_free;
+ }
+
+ clk_enable(i2c_dev->i2c_clk);
+
+ i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
+ i2c_dev->adapter.owner = THIS_MODULE;
+ i2c_dev->adapter.class = I2C_CLASS_HWMON;
+ strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
+ sizeof(i2c_dev->adapter.name));
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
+ i2c_dev->adapter.dev.parent = &pdev->dev;
+ i2c_dev->adapter.nr = pdev->id;
+
+ ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+err_free_irq:
+ free_irq(i2c_dev->irq, i2c_dev);
+err_free:
+ kfree(i2c_dev);
+err_i2c_clk_put:
+ clk_put(i2c_clk);
+err_clk_put:
+ clk_put(clk);
+err_iounmap:
+ iounmap(base);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+ return ret;
+}
+
+static int tegra_i2c_remove(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ i2c_del_adapter(&i2c_dev->adapter);
+ free_irq(i2c_dev->irq, i2c_dev);
+ clk_put(i2c_dev->i2c_clk);
+ clk_put(i2c_dev->clk);
+ release_mem_region(i2c_dev->iomem->start,
+ resource_size(i2c_dev->iomem));
+ iounmap(i2c_dev->base);
+ kfree(i2c_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_dev->is_suspended = true;
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+
+static int tegra_i2c_resume(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+
+ ret = tegra_i2c_init(i2c_dev);
+
+ if (ret) {
+ i2c_unlock_adapter(&i2c_dev->adapter);
+ return ret;
+ }
+
+ i2c_dev->is_suspended = false;
+
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_i2c_driver = {
+ .probe = tegra_i2c_probe,
+ .remove = tegra_i2c_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_i2c_suspend,
+ .resume = tegra_i2c_resume,
+#endif
+ .driver = {
+ .name = "tegra-i2c",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_i2c_init_driver(void)
+{
+ return platform_driver_register(&tegra_i2c_driver);
+}
+
+static void __exit tegra_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&tegra_i2c_driver);
+}
+
+subsys_initcall(tegra_i2c_init_driver);
+module_exit(tegra_i2c_exit_driver);
+
+MODULE_DESCRIPTION("nVidia Tegra2 I2C Bus Controller driver");
+MODULE_AUTHOR("Colin Cross");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7b2fc98e2f2b..8db008de5392 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -532,6 +532,25 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->custom[3].value = conn->fmr_unalign_cnt;
}
+static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct iser_conn *ib_conn = ep->dd_data;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!ib_conn || !ib_conn->cma_id)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &ib_conn->cma_id->route.addr.dst_addr,
+ param, buf);
+ default:
+ return -ENOSYS;
+ }
+}
+
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_MAX_BURST |
ISCSI_PDU_INORDER_EN |
ISCSI_DATASEQ_INORDER_EN |
+ ISCSI_CONN_PORT |
+ ISCSI_CONN_ADDRESS |
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
@@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.destroy_conn = iscsi_iser_conn_destroy,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_iser_conn_stop,
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
new file mode 100644
index 000000000000..73f5cc124a36
--- /dev/null
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -0,0 +1,74 @@
+/*
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2011 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _I8042_UNICORE32_H
+#define _I8042_UNICORE32_H
+
+#include <mach/hardware.h>
+
+/*
+ * Names.
+ */
+#define I8042_KBD_PHYS_DESC "isa0060/serio0"
+#define I8042_AUX_PHYS_DESC "isa0060/serio1"
+#define I8042_MUX_PHYS_DESC "isa0060/serio%d"
+
+/*
+ * IRQs.
+ */
+#define I8042_KBD_IRQ IRQ_PS2_KBD
+#define I8042_AUX_IRQ IRQ_PS2_AUX
+
+/*
+ * Register numbers.
+ */
+#define I8042_COMMAND_REG PS2_COMMAND
+#define I8042_STATUS_REG PS2_STATUS
+#define I8042_DATA_REG PS2_DATA
+
+#define I8042_REGION_START (resource_size_t)(PS2_DATA)
+#define I8042_REGION_SIZE (resource_size_t)(16)
+
+static inline int i8042_read_data(void)
+{
+ return readb(I8042_DATA_REG);
+}
+
+static inline int i8042_read_status(void)
+{
+ return readb(I8042_STATUS_REG);
+}
+
+static inline void i8042_write_data(int val)
+{
+ writeb(val, I8042_DATA_REG);
+}
+
+static inline void i8042_write_command(int val)
+{
+ writeb(val, I8042_COMMAND_REG);
+}
+
+static inline int i8042_platform_init(void)
+{
+ if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
+ return -EBUSY;
+
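+ /* Have the i8042 core reset the controller during initialisation. */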
+ i8042_reset = 1;
+ return 0;
+}
+
+static inline void i8042_platform_exit(void)
+{
+ release_mem_region(I8042_REGION_START, I8042_REGION_SIZE);
+}
+
+#endif /* _I8042_UNICORE32_H */
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index ac1d759d0f55..3452708fbe3b 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -26,6 +26,8 @@
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
#include "i8042-x86ia64io.h"
+#elif defined(CONFIG_UNICORE32)
+#include "i8042-unicore32io.h"
#else
#include "i8042-io.h"
#endif
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b82d28819e2a..4b0b63c290a6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1283,24 +1283,22 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP)
- return error;
-
- if (clone->cmd_flags & REQ_DISCARD)
- /*
- * Pass all discard request failures up.
- * FIXME: only fail_path if the discard failed due to a
- * transport problem. This requires precise understanding
- * of the underlying failure (e.g. the SCSI sense).
- */
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO)
return error;
if (mpio->pgpath)
fail_path(mpio->pgpath);
spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
- r = -EIO;
+ if (!m->nr_valid_paths) {
+ if (!m->queue_if_no_path) {
+ if (!__must_push_back(m))
+ r = -EIO;
+ } else {
+ if (error == -EBADE)
+ r = error;
+ }
+ }
spin_unlock_irqrestore(&m->lock, flags);
return r;
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 013c7d881948..22027e7946f7 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_SAS_IOUNIT0_RATE_1_5 (0x08)
#define MPI_SAS_IOUNIT0_RATE_3_0 (0x09)
+#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A)
/* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 8faa4fab7b89..fd6222882a0e 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
#define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08)
#define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09)
+#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A)
/* SAS Discovery Event data */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3358c0af3466..ec8080c98081 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
- " Rate 3.0 Gpbs",PhyNumber);
+ " Rate 3.0 Gbps", PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate 6.0 Gbps", PhyNumber);
break;
default:
snprintf(evStr, EVENT_DESCR_STR_SZ,
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index e8deb8ed0499..878bda0cce70 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1314,8 +1314,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
else
karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
- if (karg->hdr.port > 1)
+ if (karg->hdr.port > 1) {
+ kfree(karg);
return -EINVAL;
+ }
port = karg->hdr.port;
karg->port = port;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 8aefb1829fcd..f5a14afad2cd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = {
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
- .eh_bus_reset_handler = mptscsih_bus_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_SAS_CAN_QUEUE,
@@ -3063,6 +3062,9 @@ static int mptsas_probe_one_phy(struct device *dev,
case MPI_SAS_IOUNIT0_RATE_3_0:
phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
+ case MPI_SAS_IOUNIT0_RATE_6_0:
+ phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ break;
case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
default:
@@ -3691,7 +3693,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
}
if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
- link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
+ link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
+ link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
if (!port_info) {
if (ioc->old_sas_discovery_protocal) {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 9d48659e3b28..32e64cc85d2c 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -145,13 +145,6 @@ static struct {
{ "Broadcom NetXtreme II BCM57712E XGb" }
};
-#ifndef PCI_DEVICE_ID_NX2_57712
-#define PCI_DEVICE_ID_NX2_57712 0x1662
-#endif
-#ifndef PCI_DEVICE_ID_NX2_57712E
-#define PCI_DEVICE_ID_NX2_57712E 0x1663
-#endif
-
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 98e6fdf34d30..77cf813ba264 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o
obj-$(CONFIG_X86) += setup-bus.o
obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
+obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
obj-$(CONFIG_PARISC) += setup-bus.o
obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
obj-$(CONFIG_PPC) += setup-bus.o
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a9fe23d5bd0f..379d8592bc6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2648,6 +2648,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
+ len_to_track_end = 0;
/*
* A tidaw can address 4k of memory, but must not cross page boundaries
* We can let the block layer handle this by setting
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3c3f3ffe2179..e950f1ad4dd1 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -50,7 +50,7 @@ MODULE_LICENSE("GPL");
static void chsc_subchannel_irq(struct subchannel *sch)
{
- struct chsc_private *private = sch->private;
+ struct chsc_private *private = dev_get_drvdata(&sch->dev);
struct chsc_request *request = private->request;
struct irb *irb = (struct irb *)&S390_lowcore.irb;
@@ -80,13 +80,14 @@ static int chsc_subchannel_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
+ dev_set_drvdata(&sch->dev, private);
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret) {
CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
+ dev_set_drvdata(&sch->dev, NULL);
kfree(private);
} else {
- sch->private = private;
if (dev_get_uevent_suppress(&sch->dev)) {
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
@@ -100,8 +101,8 @@ static int chsc_subchannel_remove(struct subchannel *sch)
struct chsc_private *private;
cio_disable_subchannel(sch);
- private = sch->private;
- sch->private = NULL;
+ private = dev_get_drvdata(&sch->dev);
+ dev_set_drvdata(&sch->dev, NULL);
if (private->request) {
complete(&private->request->completion);
put_device(&sch->dev);
@@ -147,7 +148,10 @@ static struct css_device_id chsc_subchannel_ids[] = {
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
static struct css_driver chsc_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "chsc_subchannel",
+ },
.subchannel_type = chsc_subchannel_ids,
.irq = chsc_subchannel_irq,
.probe = chsc_subchannel_probe,
@@ -157,7 +161,6 @@ static struct css_driver chsc_subchannel_driver = {
.freeze = chsc_subchannel_freeze,
.thaw = chsc_subchannel_restore,
.restore = chsc_subchannel_restore,
- .name = "chsc_subchannel",
};
static int __init chsc_init_dbfs(void)
@@ -241,7 +244,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
- private = sch->private;
+ private = dev_get_drvdata(&sch->dev);
if (private->request) {
spin_unlock(sch->lock);
ret = -EBUSY;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 430f875006f2..cbde448f9947 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -84,29 +84,14 @@ out_unregister:
arch_initcall (cio_debug_init);
-int
-cio_set_options (struct subchannel *sch, int flags)
+int cio_set_options(struct subchannel *sch, int flags)
{
- sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
- sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
- sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
- return 0;
-}
+ struct io_subchannel_private *priv = to_io_private(sch);
-/* FIXME: who wants to use this? */
-int
-cio_get_options (struct subchannel *sch)
-{
- int flags;
-
- flags = 0;
- if (sch->options.suspend)
- flags |= DOIO_ALLOW_SUSPEND;
- if (sch->options.prefetch)
- flags |= DOIO_DENY_PREFETCH;
- if (sch->options.inter)
- flags |= DOIO_SUPPRESS_INTER;
- return flags;
+ priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+ priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+ priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+ return 0;
}
static int
@@ -139,21 +124,21 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ union orb *orb = &priv->orb;
int ccode;
- union orb *orb;
CIO_TRACE_EVENT(5, "stIO");
CIO_TRACE_EVENT(5, dev_name(&sch->dev));
- orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
/* sch is always under 2G. */
orb->cmd.intparm = (u32)(addr_t)sch;
orb->cmd.fmt = 1;
- orb->cmd.pfch = sch->options.prefetch == 0;
- orb->cmd.spnd = sch->options.suspend;
- orb->cmd.ssic = sch->options.suspend && sch->options.inter;
+ orb->cmd.pfch = priv->options.prefetch == 0;
+ orb->cmd.spnd = priv->options.suspend;
+ orb->cmd.ssic = priv->options.suspend && priv->options.inter;
orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
/*
@@ -630,11 +615,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
irb = (struct irb *)&S390_lowcore.irb;
do {
kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
- /*
- * Non I/O-subchannel thin interrupts are processed differently
- */
- if (tpi_info->adapter_IO == 1 &&
- tpi_info->int_type == IO_INTERRUPT_TYPE) {
+ if (tpi_info->adapter_IO) {
do_adapter_IO(tpi_info->isc);
continue;
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index bf7f80f5a330..155a82bcb9e5 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -84,13 +84,6 @@ struct subchannel {
SUBCHANNEL_TYPE_MSG = 2,
SUBCHANNEL_TYPE_ADM = 3,
} st; /* subchannel type */
-
- struct {
- unsigned int suspend:1; /* allow suspend */
- unsigned int prefetch:1;/* deny prefetch */
- unsigned int inter:1; /* suppress intermediate interrupts */
- } __attribute__ ((packed)) options;
-
__u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */
__u8 opm; /* operational path mask */
@@ -99,14 +92,11 @@ struct subchannel {
struct chsc_ssd_info ssd_info; /* subchannel description */
struct device dev; /* entry in device tree */
struct css_driver *driver;
- void *private; /* private per subchannel type data */
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
} __attribute__ ((aligned(8)));
-#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
-
#define to_subchannel(n) container_of(n, struct subchannel, dev)
extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
@@ -120,7 +110,6 @@ extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
-extern int cio_get_options (struct subchannel *);
extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 24d8e97355b9..c47b25fd3f43 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -35,6 +35,7 @@ int css_init_done = 0;
int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -1214,7 +1215,7 @@ static const struct dev_pm_ops css_pm_ops = {
.restore = css_pm_restore,
};
-struct bus_type css_bus_type = {
+static struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
@@ -1233,9 +1234,7 @@ struct bus_type css_bus_type = {
*/
int css_driver_register(struct css_driver *cdrv)
{
- cdrv->drv.name = cdrv->name;
cdrv->drv.bus = &css_bus_type;
- cdrv->drv.owner = cdrv->owner;
return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
@@ -1253,4 +1252,3 @@ void css_driver_unregister(struct css_driver *cdrv)
EXPORT_SYMBOL_GPL(css_driver_unregister);
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 7e37886de231..80ebdddf7747 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -63,7 +63,6 @@ struct subchannel;
struct chp_link;
/**
* struct css_driver - device driver for subchannels
- * @owner: owning module
* @subchannel_type: subchannel type supported by this driver
* @drv: embedded device driver structure
* @irq: called on interrupts
@@ -78,10 +77,8 @@ struct chp_link;
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @settle: wait for asynchronous work to finish
- * @name: name of the device driver
*/
struct css_driver {
- struct module *owner;
struct css_device_id *subchannel_type;
struct device_driver drv;
void (*irq)(struct subchannel *);
@@ -96,16 +93,10 @@ struct css_driver {
int (*thaw) (struct subchannel *);
int (*restore)(struct subchannel *);
int (*settle)(void);
- const char *name;
};
#define to_cssdriver(n) container_of(n, struct css_driver, drv)
-/*
- * all css_drivers have the css_bus_type
- */
-extern struct bus_type css_bus_type;
-
extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
@@ -140,7 +131,6 @@ struct channel_subsystem {
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
-extern struct bus_type css_bus_type;
extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index b7eaff9ca19e..e50b12163afe 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -172,9 +172,11 @@ static int io_subchannel_settle(void)
}
static struct css_driver io_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "io_subchannel",
+ },
.subchannel_type = io_subchannel_ids,
- .name = "io_subchannel",
.irq = io_subchannel_irq,
.sch_event = io_subchannel_sch_event,
.chp_event = io_subchannel_chp_event,
@@ -1030,6 +1032,7 @@ static void io_subchannel_init_fields(struct subchannel *sch)
*/
static int io_subchannel_probe(struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
int rc;
@@ -1073,10 +1076,11 @@ static int io_subchannel_probe(struct subchannel *sch)
if (rc)
goto out_schedule;
/* Allocate I/O subchannel private data. */
- sch->private = kzalloc(sizeof(struct io_subchannel_private),
- GFP_KERNEL | GFP_DMA);
- if (!sch->private)
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv)
goto out_schedule;
+
+ set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
@@ -1090,6 +1094,7 @@ out_schedule:
static int
io_subchannel_remove (struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv = to_io_private(sch);
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
@@ -1099,11 +1104,12 @@ io_subchannel_remove (struct subchannel *sch)
/* Set ccw device to not operational and drop reference. */
spin_lock_irq(cdev->ccwlock);
sch_set_cdev(sch, NULL);
+ set_io_private(sch, NULL);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irq(cdev->ccwlock);
ccw_device_unregister(cdev);
out_free:
- kfree(sch->private);
+ kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
}
@@ -1553,11 +1559,12 @@ spinlock_t * cio_get_console_lock(void)
static int ccw_device_console_enable(struct ccw_device *cdev,
struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv = cio_get_console_priv();
int rc;
/* Attach subchannel private data. */
- sch->private = cio_get_console_priv();
- memset(sch->private, 0, sizeof(struct io_subchannel_private));
+ memset(io_priv, 0, sizeof(*io_priv));
+ set_io_private(sch, io_priv);
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index d024d2c21897..ba31ad88f4f7 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -5,68 +5,36 @@
#include <asm/schid.h>
#include <asm/ccwdev.h>
#include "css.h"
-
-/*
- * command-mode operation request block
- */
-struct cmd_orb {
- u32 intparm; /* interruption parameter */
- u32 key : 4; /* flags, like key, suspend control, etc. */
- u32 spnd : 1; /* suspend control */
- u32 res1 : 1; /* reserved */
- u32 mod : 1; /* modification control */
- u32 sync : 1; /* synchronize control */
- u32 fmt : 1; /* format control */
- u32 pfch : 1; /* prefetch control */
- u32 isic : 1; /* initial-status-interruption control */
- u32 alcc : 1; /* address-limit-checking control */
- u32 ssic : 1; /* suppress-suspended-interr. control */
- u32 res2 : 1; /* reserved */
- u32 c64 : 1; /* IDAW/QDIO 64 bit control */
- u32 i2k : 1; /* IDAW 2/4kB block size control */
- u32 lpm : 8; /* logical path mask */
- u32 ils : 1; /* incorrect length */
- u32 zero : 6; /* reserved zeros */
- u32 orbx : 1; /* ORB extension control */
- u32 cpa; /* channel program address */
-} __attribute__ ((packed, aligned(4)));
-
-/*
- * transport-mode operation request block
- */
-struct tm_orb {
- u32 intparm;
- u32 key:4;
- u32 :9;
- u32 b:1;
- u32 :2;
- u32 lpm:8;
- u32 :7;
- u32 x:1;
- u32 tcw;
- u32 prio:8;
- u32 :8;
- u32 rsvpgm:8;
- u32 :8;
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
-} __attribute__ ((packed, aligned(4)));
-
-union orb {
- struct cmd_orb cmd;
- struct tm_orb tm;
-} __attribute__ ((packed, aligned(4)));
+#include "orb.h"
struct io_subchannel_private {
union orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
-} __attribute__ ((aligned(8)));
+ struct ccw_device *cdev;/* pointer to the child ccw device */
+ struct {
+ unsigned int suspend:1; /* allow suspend */
+ unsigned int prefetch:1;/* deny prefetch */
+ unsigned int inter:1; /* suppress intermediate interrupts */
+ } __packed options;
+} __aligned(8);
-#define to_io_private(n) ((struct io_subchannel_private *)n->private)
-#define sch_get_cdev(n) (dev_get_drvdata(&n->dev))
-#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c))
+#define to_io_private(n) ((struct io_subchannel_private *) \
+ dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
+
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ if (priv)
+ priv->cdev = cdev;
+}
#define MAX_CIWS 8
@@ -191,23 +159,6 @@ struct ccw_device_private {
void *cmb_wait; /* deferred cmb enable/disable */
};
-static inline int ssch(struct subchannel_id schid, union orb *addr)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode = -EIO;
-
- asm volatile(
- " ssch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (ccode)
- : "d" (reg1), "a" (addr), "m" (*addr)
- : "cc", "memory");
- return ccode;
-}
-
static inline int rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
@@ -223,21 +174,6 @@ static inline int rsch(struct subchannel_id schid)
return ccode;
}
-static inline int csch(struct subchannel_id schid)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode;
-
- asm volatile(
- " csch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc");
- return ccode;
-}
-
static inline int hsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index fac06155773f..4d80fc67a06b 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -3,6 +3,8 @@
#include <asm/chpid.h>
#include <asm/schid.h>
+#include "orb.h"
+#include "cio.h"
/*
* TPI info structure
@@ -87,6 +89,38 @@ static inline int tsch(struct subchannel_id schid, struct irb *addr)
return ccode;
}
+static inline int ssch(struct subchannel_id schid, union orb *addr)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " ssch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc", "memory");
+ return ccode;
+}
+
+static inline int csch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " csch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
static inline int tpi(struct tpi_info *addr)
{
int ccode;
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 000000000000..45a9865c2b36
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,67 @@
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+ u32 intparm; /* interruption parameter */
+ u32 key:4; /* flags, like key, suspend control, etc. */
+ u32 spnd:1; /* suspend control */
+ u32 res1:1; /* reserved */
+ u32 mod:1; /* modification control */
+ u32 sync:1; /* synchronize control */
+ u32 fmt:1; /* format control */
+ u32 pfch:1; /* prefetch control */
+ u32 isic:1; /* initial-status-interruption control */
+ u32 alcc:1; /* address-limit-checking control */
+ u32 ssic:1; /* suppress-suspended-interr. control */
+ u32 res2:1; /* reserved */
+ u32 c64:1; /* IDAW/QDIO 64 bit control */
+ u32 i2k:1; /* IDAW 2/4kB block size control */
+ u32 lpm:8; /* logical path mask */
+ u32 ils:1; /* incorrect length */
+ u32 zero:6; /* reserved zeros */
+ u32 orbx:1; /* ORB extension control */
+ u32 cpa; /* channel program address */
+} __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:9;
+ u32 b:1;
+ u32:2;
+ u32 lpm:8;
+ u32:7;
+ u32 x:1;
+ u32 tcw;
+ u32 prio:8;
+ u32:8;
+ u32 rsvpgm:8;
+ u32:8;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+union orb {
+ struct cmd_orb cmd;
+ struct tm_orb tm;
+} __packed __aligned(4);
+
+#endif /* S390_ORB_H */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 51c666fb67a4..645b0fcbb370 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,36 +122,21 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
- zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
- sizeof(struct zfcp_fc_gpn_ft_req));
- if (!zfcp_data.gpn_ft_cache)
- goto out;
-
- zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
- sizeof(struct fsf_qtcb));
- if (!zfcp_data.qtcb_cache)
+ zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+ sizeof(struct fsf_qtcb));
+ if (!zfcp_fsf_qtcb_cache)
goto out_qtcb_cache;
- zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
- sizeof(struct fsf_status_read_buffer));
- if (!zfcp_data.sr_buffer_cache)
- goto out_sr_cache;
+ zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+ sizeof(struct zfcp_fc_req));
+ if (!zfcp_fc_req_cache)
+ goto out_fc_cache;
- zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
- sizeof(struct zfcp_fc_gid_pn));
- if (!zfcp_data.gid_pn_cache)
- goto out_gid_cache;
-
- zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
- sizeof(struct zfcp_fc_els_adisc));
- if (!zfcp_data.adisc_cache)
- goto out_adisc_cache;
-
- zfcp_data.scsi_transport_template =
+ zfcp_scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
- if (!zfcp_data.scsi_transport_template)
+ if (!zfcp_scsi_transport_template)
goto out_transport;
- scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
+ scsi_transport_reserve_device(zfcp_scsi_transport_template,
sizeof(struct zfcp_scsi_dev));
@@ -175,18 +160,12 @@ static int __init zfcp_module_init(void)
out_ccw_register:
misc_deregister(&zfcp_cfdc_misc);
out_misc:
- fc_release_transport(zfcp_data.scsi_transport_template);
+ fc_release_transport(zfcp_scsi_transport_template);
out_transport:
- kmem_cache_destroy(zfcp_data.adisc_cache);
-out_adisc_cache:
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
-out_gid_cache:
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
- kmem_cache_destroy(zfcp_data.qtcb_cache);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
-out:
return retval;
}
@@ -196,12 +175,9 @@ static void __exit zfcp_module_exit(void)
{
ccw_driver_unregister(&zfcp_ccw_driver);
misc_deregister(&zfcp_cfdc_misc);
- fc_release_transport(zfcp_data.scsi_transport_template);
- kmem_cache_destroy(zfcp_data.adisc_cache);
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
- kmem_cache_destroy(zfcp_data.qtcb_cache);
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+ fc_release_transport(zfcp_scsi_transport_template);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
module_exit(zfcp_module_exit);
@@ -260,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
return -ENOMEM;
adapter->pool.qtcb_pool =
- mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
+ mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
if (!adapter->pool.qtcb_pool)
return -ENOMEM;
- adapter->pool.status_read_data =
- mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
- zfcp_data.sr_buffer_cache);
- if (!adapter->pool.status_read_data)
+ BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+ adapter->pool.sr_data =
+ mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+ if (!adapter->pool.sr_data)
return -ENOMEM;
adapter->pool.gid_pn =
- mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
+ mempool_create_slab_pool(1, zfcp_fc_req_cache);
if (!adapter->pool.gid_pn)
return -ENOMEM;
@@ -290,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.qtcb_pool);
if (adapter->pool.status_read_req)
mempool_destroy(adapter->pool.status_read_req);
- if (adapter->pool.status_read_data)
- mempool_destroy(adapter->pool.status_read_data);
+ if (adapter->pool.sr_data)
+ mempool_destroy(adapter->pool.sr_data);
if (adapter->pool.gid_pn)
mempool_destroy(adapter->pool.gid_pn);
}
@@ -386,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+ INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -437,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
- if (!zfcp_adapter_scsi_register(adapter))
+ if (!zfcp_scsi_adapter_register(adapter))
return adapter;
failed:
@@ -451,10 +428,11 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
+ cancel_work_sync(&adapter->ns_up_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
- zfcp_adapter_scsi_unregister(adapter);
+ zfcp_scsi_adapter_unregister(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9ae1d0a6f627..527ba48eea57 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -89,7 +89,6 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_LUN_READONLY 0x00000008
/* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
@@ -108,7 +107,7 @@ struct zfcp_adapter_mempool {
mempool_t *scsi_req;
mempool_t *scsi_abort;
mempool_t *status_read_req;
- mempool_t *status_read_data;
+ mempool_t *sr_data;
mempool_t *gid_pn;
mempool_t *qtcb_pool;
};
@@ -190,6 +189,7 @@ struct zfcp_adapter {
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
+ struct work_struct ns_up_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
struct device_dma_parameters dma_parms;
@@ -314,15 +314,4 @@ struct zfcp_fsf_req {
void (*handler)(struct zfcp_fsf_req *);
};
-/* driver data */
-struct zfcp_data {
- struct scsi_host_template scsi_host_template;
- struct scsi_transport_template *scsi_transport_template;
- struct kmem_cache *gpn_ft_cache;
- struct kmem_cache *qtcb_cache;
- struct kmem_cache *sr_buffer_cache;
- struct kmem_cache *gid_pn_cache;
- struct kmem_cache *adisc_cache;
-};
-
#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e003e306f870..e1b4f800e226 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- if (mempool_resize(act->adapter->pool.status_read_data,
+ if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num, GFP_KERNEL))
return ZFCP_ERP_FAILED;
@@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
queue_work(adapter->work_queue, &adapter->scan_work);
+ queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);
+
kref_put(&adapter->ref, zfcp_adapter_release);
break;
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 6e325284fbe7..03627cfd81cd 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
+extern struct kmem_cache *zfcp_fc_req_cache;
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
enum fc_host_event_code event_code, u32);
extern void zfcp_fc_post_event(struct work_struct *);
@@ -95,8 +96,10 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
/* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
@@ -139,9 +142,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
struct qdio_buffer *);
/* zfcp_scsi.c */
-extern struct zfcp_data zfcp_data;
-extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
-extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 30cf91a787a3..297e6b71ce9c 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -11,11 +11,14 @@
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/utsname.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
+struct kmem_cache *zfcp_fc_req_cache;
+
static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
@@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
zfcp_fc_incoming_rscn(fsf_req);
}
-static void zfcp_fc_ns_gid_pn_eval(void *data)
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
- struct zfcp_fc_gid_pn *gid_pn = data;
- struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
- struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
- struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
- struct zfcp_port *port = gid_pn->port;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
- if (ct->status)
+ if (ct_els->status)
return;
- if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
+ if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
return;
- /* paranoia */
- if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
- return;
/* looks like a valid d_id */
- port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
+ ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
static void zfcp_fc_complete(void *data)
@@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data)
complete(data);
}
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+ ct_hdr->ct_rev = FC_CT_REV;
+ ct_hdr->ct_fs_type = FC_FST_DIR;
+ ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ ct_hdr->ct_cmd = cmd;
+ ct_hdr->ct_mr_size = mr_size / 4;
+}
+
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
- struct zfcp_fc_gid_pn *gid_pn)
+ struct zfcp_fc_req *fc_req)
{
struct zfcp_adapter *adapter = port->adapter;
DECLARE_COMPLETION_ONSTACK(completion);
+ struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
int ret;
/* setup parameters for send generic command */
- gid_pn->port = port;
- gid_pn->ct.handler = zfcp_fc_complete;
- gid_pn->ct.handler_data = &completion;
- gid_pn->ct.req = &gid_pn->sg_req;
- gid_pn->ct.resp = &gid_pn->sg_resp;
- sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
- sizeof(struct zfcp_fc_gid_pn_req));
- sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
- sizeof(struct zfcp_fc_gid_pn_resp));
-
- /* setup nameserver request */
- gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
- gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
- gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
- gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
-
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.handler = zfcp_fc_complete;
+ fc_req->ct_els.handler_data = &completion;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+ sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+ zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+ FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+ gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
adapter->pool.gid_pn_req,
ZFCP_FC_CTELS_TMO);
if (!ret) {
wait_for_completion(&completion);
- zfcp_fc_ns_gid_pn_eval(gid_pn);
+ zfcp_fc_ns_gid_pn_eval(fc_req);
}
return ret;
}
/**
- * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
- struct zfcp_fc_gid_pn *gid_pn;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
- gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
- if (!gid_pn)
+ fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- memset(gid_pn, 0, sizeof(*gid_pn));
+ memset(fc_req, 0, sizeof(*fc_req));
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
- ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
+ ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
- mempool_free(gid_pn, adapter->pool.gid_pn);
+ mempool_free(fc_req, adapter->pool.gid_pn);
return ret;
}
@@ -419,11 +420,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
static void zfcp_fc_adisc_handler(void *data)
{
- struct zfcp_fc_els_adisc *adisc = data;
- struct zfcp_port *port = adisc->els.port;
- struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
+ struct zfcp_fc_req *fc_req = data;
+ struct zfcp_port *port = fc_req->ct_els.port;
+ struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
- if (adisc->els.status) {
+ if (fc_req->ct_els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_1");
@@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data)
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
- struct zfcp_fc_els_adisc *adisc;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
int ret;
- adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
- if (!adisc)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- adisc->els.port = port;
- adisc->els.req = &adisc->req;
- adisc->els.resp = &adisc->resp;
- sg_init_one(adisc->els.req, &adisc->adisc_req,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
sizeof(struct fc_els_adisc));
- sg_init_one(adisc->els.resp, &adisc->adisc_resp,
+ sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
sizeof(struct fc_els_adisc));
- adisc->els.handler = zfcp_fc_adisc_handler;
- adisc->els.handler_data = adisc;
+ fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+ fc_req->ct_els.handler_data = fc_req;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
- adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
- adisc->adisc_req.adisc_cmd = ELS_ADISC;
- hton24(adisc->adisc_req.adisc_port_id,
- fc_host_port_id(adapter->scsi_host));
+ fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
+ fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+ fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+ hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
+ ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
return ret;
}
@@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
-static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
+static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
- struct scatterlist *sg = &gpn_ft->sg_req;
-
- kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
- zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
-
- kfree(gpn_ft);
-}
+ struct zfcp_fc_req *fc_req;
-static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
-{
- struct zfcp_fc_gpn_ft *gpn_ft;
- struct zfcp_fc_gpn_ft_req *req;
-
- gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
- if (!gpn_ft)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
return NULL;
- req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
- if (!req) {
- kfree(gpn_ft);
- gpn_ft = NULL;
- goto out;
+ if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+ return NULL;
}
- sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
- if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
- zfcp_free_sg_env(gpn_ft, buf_num);
- gpn_ft = NULL;
- }
-out:
- return gpn_ft;
-}
+ sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+ sizeof(struct zfcp_fc_gpn_ft_req));
+ return fc_req;
+}
-static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_bytes)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
- /* prepare CT IU for GPN_FT */
- req->ct_hdr.ct_rev = FC_CT_REV;
- req->ct_hdr.ct_fs_type = FC_FST_DIR;
- req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- req->ct_hdr.ct_options = 0;
- req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
- req->ct_hdr.ct_mr_size = max_bytes / 4;
- req->gpn_ft.fn_domain_id_scope = 0;
- req->gpn_ft.fn_area_id_scope = 0;
+ zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
- /* prepare zfcp_send_ct */
- ct->handler = zfcp_fc_complete;
- ct->handler_data = &completion;
- ct->req = &gpn_ft->sg_req;
- ct->resp = gpn_ft->sg_resp;
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
@@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
list_move_tail(&port->list, lh);
}
-static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_entries)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct scatterlist *sg = gpn_ft->sg_resp;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct scatterlist *sg = &fc_req->sg_rsp;
struct fc_ct_hdr *hdr = sg_virt(sg);
struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
@@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
u32 d_id;
int ret = 0, x, last = 0;
- if (ct->status)
+ if (ct_els->status)
return -EIO;
if (hdr->ct_cmd != FC_FS_ACC) {
@@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
scan_work);
int ret, i;
- struct zfcp_fc_gpn_ft *gpn_ft;
+ struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
@@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work)
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
- gpn_ft = zfcp_alloc_sg_env(buf_num);
- if (!gpn_ft)
+ fc_req = zfcp_alloc_sg_env(buf_num);
+ if (!fc_req)
goto out;
for (i = 0; i < 3; i++) {
- ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
+ ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
if (!ret) {
- ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
+ ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
- zfcp_free_sg_env(gpn_ft, buf_num);
+ zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
}
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ char devno[] = "DEVNO:";
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+ struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+ int ret;
+
+ zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+ sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+ sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&completion);
+ if (ct_els->status)
+ return ct_els->status;
+
+ if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+ !(strstr(gspn_rsp->gspn.fp_name, devno)))
+ snprintf(fc_host_symbolic_name(adapter->scsi_host),
+ FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+ gspn_rsp->gspn.fp_name, devno,
+ dev_name(&adapter->ccw_device->dev),
+ init_utsname()->nodename);
+ else
+ strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+ return 0;
+}
+
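
For NPIV subchannels the snprintf() above extends whatever name the hardware reported with the device bus ID and the node name. A standalone C sketch with made-up values shows the resulting format (FC_SYMBOLIC_NAME_SIZE is stubbed as 256):

	#include <stdio.h>

	int main(void)
	{
		char buf[256];

		/* Same format string as above; all values invented. */
		snprintf(buf, sizeof(buf), "%s%s %s NAME: %s",
			 "IBM     2107900", "DEVNO:", "0.0.1234", "linux1");
		puts(buf);	/* IBM     2107900DEVNO: 0.0.1234 NAME: linux1 */
		return 0;
	}
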
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+ struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+ int ret, len;
+
+ zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+ len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+ FC_SYMBOLIC_NAME_SIZE);
+ rspn_req->rspn.fr_name_len = len;
+
+ sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+ sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (!ret)
+ wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter where to update the symbolic port name
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ ns_up_work);
+ int ret;
+ struct zfcp_fc_req *fc_req;
+
+ if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+ fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ return;
+
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
+ return;
+
+ ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+ if (ret)
+ goto out_free;
+
+ ret = zfcp_fc_gspn(adapter, fc_req);
+ if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ goto out_ds_put;
+
+ memset(fc_req, 0, sizeof(*fc_req));
+ zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+ zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
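
This hunk only adds the work function itself. As a hedged sketch, the wiring elsewhere in the driver (not part of this excerpt) would look roughly like this; adapter->work_queue names the driver's existing per-adapter workqueue and is an assumption here:

	INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);

	/* later, once the nameserver is reachable: */
	queue_work(adapter->work_queue, &adapter->ns_up_work);
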
static void zfcp_fc_ct_els_job_handler(void *data)
{
struct fc_bsg_job *job = data;
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index b464ae01086c..4561f3bf7300 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -64,33 +64,16 @@ struct zfcp_fc_gid_pn_req {
} __packed;
/**
- * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
* @ct_hdr: FC GS common transport header
* @gid_pn: GID_PN response
*/
-struct zfcp_fc_gid_pn_resp {
+struct zfcp_fc_gid_pn_rsp {
struct fc_ct_hdr ct_hdr;
struct fc_gid_pn_resp gid_pn;
} __packed;
/**
- * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatterlist entry for request data
- * @sg_resp: scatterlist entry for response data
- * @gid_pn_req: GID_PN request data
- * @gid_pn_resp: GID_PN response data
- */
-struct zfcp_fc_gid_pn {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp;
- struct zfcp_fc_gid_pn_req gid_pn_req;
- struct zfcp_fc_gid_pn_resp gid_pn_resp;
- struct zfcp_port *port;
-};
-
-/**
* struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request
* @ct_hdr: FC GS common transport header
* @gpn_ft: GPN_FT request
@@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req {
} __packed;
/**
- * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
* @ct_hdr: FC GS common transport header
- * @gpn_ft: Array of gpn_ft response data to fill one memory page
+ * @gspn: GSPN_ID request
*/
-struct zfcp_fc_gpn_ft_resp {
+struct zfcp_fc_gspn_req {
struct fc_ct_hdr ct_hdr;
- struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
+ struct fc_gid_pn_resp gspn;
} __packed;
/**
- * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatter list entry for gpn_ft request
- * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
*/
-struct zfcp_fc_gpn_ft {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
-};
+struct zfcp_fc_gspn_rsp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gspn_resp gspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
/**
- * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
- * @els: data required for issuing els fsf command
- * @req: scatterlist entry for ELS ADISC request
- * @resp: scatterlist entry for ELS ADISC response
- * @adisc_req: ELS ADISC request data
- * @adisc_resp: ELS ADISC response data
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
*/
-struct zfcp_fc_els_adisc {
- struct zfcp_fsf_ct_els els;
- struct scatterlist req;
- struct scatterlist resp;
- struct fc_els_adisc adisc_req;
- struct fc_els_adisc adisc_resp;
+struct zfcp_fc_rspn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_rspn rspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+ struct zfcp_fsf_ct_els ct_els;
+ struct scatterlist sg_req;
+ struct scatterlist sg_rsp;
+ union {
+ struct {
+ struct fc_els_adisc req;
+ struct fc_els_adisc rsp;
+ } adisc;
+ struct {
+ struct zfcp_fc_gid_pn_req req;
+ struct zfcp_fc_gid_pn_rsp rsp;
+ } gid_pn;
+ struct {
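+			/* sg_rsp2 lies directly behind the outer sg_rsp in
+			 * memory, so together they form one contiguous table
+			 * of ZFCP_FC_GPN_FT_NUM_BUFS scatterlist entries for
+			 * the GPN_FT response. */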
+ struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+ struct zfcp_fc_gpn_ft_req req;
+ } gpn_ft;
+ struct {
+ struct zfcp_fc_gspn_req req;
+ struct zfcp_fc_gspn_rsp rsp;
+ } gspn;
+ struct {
+ struct zfcp_fc_rspn_req req;
+ struct fc_ct_hdr rsp;
+ } rspn;
+ } u;
};
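
All five request flavors now share a single slab cache sized by the largest union member. A minimal sketch of the cache setup, assuming it runs once in the driver's module init (the real call site is outside this excerpt):

	zfcp_fc_req_cache = kmem_cache_create("zfcp_fc_req",
					      sizeof(struct zfcp_fc_req),
					      0, 0, NULL);
	if (!zfcp_fc_req_cache)
		return -ENOMEM;
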
/**
@@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports {
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
* @fcp: fcp_cmnd to setup
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ * @tm: task management flags to setup task management command
*/
static inline
-void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
+ u8 tm_flags)
{
char tag[2];
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+ if (unlikely(tm_flags)) {
+ fcp->fc_tm_flags = tm_flags;
+ return;
+ }
+
if (scsi_populate_tag_msg(scsi, tag)) {
switch (tag[0]) {
case MSG_ORDERED_TAG:
@@ -226,19 +247,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
}
/**
- * zfcp_fc_fcp_tm - setup FCP command as task management command
- * @fcp: fcp_cmnd to setup
- * @dev: scsi_device where to send the task management command
- * @tm: task management flags to setup tm command
- */
-static inline
-void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
-{
- int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
- fcp->fc_tm_flags |= tm_flags;
-}
-
-/**
* zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
* @fcp_rsp: FCP RSP IU to evaluate
* @scsi: SCSI command where to update status and sense buffer
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 60ff9d172c79..a0e05ef65924 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -18,6 +18,8 @@
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+struct kmem_cache *zfcp_fsf_qtcb_cache;
+
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
@@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
}
if (likely(req->qtcb))
- kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
+ kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req);
}
@@ -212,7 +214,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
zfcp_dbf_hba_fsf_uss("fssrh_1", req);
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
return;
}
@@ -265,7 +267,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
break;
}
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss);
@@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
if (likely(pool))
qtcb = mempool_alloc(pool, GFP_ATOMIC);
else
- qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
+ qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
if (unlikely(!qtcb))
return NULL;
@@ -723,6 +725,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
+ struct page *page;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out;
}
- sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
- if (!sr_buf) {
+ page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+ if (!page) {
retval = -ENOMEM;
goto failed_buf;
}
+ sr_buf = page_address(page);
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
@@ -755,7 +759,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
failed_req_send:
req->data = NULL;
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
zfcp_dbf_hba_fsf_uss("fssr__1", req);
zfcp_fsf_req_free(req);
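
The status-read data pool now hands out whole pages instead of raw buffers, hence the page_address()/virt_to_page() conversions above. A hedged sketch of creating such a pool with the stock page-pool helper; the element count FSF_STATUS_READS_RECOM is assumed to match what zfcp uses elsewhere for status reads:

	/* order 0: one page per element, enough for one
	 * struct fsf_status_read_buffer */
	adapter->pool.sr_data =
		mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
	if (!adapter->pool.sr_data)
		return -ENOMEM;
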
@@ -1552,7 +1556,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -1605,7 +1609,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -2206,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
if (scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
@@ -2284,7 +2288,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
goto out;
}
- req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
req->data = scmnd;
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
@@ -2296,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ddb5800823a9..2a4991d6d4d5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
return SUCCESS;
}
-int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+ .module = THIS_MODULE,
+ .name = "zfcp",
+ .queuecommand = zfcp_scsi_queuecommand,
+ .eh_abort_handler = zfcp_scsi_eh_abort_handler,
+ .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+ .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+ .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
+ .slave_alloc = zfcp_scsi_slave_alloc,
+ .slave_configure = zfcp_scsi_slave_configure,
+ .slave_destroy = zfcp_scsi_slave_destroy,
+ .change_queue_depth = zfcp_scsi_change_queue_depth,
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+ .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
+ .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+ .shost_attrs = zfcp_sysfs_shost_attrs,
+ .sdev_attrs = zfcp_sysfs_sdev_attrs,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
@@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
- adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+ adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
@@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
- adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
+ adapter->scsi_host->transportt = zfcp_scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
return 0;
}
-void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
@@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
-
- return;
}
static struct fc_host_statistics*
@@ -688,33 +720,8 @@ struct fc_function_template zfcp_transport_functions = {
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
+ .show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
-
-struct zfcp_data zfcp_data = {
- .scsi_host_template = {
- .name = "zfcp",
- .module = THIS_MODULE,
- .proc_name = "zfcp",
- .change_queue_depth = zfcp_scsi_change_queue_depth,
- .slave_alloc = zfcp_scsi_slave_alloc,
- .slave_configure = zfcp_scsi_slave_configure,
- .slave_destroy = zfcp_scsi_slave_destroy,
- .queuecommand = zfcp_scsi_queuecommand,
- .eh_abort_handler = zfcp_scsi_eh_abort_handler,
- .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
- .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
- .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
- .can_queue = 4096,
- .this_id = -1,
- .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
- .cmd_per_lun = 1,
- .use_clustering = 1,
- .sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
- .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .shost_attrs = zfcp_sysfs_shost_attrs,
- },
-};
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8616496ffc02..4a1f029c4fe9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS
source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
+source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
config SGIWD93_SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ef6de669424b..7ad0b8a79ae8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 9a5629f94f95..e7cd2fcbe036 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
- cancel_delayed_work(&hostdata->coroutine);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&hostdata->coroutine);
}
/**
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 984bd527c6c9..da7b9887ec48 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1020,7 +1020,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
@@ -1066,7 +1066,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index eaaa8813067d..868cc5590145 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
/**
- * beiscsi_conn_get_param - get the iscsi parameter
- * @cls_conn: pointer to iscsi cls conn
+ * beiscsi_ep_get_param - get the iscsi parameter
+ * @ep: pointer to iscsi ep
* @param: parameter type identifier
* @buf: buffer pointer
*
* returns iscsi parameter
*/
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
- struct beiscsi_endpoint *beiscsi_ep;
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
int len = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
- beiscsi_ep = beiscsi_conn->ep;
- if (!beiscsi_ep) {
- SE_DEBUG(DBG_LVL_1,
- "In beiscsi_conn_get_param , no beiscsi_ep\n");
- return -ENODEV;
- }
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 8950a702b9f4..9c532797c29e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_fd, int is_leading);
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf);
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf);
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 638c72b7f94a..24e20ba9633c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.bind_conn = beiscsi_conn_bind,
.destroy_conn = iscsi_conn_teardown,
.set_param = beiscsi_set_param,
- .get_conn_param = beiscsi_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = beiscsi_get_host_param,
.start_conn = beiscsi_conn_start,
@@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.alloc_pdu = beiscsi_alloc_pdu,
.parse_pdu_itt = beiscsi_parse_pdu,
.get_stats = beiscsi_conn_get_stats,
+ .get_ep_param = beiscsi_ep_get_param,
.ep_connect = beiscsi_ep_connect,
.ep_poll = beiscsi_ep_poll,
.ep_disconnect = beiscsi_ep_disconnect,
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
new file mode 100644
index 000000000000..69d031d98469
--- /dev/null
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -0,0 +1,1080 @@
+#ifndef __57XX_FCOE_HSI_LINUX_LE__
+#define __57XX_FCOE_HSI_LINUX_LE__
+
+/*
+ * common data for all protocols
+ */
+struct b577xx_doorbell_hdr {
+ u8 header;
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * doorbell message sent to the chip
+ */
+struct b577xx_doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct b577xx_doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
+};
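
Throughout this header, sub-byte fields are expressed as mask/shift macro pairs instead of C bitfields, which keeps the wire layout independent of compiler bitfield ordering. An illustrative write using the doorbell header macros (conn_type is an invented variable):

	struct b577xx_doorbell_hdr hdr = { .header = 0 };

	hdr.header |= (1 << B577XX_DOORBELL_HDR_RX_SHIFT) &
		      B577XX_DOORBELL_HDR_RX;
	hdr.header |= (conn_type << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT) &
		      B577XX_DOORBELL_HDR_CONN_TYPE;
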
+
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
+
+
+/*
+ * Fixed-size structure so that it can be placed inside a union
+ */
+struct fcoe_abts_rsp_union {
+ u32 r_ctl;
+ u32 abts_rsp_payload[7];
+};
+
+
+/*
+ * Four 32-bit registers (16 bytes) in size
+ */
+struct fcoe_bd_ctx {
+ u32 buf_addr_hi;
+ u32 buf_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 buf_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len;
+ u16 rsrv0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rsrv1;
+ u16 flags;
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+ u16 rsrv1;
+#endif
+};
+
+
+struct fcoe_cleanup_flow_info {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 task_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 task_id;
+ u16 reserved1;
+#endif
+ u32 reserved2[7];
+};
+
+
+struct fcoe_fcp_cmd_payload {
+ u32 opaque[8];
+};
+
+struct fcoe_fc_hdr {
+#if defined(__BIG_ENDIAN)
+ u8 cs_ctl;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 r_ctl;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 r_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 seq_id;
+ u8 df_ctl;
+ u16 seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 type;
+ u8 f_ctl[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 f_ctl[3];
+ u8 type;
+#endif
+ u32 parameters;
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#endif
+};
+
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ u32 reserved0[2];
+};
+
+union fcoe_cmd_flow_info {
+ struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+ struct fcoe_fc_frame mp_fc_frame;
+};
+
+
+
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ u32 fcp_resid;
+#if defined(__BIG_ENDIAN)
+ u16 retry_delay_timer;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u8 scsi_status_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u16 retry_delay_timer;
+#endif
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+};
+
+
+/*
+ * Fixed-size structure so that it can be placed inside a union
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+
+struct fcoe_fcp_xfr_rdy_payload {
+ u32 burst_len;
+ u32 data_ro;
+};
+
+struct fcoe_read_flow_info {
+ struct fcoe_fc_hdr fc_data_in_hdr;
+ u32 reserved[2];
+};
+
+struct fcoe_write_flow_info {
+ struct fcoe_fc_hdr fc_data_out_hdr;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
+};
+
+union fcoe_rsp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+};
+
+/*
+ * 32 bytes used for general purposes
+ */
+union fcoe_general_task_ctx {
+ union fcoe_cmd_flow_info cmd_info;
+ struct fcoe_read_flow_info read_info;
+ struct fcoe_write_flow_info write_info;
+ union fcoe_rsp_flow_info rsp_info;
+ struct fcoe_cleanup_flow_info cleanup_info;
+ u32 comp_info[8];
+};
+
+
+/*
+ * FCoE KCQ CQE parameters
+ */
+union fcoe_kcqe_params {
+ u32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE
+ */
+struct fcoe_kcqe {
+ u32 fcoe_conn_id;
+ u32 completion_status;
+ u32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE KWQE header
+ */
+struct fcoe_kwqe_header {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+ u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 1
+ */
+struct fcoe_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 num_tasks;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 task_list_pbl_addr_lo;
+ u32 task_list_pbl_addr_hi;
+ u32 dummy_buffer_addr_lo;
+ u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rq_num_wqes;
+ u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_wqes;
+ u16 rq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cq_num_wqes;
+ u16 rq_buffer_log_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_buffer_log_size;
+ u16 cq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+ u8 num_sessions_log;
+ u16 mtu;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 2
+ */
+struct fcoe_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 hash_tbl_pbl_addr_lo;
+ u32 hash_tbl_pbl_addr_hi;
+ u32 t2_hash_tbl_addr_lo;
+ u32 t2_hash_tbl_addr_hi;
+ u32 t2_ptr_hash_tbl_addr_lo;
+ u32 t2_ptr_hash_tbl_addr_hi;
+ u32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3
+ */
+struct fcoe_kwqe_init3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 error_bit_map_lo;
+ u32 error_bit_map_hi;
+#if defined(__BIG_ENDIAN)
+ u8 reserved21[3];
+ u8 cached_session_enable;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_session_enable;
+ u8 reserved21[3];
+#endif
+ u32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1
+ */
+struct fcoe_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 fcoe_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 sq_addr_lo;
+ u32 sq_addr_hi;
+ u32 rq_pbl_addr_lo;
+ u32 rq_pbl_addr_hi;
+ u32 rq_first_pbe_addr_lo;
+ u32 rq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 reserved0;
+#endif
+};
+
+/*
+ * FCoE connection offload request 2
+ */
+struct fcoe_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 cq_addr_lo;
+ u32 cq_addr_hi;
+ u32 xferq_addr_lo;
+ u32 xferq_addr_hi;
+ u32 conn_db_addr_lo;
+ u32 conn_db_addr_hi;
+ u32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3
+ */
+struct fcoe_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 tx_max_conc_seqs_c3;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+#endif
+ u32 reserved;
+ u32 confq_first_pbe_addr_lo;
+ u32 confq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 tx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rx_open_seqs_exch_c3;
+ u8 rx_max_conc_seqs_c3;
+ u16 rx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+#endif
+};
+
+/*
+ * FCoE connection offload request 4
+ */
+struct fcoe_kwqe_conn_offload4 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u8 reserved2;
+ u8 e_d_tov_timer_val;
+#elif defined(__LITTLE_ENDIAN)
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u8 dst_mac_addr_hi16[2];
+#endif
+ u8 dst_mac_addr_lo32[4];
+ u32 lcq_addr_lo;
+ u32 lcq_addr_hi;
+ u32 confq_pbl_base_addr_lo;
+ u32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request
+ */
+struct fcoe_kwqe_conn_enable_disable {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+#endif
+ u8 dst_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 dst_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u16 reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 vlan_flag;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 vlan_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved3;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 reserved3;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request
+ */
+struct fcoe_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request
+ */
+struct fcoe_kwqe_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request
+ */
+struct fcoe_kwqe_stat {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 stat_params_addr_lo;
+ u32 stat_params_addr_hi;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
+
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 cur_sge_idx;
+ u16 cur_sge_off;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+#endif
+};
+
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+struct fcoe_seq_ctx {
+#if defined(__BIG_ENDIAN)
+ u16 low_seq_cnt;
+ struct fcoe_s_stat_ctx s_stat;
+ u8 seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ u16 low_seq_cnt;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 err_seq_cnt;
+ u16 high_seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 high_seq_cnt;
+ u16 err_seq_cnt;
+#endif
+ u32 low_exp_ro;
+ u32 high_exp_ro;
+};
+
+
+struct fcoe_single_sge_ctx {
+ struct regpair cur_buf_addr;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 cur_buf_rem;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_buf_rem;
+ u16 reserved0;
+#endif
+};
+
+union fcoe_sgl_ctx {
+ struct fcoe_single_sge_ctx single_sge;
+ struct fcoe_mul_sges_ctx mul_sges;
+};
+
+
+
+/*
+ * FCoE SQ element
+ */
+struct fcoe_sqe {
+ u16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
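
Each SQ element is one 16-bit word: a 15-bit task id plus a toggle bit, which in this family of HSI queues typically flips on every ring wrap so the firmware can spot new entries. A sketch of filling one element (task_id and toggle are illustrative variables tracked by the posting code):

	struct fcoe_sqe sqe;

	sqe.wqe  = (task_id << FCOE_SQE_TASK_ID_SHIFT) & FCOE_SQE_TASK_ID;
	sqe.wqe |= (toggle << FCOE_SQE_TOGGLE_BIT_SHIFT) & FCOE_SQE_TOGGLE_BIT;
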
+
+
+
+struct fcoe_task_ctx_entry_tx_only {
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry_txwr_rxrd {
+#if defined(__BIG_ENDIAN)
+ u16 verify_tx_seq;
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u16 verify_tx_seq;
+#endif
+};
+
+/*
+ * Common section. Both TX and RX processing might write and read from it in
+ * different flows
+ */
+struct fcoe_task_ctx_entry_tx_rx_cmn {
+ u32 data_2_trns;
+ union fcoe_general_task_ctx general;
+#if defined(__BIG_ENDIAN)
+ u16 tx_low_seq_cnt;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u8 tx_seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_seq_id;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u16 tx_low_seq_cnt;
+#endif
+ u32 common_flags;
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
+};
+
+struct fcoe_task_ctx_entry_rxwr_txrd {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+ u16 rx_id;
+#endif
+};
+
+struct fcoe_task_ctx_entry_rx_only {
+ struct fcoe_seq_ctx seq_ctx;
+ struct fcoe_seq_ctx ooo_seq_ctx;
+ u32 rsrv3;
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry {
+ struct fcoe_task_ctx_entry_tx_only tx_wr_only;
+ struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+ struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+ struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+ struct fcoe_task_ctx_entry_rx_only rx_wr_only;
+ u32 reserved[4];
+};
+
+
+/*
+ * FCoE XFRQ element
+ */
+struct fcoe_xfrqe {
+ u16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE CONFQ element
+ */
+struct fcoe_confqe {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#endif
+ u32 param;
+};
+
+
+/*
+ * FCoE connection database
+ */
+struct fcoe_conn_db {
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 rsrv0;
+#endif
+ u32 rsrv1;
+ struct regpair cq_arm;
+};
+
+
+/*
+ * FCoE CQ element
+ */
+struct fcoe_cqe {
+ u16 wqe;
+#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
+#define FCOE_CQE_CQE_INFO_SHIFT 0
+#define FCOE_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE error/warning reporting entry
+ */
+struct fcoe_err_report_entry {
+ u32 err_warn_bitmap_lo;
+ u32 err_warn_bitmap_hi;
+ u32 tx_buf_off;
+ u32 rx_buf_off;
+ struct fcoe_fc_hdr fc_hdr;
+};
+
+
+/*
+ * FCoE hash table entry (32 bytes)
+ */
+struct fcoe_hash_table_entry {
+#if defined(__BIG_ENDIAN)
+ u8 d_id_0;
+ u8 s_id_2;
+ u8 s_id_1;
+ u8 s_id_0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id_0;
+ u8 s_id_1;
+ u8 s_id_2;
+ u8 d_id_0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 dst_mac_addr_hi;
+ u8 d_id_2;
+ u8 d_id_1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id_1;
+ u8 d_id_2;
+ u16 dst_mac_addr_hi;
+#endif
+ u32 dst_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 vlan_id;
+ u16 src_mac_addr_hi;
+#elif defined(__LITTLE_ENDIAN)
+ u16 src_mac_addr_hi;
+ u16 vlan_id;
+#endif
+ u32 src_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 reserved0;
+ u8 vlan_flag;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vlan_flag;
+ u8 reserved0;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+ u32 field_id;
+#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
+#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24
+#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31)
+#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
+};
+
+/*
+ * FCoE pending work request CQE
+ */
+struct fcoe_pend_wq_cqe {
+ u16 wqe;
+#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
+#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
+#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0
+ */
+struct fcoe_rx_stat_params_section0 {
+ u32 fcoe_ver_cnt;
+ u32 fcoe_rx_pkt_cnt;
+ u32 fcoe_rx_byte_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ u32 fc_crc_cnt;
+ u32 eofa_del_cnt;
+ u32 miss_frame_cnt;
+ u32 seq_timeout_cnt;
+ u32 drop_seq_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+ u32 fcp_rx_pkt_cnt;
+ u32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ u32 fcoe_tx_pkt_cnt;
+ u32 fcoe_tx_byte_cnt;
+ u32 fcp_tx_pkt_cnt;
+ u32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+};
+
+
+/*
+ * FCoE t2 hash table entry (64 bytes)
+ */
+struct fcoe_t2_hash_table_entry {
+ struct fcoe_hash_table_entry data;
+ struct regpair next;
+ struct regpair reserved0[3];
+};
+
+/*
+ * FCoE unsolicited CQE
+ */
+struct fcoe_unsolicited_cqe {
+ u16 wqe;
+#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
+#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
+#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
+#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+#endif /* __57XX_FCOE_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
new file mode 100644
index 000000000000..6a38080e35ed
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -0,0 +1,11 @@
+config SCSI_BNX2X_FCOE
+ tristate "Broadcom NetXtreme II FCoE support"
+ depends on PCI
+ select NETDEVICES
+ select NETDEV_1000
+ select LIBFC
+ select LIBFCOE
+ select CNIC
+ ---help---
+ This driver supports FCoE offload for the Broadcom NetXtreme II
+ devices.
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
new file mode 100644
index 000000000000..a92695a25176
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
+
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
new file mode 100644
index 000000000000..df2fc09ba479
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -0,0 +1,511 @@
+#ifndef _BNX2FC_H_
+#define _BNX2FC_H_
+/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_encode.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fc2.h>
+#include <scsi/fc_frame.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "57xx_hsi_bnx2fc.h"
+#include "bnx2fc_debug.h"
+#include "../../net/cnic_if.h"
+#include "bnx2fc_constants.h"
+
+#define BNX2FC_NAME "bnx2fc"
+#define BNX2FC_VERSION "1.0.0"
+
+#define PFX "bnx2fc: "
+
+#define BNX2X_DOORBELL_PCI_BAR 2
+
+#define BNX2FC_MAX_BD_LEN 0xffff
+#define BNX2FC_BD_SPLIT_SZ 0x8000
+#define BNX2FC_MAX_BDS_PER_CMD 256
+
+#define BNX2FC_SQ_WQES_MAX 256
+
+#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8)
+#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2)
+#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1)
+
+#define BNX2FC_RQ_WQES_MAX 16
+#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
+
+#define BNX2FC_NUM_MAX_SESS 128
+#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+
+#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096
+#define BNX2FC_MIN_PAYLOAD 256
+#define BNX2FC_MAX_PAYLOAD 2048
+
+#define BNX2FC_RQ_BUF_SZ 256
+#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
+
+#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe))
+#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe))
+#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
+#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
+#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
+#define BNX2FC_5771X_DB_PAGE_SIZE 128
+
+#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_TASK_SIZE 128
+#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
+#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
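+/*
+ * Illustrative note (sketch): the task context table is an array of
+ * pages, so code later in this patch locates the context entry for an
+ * exchange id 'xid' as:
+ *
+ *	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ *	index = xid % BNX2FC_TASKS_PER_PAGE;
+ *	task = &hba->task_ctx[task_idx][index];
+ */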
+
+#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
+#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
+
+#define BNX2FC_MAX_SEQS 255
+
+#define BNX2FC_READ (1 << 1)
+#define BNX2FC_WRITE (1 << 0)
+
+#define BNX2FC_MIN_XID 0
+#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
+#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS)
+#define FCOE_MAX_XID \
+ (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
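+/*
+ * Illustrative note (sketch, assuming the split used by this driver):
+ * xids 0..BNX2FC_MAX_XID identify offloaded exchanges owned by bnx2fc,
+ * while FCOE_MIN_XID..FCOE_MAX_XID are left to the libfc exchange
+ * manager for non-offloaded frames.
+ */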
+#define BNX2FC_MAX_LUN 0xFFFF
+#define BNX2FC_MAX_FCP_TGT 256
+#define BNX2FC_MAX_CMD_LEN 16
+
+#define BNX2FC_TM_TIMEOUT 60 /* secs */
+#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
+
+#define BNX2FC_WAIT_CNT 120
+#define BNX2FC_FW_TIMEOUT (3 * HZ)
+
+#define PORT_MAX 2
+
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+
+/* FC FCP Status */
+#define FC_GOOD 0
+
+#define BNX2FC_RNID_HBA 0x7
+
+/* bnx2fc driver uses only one instance of fcoe_percpu_s */
+extern struct fcoe_percpu_s bnx2fc_global;
+
+extern struct workqueue_struct *bnx2fc_wq;
+
+struct bnx2fc_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t fp_work_lock;
+};
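+
+/*
+ * Illustrative note (sketch): fast path CQ completions are handed to a
+ * per-CPU thread by queueing a struct bnx2fc_work (defined below) on
+ * work_list under fp_work_lock and waking iothread;
+ * bnx2fc_percpu_io_thread() elsewhere in this patch drains the list.
+ */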
+
+
+struct bnx2fc_hba {
+ struct list_head link;
+ struct cnic_dev *cnic;
+ struct pci_dev *pcidev;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+ unsigned long reg_with_cnic;
+ #define BNX2FC_CNIC_REGISTERED 1
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct workqueue_struct *timer_work_queue;
+ struct kref kref;
+ spinlock_t hba_lock;
+ struct mutex hba_mutex;
+ unsigned long adapter_state;
+ #define ADAPTER_STATE_UP 0
+ #define ADAPTER_STATE_GOING_DOWN 1
+ #define ADAPTER_STATE_LINK_DOWN 2
+ #define ADAPTER_STATE_READY 3
+ u32 flags;
+ unsigned long init_done;
+ #define BNX2FC_FW_INIT_DONE 0
+ #define BNX2FC_CTLR_INIT_DONE 1
+ #define BNX2FC_CREATE_DONE 2
+ struct fcoe_ctlr ctlr;
+ u8 vlan_enabled;
+ int vlan_id;
+ u32 next_conn_id;
+ struct fcoe_task_ctx_entry **task_ctx;
+ dma_addr_t *task_ctx_dma;
+ struct regpair *task_ctx_bd_tbl;
+ dma_addr_t task_ctx_bd_dma;
+
+ int hash_tbl_segment_count;
+ void **hash_tbl_segments;
+ void *hash_tbl_pbl;
+ dma_addr_t hash_tbl_pbl_dma;
+ struct fcoe_t2_hash_table_entry *t2_hash_tbl;
+ dma_addr_t t2_hash_tbl_dma;
+ char *t2_hash_tbl_ptr;
+ dma_addr_t t2_hash_tbl_ptr_dma;
+
+ char *dummy_buffer;
+ dma_addr_t dummy_buf_dma;
+
+ struct fcoe_statistics_params *stats_buffer;
+ dma_addr_t stats_buf_dma;
+
+ /*
+ * PCI related info.
+ */
+ u16 pci_did;
+ u16 pci_vid;
+ u16 pci_sdid;
+ u16 pci_svid;
+ u16 pci_func;
+ u16 pci_devno;
+
+ struct task_struct *l2_thread;
+
+ /* linkdown handling */
+ wait_queue_head_t shutdown_wait;
+ int wait_for_link_down;
+
+ /*destroy handling */
+ struct timer_list destroy_timer;
+ wait_queue_head_t destroy_wait;
+
+ /* Active list of offloaded sessions */
+ struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
+ int num_ofld_sess;
+
+ /* statistics */
+ struct completion stat_req_done;
+};
+
+#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
+
+struct bnx2fc_cmd_mgr {
+ struct bnx2fc_hba *hba;
+ u16 next_idx;
+ struct list_head *free_list;
+ spinlock_t *free_list_lock;
+ struct io_bdt **io_bdt_pool;
+ struct bnx2fc_cmd **cmds;
+};
+
+struct bnx2fc_rport {
+ struct fcoe_port *port;
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE 0x40
+ u32 fcoe_conn_id;
+ u32 context_id;
+ u32 sid;
+
+ unsigned long flags;
+#define BNX2FC_FLAG_SESSION_READY 0x1
+#define BNX2FC_FLAG_OFFLOADED 0x2
+#define BNX2FC_FLAG_DISABLED 0x3
+#define BNX2FC_FLAG_DESTROYED 0x4
+#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
+#define BNX2FC_FLAG_DESTROY_CMPL 0x6
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
+#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
+#define BNX2FC_FLAG_EXPL_LOGO 0x9
+
+ u32 max_sqes;
+ u32 max_rqes;
+ u32 max_cqes;
+
+ struct fcoe_sqe *sq;
+ dma_addr_t sq_dma;
+ u16 sq_prod_idx;
+ u8 sq_curr_toggle_bit;
+ u32 sq_mem_size;
+
+ struct fcoe_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_cons_idx;
+ u8 cq_curr_toggle_bit;
+ u32 cq_mem_size;
+
+ void *rq;
+ dma_addr_t rq_dma;
+ u32 rq_prod_idx;
+ u32 rq_cons_idx;
+ u32 rq_mem_size;
+
+ void *rq_pbl;
+ dma_addr_t rq_pbl_dma;
+ u32 rq_pbl_size;
+
+ struct fcoe_xfrqe *xferq;
+ dma_addr_t xferq_dma;
+ u32 xferq_mem_size;
+
+ struct fcoe_confqe *confq;
+ dma_addr_t confq_dma;
+ u32 confq_mem_size;
+
+ void *confq_pbl;
+ dma_addr_t confq_pbl_dma;
+ u32 confq_pbl_size;
+
+ struct fcoe_conn_db *conn_db;
+ dma_addr_t conn_db_dma;
+ u32 conn_db_mem_size;
+
+ struct fcoe_sqe *lcq;
+ dma_addr_t lcq_dma;
+ u32 lcq_mem_size;
+
+ void *ofld_req[4];
+ dma_addr_t ofld_req_dma[4];
+ void *enbl_req;
+ dma_addr_t enbl_req_dma;
+
+ spinlock_t tgt_lock;
+ spinlock_t cq_lock;
+ atomic_t num_active_ios;
+ u32 flush_in_prog;
+ unsigned long work_time_slice;
+ unsigned long timestamp;
+ struct list_head free_task_list;
+ struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
+ atomic_t pi;
+ atomic_t ci;
+ struct list_head active_cmd_queue;
+ struct list_head els_queue;
+ struct list_head io_retire_queue;
+ struct list_head active_tm_queue;
+
+ struct timer_list ofld_timer;
+ wait_queue_head_t ofld_wait;
+
+ struct timer_list upld_timer;
+ wait_queue_head_t upld_wait;
+};
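+
+/*
+ * Illustrative note (sketch, not authoritative): sq_curr_toggle_bit and
+ * cq_curr_toggle_bit above track queue wrap-around. A CQE is typically
+ * taken as new only while its toggle bit matches the expected value,
+ * e.g.:
+ *
+ *	new = (((cqe->wqe & FCOE_CQE_TOGGLE_BIT) >>
+ *		FCOE_CQE_TOGGLE_BIT_SHIFT) == tgt->cq_curr_toggle_bit);
+ *
+ * and the expected value flips each time cq_cons_idx wraps past
+ * max_cqes.
+ */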
+
+struct bnx2fc_mp_req {
+ u8 tm_flags;
+
+ u32 req_len;
+ void *req_buf;
+ dma_addr_t req_buf_dma;
+ struct fcoe_bd_ctx *mp_req_bd;
+ dma_addr_t mp_req_bd_dma;
+ struct fc_frame_header req_fc_hdr;
+
+ u32 resp_len;
+ void *resp_buf;
+ dma_addr_t resp_buf_dma;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ dma_addr_t mp_resp_bd_dma;
+ struct fc_frame_header resp_fc_hdr;
+};
+
+struct bnx2fc_els_cb_arg {
+ struct bnx2fc_cmd *aborted_io_req;
+ struct bnx2fc_cmd *io_req;
+ u16 l2_oxid;
+};
+
+/* bnx2fc command structure */
+struct bnx2fc_cmd {
+ struct list_head link;
+ u8 on_active_queue;
+ u8 on_tmf_queue;
+ u8 cmd_type;
+#define BNX2FC_SCSI_CMD 1
+#define BNX2FC_TASK_MGMT_CMD 2
+#define BNX2FC_ABTS 3
+#define BNX2FC_ELS 4
+#define BNX2FC_CLEANUP 5
+ u8 io_req_flags;
+ struct kref refcount;
+ struct fcoe_port *port;
+ struct bnx2fc_rport *tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct bnx2fc_mp_req mp_req;
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct delayed_work timeout_work; /* timer for ULP timeouts */
+ struct completion tm_done;
+ int wait_for_comp;
+ u16 xid;
+ struct fcoe_task_ctx_entry *task;
+ struct io_bdt *bd_tbl;
+ struct fcp_rsp *rsp;
+ size_t data_xfer_len;
+ unsigned long req_flags;
+#define BNX2FC_FLAG_ISSUE_RRQ 0x1
+#define BNX2FC_FLAG_ISSUE_ABTS 0x2
+#define BNX2FC_FLAG_ABTS_DONE 0x3
+#define BNX2FC_FLAG_TM_COMPL 0x4
+#define BNX2FC_FLAG_TM_TIMEOUT 0x5
+#define BNX2FC_FLAG_IO_CLEANUP 0x6
+#define BNX2FC_FLAG_RETIRE_OXID 0x7
+#define BNX2FC_FLAG_EH_ABORT 0x8
+#define BNX2FC_FLAG_IO_COMPL 0x9
+#define BNX2FC_FLAG_ELS_DONE 0xa
+#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
+ u32 fcp_resid;
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+ u8 cdb_status; /* SCSI IO status */
+ u8 fcp_status; /* FCP IO status */
+ u8 fcp_rsp_code;
+ u8 scsi_comp_flags;
+};
+
+struct io_bdt {
+ struct bnx2fc_cmd *io_req;
+ struct fcoe_bd_ctx *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+struct bnx2fc_work {
+ struct list_head list;
+ struct bnx2fc_rport *tgt;
+ u16 wqe;
+};
+struct bnx2fc_unsol_els {
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct work_struct unsol_els_work;
+};
+
+
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
+void bnx2fc_cmd_release(struct kref *ref);
+int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe);
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid);
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec);
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid);
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rport,
+ enum fc_rport_event event);
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd);
+
+
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout);
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id);
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid);
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
new file mode 100644
index 000000000000..fe7769173c43
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -0,0 +1,206 @@
+#ifndef __BNX2FC_CONSTANTS_H_
+#define __BNX2FC_CONSTANTS_H_
+
+/*
+ * This file defines HSI constants for the FCoE flows
+ */
+
+/* KWQ/KCQ FCoE layer code */
+#define FCOE_KWQE_LAYER_CODE (7)
+
+/* KWQ (kernel work queue) request op codes */
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+/* KCQ (kernel completion queue) response op codes */
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
+#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
+#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
+
+/* Unsolicited CQE type */
+#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
+#define FCOE_ERROR_DETECTION_CQE_TYPE 1
+#define FCOE_WARNING_DETECTION_CQE_TYPE 2
+
+/* Task context constants */
+/* State after the driver has initialized the task, in case timer services
+ * are required */
+#define FCOE_TASK_TX_STATE_INIT 0
+/* If timer services are required, Xstorm updates the state to this value
+ * after it starts processing the task. If no timer facilities are required,
+ * the driver initializes the state to this value */
+#define FCOE_TASK_TX_STATE_NORMAL 1
+/* Task is under abort procedure. Updated in order to stop processing of
+ * pending WQEs on this task */
+#define FCOE_TASK_TX_STATE_ABORT 2
+/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
+#define FCOE_TASK_TX_STATE_ERROR 3
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_TX_STATE_WARNING 4
+/* For completed unsolicited task */
+#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5
+/* For exchange cleanup request task */
+#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
+/* For sequence cleanup request task */
+#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
+/* Mark task as aborted and indicate that ABTS was not transmitted */
+#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
+/* Mark task as aborted and indicate that ABTS was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
+/* For completion of the ABTS task. */
+#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
+/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
+ */
+#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
+/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
+
+#define FCOE_TASK_RX_STATE_NORMAL 0
+#define FCOE_TASK_RX_STATE_COMPLETED 1
+/* Obsolete: Intermediate completion (middle path with local completion) */
+#define FCOE_TASK_RX_STATE_INTER_COMP 2
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_RX_STATE_WARNING 3
+/* For E_D_T_TOV timer expiration in Ustorm */
+#define FCOE_TASK_RX_STATE_ERROR 4
+/* ABTS ACC arrived; wait for local completion to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
+/* Local completion arrived; wait for ABTS ACC to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
+/* Special completion indication in case of task was aborted. */
+#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
+/* Special completion indication in case of task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
+/* Special completion indication (in the task that requested the exchange
+ * cleanup) in case the cleaned task is non-valid. */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
+/* Special completion indication (in the task that requested the sequence
+ * cleanup) in case the cleaned task was already returned to normal. */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
+/* Exchange cleanup arrived; wait until the xfer is handled to finally
+ * complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
+/* Xfer handled, wait for exchange cleanup to finally complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
+
+#define FCOE_TASK_TYPE_WRITE 0
+#define FCOE_TASK_TYPE_READ 1
+#define FCOE_TASK_TYPE_MIDPATH 2
+#define FCOE_TASK_TYPE_UNSOLICITED 3
+#define FCOE_TASK_TYPE_ABTS 4
+#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5
+#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6
+
+#define FCOE_TASK_DEV_TYPE_DISK 0
+#define FCOE_TASK_DEV_TYPE_TAPE 1
+
+#define FCOE_TASK_CLASS_TYPE_3 0
+#define FCOE_TASK_CLASS_TYPE_2 1
+
+/* Everest FCoE connection type */
+#define B577XX_FCOE_CONNECTION_TYPE 4
+
+/* Error codes for Error Reporting in fast path flows */
+/* XFER error codes */
+#define FCOE_ERROR_CODE_XFER_OOO_RO 0
+#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
+#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
+#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3
+#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4
+#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5
+#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6
+#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7
+#define FCOE_ERROR_CODE_XFER_FCTL 8
+
+/* FCP RSP error codes */
+#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9
+#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10
+#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14
+#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15
+#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16
+#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17
+#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18
+#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19
+
+/* FCP DATA error codes */
+#define FCOE_ERROR_CODE_DATA_OOO_RO 20
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22
+#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
+#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
+#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
+#define FCOE_ERROR_CODE_DATA_FCTL 28
+
+/* Middle path error codes */
+#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
+#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
+#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
+#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
+#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
+
+/* ABTS error codes */
+#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
+#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
+#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
+
+/* Common error codes */
+#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42
+#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43
+#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44
+#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45
+#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46
+#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
+#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
+#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
+
+/* Unsolicited Rx error codes */
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54
+#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55
+
+#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56
+#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57
+#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58
+
+/* Timer error codes */
+#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60
+#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61
+
+
+#endif /* __BNX2FC_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
new file mode 100644
index 000000000000..7f6aff68cc53
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -0,0 +1,70 @@
+#ifndef __BNX2FC_DEBUG__
+#define __BNX2FC_DEBUG__
+
+/* Log level bit mask */
+#define LOG_IO 0x01 /* scsi cmd error, cleanup */
+#define LOG_TGT 0x02 /* Session setup, cleanup, etc. */
+#define LOG_HBA 0x04 /* lport events, link, mtu, etc. */
+#define LOG_ELS 0x08 /* ELS logs */
+#define LOG_MISC 0x10 /* fcoe L2 frame related logs */
+#define LOG_ALL 0xff /* LOG all messages */
+
+extern unsigned int bnx2fc_debug_level;
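+
+/*
+ * Illustrative usage (sketch): bnx2fc_debug_level is a bit mask of the
+ * LOG_* values above, so enabling target and ELS logging together
+ * amounts to:
+ *
+ *	bnx2fc_debug_level = LOG_TGT | LOG_ELS;		(i.e. 0x0a)
+ */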
+
+#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
+ do { \
+ if (unlikely(bnx2fc_debug_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+ } while (0)
+
+#define BNX2FC_ELS_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_ELS, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_MISC_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_MISC, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
+ do { \
+ if (!io_req || !io_req->port || !io_req->port->lport || \
+ !io_req->port->lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ shost_printk(KERN_ALERT, \
+ (io_req)->port->lport->host, \
+ PFX "xid:0x%x " fmt, \
+ (io_req)->xid, ##arg)); \
+ } while (0)
+
+#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
+ do { \
+ if (!tgt || !tgt->port || !tgt->port->lport || \
+ !tgt->port->lport->host || !tgt->rport) \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ shost_printk(KERN_ALERT, \
+ (tgt)->port->lport->host, \
+ PFX "port:%x " fmt, \
+ (tgt)->rport->port_id, ##arg)); \
+ } while (0)
+
+
+#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
+ do { \
+ if (!lport || !lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ shost_printk(KERN_ALERT, lport->host, \
+ PFX fmt, ##arg)); \
+ } while (0)
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
new file mode 100644
index 000000000000..7a11a255157f
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -0,0 +1,515 @@
+/*
+ * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains helper routines that handle ELS requests
+ * and responses.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
+
+static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req;
+ struct bnx2fc_cmd *rrq_req;
+ int rc = 0;
+
+ BUG_ON(!cb_arg);
+ rrq_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BUG_ON(!orig_io_req);
+ BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
+ orig_io_req->xid, rrq_req->xid);
+
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
+ /*
+ * els req is timed out. cleanup the IO with FW and
+ * drop the completion. Remove from active_cmd_queue.
+ */
+ BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
+ rrq_req->xid);
+
+ if (rrq_req->on_active_queue) {
+ list_del_init(&rrq_req->link);
+ rrq_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(rrq_req);
+ BUG_ON(rc);
+ }
+ }
+ kfree(cb_arg);
+}
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
+{
+
+ struct fc_els_rrq rrq;
+ struct bnx2fc_rport *tgt = aborted_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ unsigned long start = jiffies;
+ int rc;
+
+ BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
+ aborted_io_req->xid);
+ memset(&rrq, 0, sizeof(rrq));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
+ rc = -ENOMEM;
+ goto rrq_err;
+ }
+
+ cb_arg->aborted_io_req = aborted_io_req;
+
+ rrq.rrq_cmd = ELS_RRQ;
+ hton24(rrq.rrq_s_id, sid);
+ rrq.rrq_ox_id = htons(aborted_io_req->xid);
+ rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+
+retry_rrq:
+ rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
+ bnx2fc_rrq_compl, cb_arg,
+ r_a_tov);
+ if (rc == -ENOMEM) {
+ if (time_after(jiffies, start + (10 * HZ))) {
+ BNX2FC_ELS_DBG("rrq Failed\n");
+ rc = FAILED;
+ goto rrq_err;
+ }
+ msleep(20);
+ goto retry_rrq;
+ }
+rrq_err:
+ if (rc) {
+ BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
+ aborted_io_req->xid);
+ kfree(cb_arg);
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ }
+ return rc;
+}
+
+static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u16 l2_oxid;
+ int frame_len;
+ int rc = 0;
+
+ l2_oxid = cb_arg->l2_oxid;
+ BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
+
+ els_req = cb_arg->io_req;
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
+ /*
+ * els req is timed out. cleanup the IO with FW and
+ * drop the completion. libfc will handle the els timeout
+ */
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(els_req);
+ BUG_ON(rc);
+ }
+ goto free_arg;
+ }
+
+ tgt = els_req->tgt;
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "Unable to alloc mp buf\n");
+ goto free_arg;
+ }
+ hdr_len = sizeof(*fc_hdr);
+ if (hdr_len + resp_len > PAGE_SIZE) {
+ printk(KERN_ERR PFX "l2_els_compl: resp len is "
+ "beyond page size\n");
+ goto free_buf;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+ frame_len = hdr_len + resp_len;
+
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
+
+free_buf:
+ kfree(buf);
+free_arg:
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ /* adisc is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ /* logo is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_rls *rls;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ rls = fc_frame_payload_get(fp, sizeof(*rls));
+ /* rls is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int rc = 0;
+ int task_idx, index;
+ u32 did, sid;
+ u16 xid;
+
+ rc = fc_remote_port_chkready(rport);
+ if (rc) {
+ printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
+ (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
+ printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
+ if (!els_req) {
+ rc = -ENOMEM;
+ goto els_err;
+ }
+
+ els_req->sc_cmd = NULL;
+ els_req->port = port;
+ els_req->tgt = tgt;
+ els_req->cb_func = cb_func;
+ cb_arg->io_req = els_req;
+ els_req->cb_arg = cb_arg;
+
+ mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
+ rc = bnx2fc_init_mp_req(els_req);
+ if (rc == FAILED) {
+ printk(KERN_ALERT PFX "ELS MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -ENOMEM;
+ goto els_err;
+ } else {
+ /* rc SUCCESS */
+ rc = 0;
+ }
+
+ /* Set the data_xfer_len to the size of ELS payload */
+ mp_req->req_len = data_len;
+ els_req->data_xfer_len = mp_req->req_len;
+
+ /* Fill ELS Payload */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
+ memcpy(mp_req->req_buf, data, data_len);
+ } else {
+ printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ goto els_err;
+
+ /* Fill FC header */
+ fc_hdr = &(mp_req->req_fc_hdr);
+
+ did = tgt->rport->port_id;
+ sid = tgt->sid;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ /* Obtain exchange id */
+ xid = els_req->xid;
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(els_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_els.. session not ready\n");
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EINVAL;
+ }
+
+ if (timer_msec)
+ bnx2fc_cmd_timer_set(els_req, timer_msec);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ els_req->on_active_queue = 1;
+ list_add_tail(&els_req->link, &tgt->els_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+els_err:
+ return rc;
+}
+
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ u64 *hdr;
+ u64 *temp_hdr;
+
+ BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
+ "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &els_req->req_flags)) {
+ BNX2FC_ELS_DBG("Timer context finished processing this "
+ "els - 0x%x\n", els_req->xid);
+ /* This IO doesn't receive a cleanup completion */
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ return;
+ }
+
+ /* Cancel the timeout_work, as we received the response */
+ if (cancel_delayed_work(&els_req->timeout_work))
+ kref_put(&els_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ }
+
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ /* Parse ELS response */
+ if ((els_req->cb_func) && (els_req->cb_arg)) {
+ els_req->cb_func(els_req->cb_arg);
+ els_req->cb_arg = NULL;
+ }
+
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+}
+
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ u8 *mac;
+ struct fc_frame_header *fh;
+ u8 op;
+
+ if (IS_ERR(fp))
+ goto done;
+
+ mac = fr_cb(fp)->granted_mac;
+ if (is_zero_ether_addr(mac)) {
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_ELS) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
+ "fh_type != FC_TYPE_ELS\n");
+ fc_frame_free(fp);
+ return;
+ }
+ op = fc_frame_payload_op(fp);
+ if (lport->vport) {
+ if (op == ELS_LS_RJT) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
+ fc_vport_terminate(lport->vport);
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ fip->update_mac(lport, mac);
+done:
+ fc_lport_flogi_resp(seq, fp, lport);
+}
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ static u8 zero_mac[ETH_ALEN] = { 0 };
+
+ if (!IS_ERR(fp))
+ fip->update_mac(lport, zero_mac);
+ fc_lport_logo_resp(seq, fp, lport);
+}
+
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+ switch (op) {
+ case ELS_FLOGI:
+ case ELS_FDISC:
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
+ fip, timeout);
+ case ELS_LOGO:
+ /* only hook onto fabric logouts, not port logouts */
+ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+ break;
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
+ fip, timeout);
+ }
+ return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
new file mode 100644
index 000000000000..e476e8753079
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -0,0 +1,2535 @@
+/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the code that interacts with libfc, libfcoe,
+ * cnic modules to create FCoE instances, send/receive non-offloaded
+ * FIP/FCoE packets, listen to link events etc.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static struct list_head adapter_list;
+static u32 adapter_count;
+static DEFINE_MUTEX(bnx2fc_dev_lock);
+DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+#define DRV_MODULE_NAME "bnx2fc"
+#define DRV_MODULE_VERSION BNX2FC_VERSION
+#define DRV_MODULE_RELDATE "Jan 25, 2011"
+
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNX2FC_MAX_QUEUE_DEPTH 256
+#define BNX2FC_MIN_QUEUE_DEPTH 32
+#define FCOE_WORD_TO_BYTE 4
+
+static struct scsi_transport_template *bnx2fc_transport_template;
+static struct scsi_transport_template *bnx2fc_vport_xport_template;
+
+struct workqueue_struct *bnx2fc_wq;
+
+/* bnx2fc needs only one instance of the fcoe_percpu_s structure.
+ * The I/O threads are per CPU, but there is only one L2 thread.
+ */
+struct fcoe_percpu_s bnx2fc_global;
+DEFINE_SPINLOCK(bnx2fc_global_lock);
+
+static struct cnic_ulp_ops bnx2fc_cnic_cb;
+static struct libfc_function_template bnx2fc_libfc_fcn_templ;
+static struct scsi_host_template bnx2fc_shost_template;
+static struct fc_function_template bnx2fc_transport_function;
+static struct fc_function_template bnx2fc_vport_xport_function;
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static int bnx2fc_destroy(struct net_device *net_device);
+static int bnx2fc_enable(struct net_device *netdev);
+static int bnx2fc_disable(struct net_device *netdev);
+
+static void bnx2fc_recv_frame(struct sk_buff *skb);
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
+static int bnx2fc_net_config(struct fc_lport *lp);
+static int bnx2fc_lport_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport);
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv);
+static void bnx2fc_destroy_work(struct work_struct *work);
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport);
+static void bnx2fc_stop(struct bnx2fc_hba *hba);
+static int __init bnx2fc_mod_init(void);
+static void __exit bnx2fc_mod_exit(void);
+
+unsigned int bnx2fc_debug_level;
+module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
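+/*
+ * Illustrative usage (sketch): with S_IRUGO|S_IWUSR the log mask can
+ * also be changed at runtime through sysfs, e.g. as root:
+ *
+ *	echo 0xff > /sys/module/bnx2fc/parameters/debug_logging
+ */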
+
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2fc_cpu_notifier = {
+ .notifier_call = bnx2fc_cpu_callback,
+};
+
+static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
+{
+ struct fcoe_percpu_s *bg;
+ struct fcoe_rcv_info *fr;
+ struct sk_buff_head *list;
+ struct sk_buff *skb, *next;
+ struct sk_buff *head;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ list = &bg->fcoe_rx_list;
+ head = list->next;
+ for (skb = head; skb != (struct sk_buff *)list;
+ skb = next) {
+ next = skb->next;
+ fr = fcoe_dev_from_skb(skb);
+ if (fr->fr_dev == lp) {
+ __skb_unlink(skb, list);
+ kfree_skb(skb);
+ }
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+}
+
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ int rc;
+ spin_lock(&bnx2fc_global_lock);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
+ spin_unlock(&bnx2fc_global_lock);
+
+ return rc;
+}
+
+static void bnx2fc_abort_io(struct fc_lport *lport)
+{
+ /*
+ * This function is no-op for bnx2fc, but we do
+ * not want to leave it as NULL either, as libfc
+ * can call the default function which is
+ * fc_fcp_abort_io.
+ */
+}
+
+static void bnx2fc_cleanup(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ int i;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&hba->hba_mutex);
+ spin_lock_bh(&hba->hba_lock);
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if (tgt) {
+ /* Cleanup IOs belonging to requested vport */
+ if (tgt->port == port) {
+ spin_unlock_bh(&hba->hba_lock);
+ BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
+ bnx2fc_flush_active_ios(tgt);
+ spin_lock_bh(&hba->hba_lock);
+ }
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+ mutex_unlock(&hba->hba_mutex);
+}
+
+static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
+ struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_frame_header *fh;
+ int rc = 0;
+
+ fh = fc_frame_header_get(fp);
+ BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
+ "r_ctl = 0x%x\n", rdata->ids.port_id,
+ ntohs(fh->fh_ox_id), fh->fh_r_ctl);
+ if ((fh->fh_type == FC_TYPE_ELS) &&
+ (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_ADISC:
+ rc = bnx2fc_send_adisc(tgt, fp);
+ break;
+ case ELS_LOGO:
+ rc = bnx2fc_send_logo(tgt, fp);
+ break;
+ case ELS_RLS:
+ rc = bnx2fc_send_rls(tgt, fp);
+ break;
+ default:
+ break;
+ }
+ } else if ((fh->fh_type == FC_TYPE_BLS) &&
+ (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
+ BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
+ else {
+ BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
+ "rctl 0x%x thru non-offload path\n",
+ fh->fh_type, fh->fh_r_ctl);
+ return -ENODEV;
+ }
+ if (rc)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+/**
+ * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
+ *
+ * @lport: the associated local port
+ * @fp: the fc_frame to be transmitted
+ */
+static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fc_frame_header *fh;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct fcoe_hdr *hp;
+ struct bnx2fc_rport *tgt;
+ struct fcoe_dev_stats *stats;
+ u8 sof, eof;
+ u32 crc;
+ unsigned int hlen, tlen, elen;
+ int wlen, rc = 0;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ hba = port->priv;
+
+ fh = fc_frame_header_get(fp);
+
+ skb = fp_skb(fp);
+ if (!lport->link_up) {
+ BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ if (!hba->ctlr.sel_fcf) {
+ BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
+ return 0;
+ }
+
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ /*
+ * Snoop the frame header to check if the frame is for
+ * an offloaded session
+ */
+ /*
+ * tgt_ofld_list access is synchronized using both the hba mutex
+ * and the hba lock. At least one of them must be held for read
+ * access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
+ if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ /* This frame is for offloaded session */
+ BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
+ "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
+ spin_unlock_bh(&hba->hba_lock);
+ rc = bnx2fc_xmit_l2_frame(tgt, fp);
+ if (rc != -ENODEV) {
+ kfree_skb(skb);
+ return rc;
+ }
+ } else {
+ spin_unlock_bh(&hba->hba_lock);
+ }
+
+ elen = sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+
+ /* copy port crc and eof to the skb buff */
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+ if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+ + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+ cp = NULL;
+ }
+
+ /* adjust skb network/transport offsets to match mac/fcoe/port */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_FCOE);
+ skb->dev = hba->netdev;
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ if (hba->ctlr.map_dest)
+ fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+ else
+ /* insert GW address */
+ memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
+
+ if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
+ else
+ memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+ if (lport->seq_offload && fr_max_payload(fp)) {
+ skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+ skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+ } else {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ }
+
+ /* update tx stats */
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ put_cpu();
+
+ /* send down to lld */
+ fr_dev(fp) = lport;
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(lport, skb);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
+ *
+ * @skb: the receive socket buffer
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * This function receives the packet and builds FC frame and passes it up
+ */
+static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *olddev)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_hba *hba;
+ struct fc_frame_header *fh;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_percpu_s *bg;
+ unsigned short oxid;
+
+ hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
+ lport = hba->ctlr.lp;
+
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
+ goto err;
+ }
+
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ goto err;
+ }
+
+ /*
+ * Check for minimum frame length, and make sure required FCoE
+ * and FC headers are pulled into the linear data area.
+ */
+ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+ goto err;
+
+ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ oxid = ntohs(fh->fh_ox_id);
+
+ fr = fcoe_dev_from_skb(skb);
+ fr->fr_dev = lport;
+ fr->ptype = ptype;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+
+ __skb_queue_tail(&bg->fcoe_rx_list, skb);
+ if (bg->fcoe_rx_list.qlen == 1)
+ wake_up_process(bg->thread);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int bnx2fc_l2_rcv_thread(void *arg)
+{
+ struct fcoe_percpu_s *bg = arg;
+ struct sk_buff *skb;
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ bnx2fc_recv_frame(skb);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+
+static void bnx2fc_recv_frame(struct sk_buff *skb)
+{
+ u32 fr_len;
+ struct fc_lport *lport;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ struct fc_lport *vn_port;
+ struct fcoe_port *port;
+ u8 *mac = NULL;
+ u8 *dest_mac = NULL;
+ struct fcoe_hdr *hp;
+
+ fr = fcoe_dev_from_skb(skb);
+ lport = fr->fr_dev;
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "Invalid lport struct\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
+ mac = eth_hdr(skb)->h_source;
+ dest_mac = eth_hdr(skb)->h_dest;
+
+ /* Pull the header */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = hp->fcoe_sof;
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len)) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+
+ vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+ if (vn_port) {
+ port = lport_priv(vn_port);
+ if (compare_ether_addr(port->data_src_addr, dest_mac)
+ != 0) {
+ BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ }
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+ fh->fh_type == FC_TYPE_FCP) {
+ /* Drop FCP data. We don't handle this in the L2 path */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fh->fh_type == FC_TYPE_ELS) {
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LOGO:
+ if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ /* drop non-FIP LOGO */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ break;
+ }
+ }
+ if (le32_to_cpu(fr_crc(fp)) !=
+ ~crc32(~0, skb->data, fr_len)) {
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING PFX "dropping frame with "
+ "CRC error\n");
+ stats->InvalidCRCCount++;
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ put_cpu();
+ fc_exch_recv(lport, fp);
+}
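+
+/*
+ * On the CRC check above: the trailing fcoe_crc_eof carries the frame's
+ * CRC-32 in little-endian form, and ~crc32(~0, skb->data, fr_len)
+ * recomputes it over the FC header plus payload, mirroring the check
+ * done by the software FCoE receive path.
+ */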
+
+/**
+ * bnx2fc_percpu_io_thread - per-CPU thread that processes I/O completions
+ *
+ * @arg: ptr to bnx2fc_percpu_info structure
+ */
+int bnx2fc_percpu_io_thread(void *arg)
+{
+ struct bnx2fc_percpu_s *p = arg;
+ struct bnx2fc_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&p->fp_work_lock);
+ while (!list_empty(&p->work_list)) {
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_bh(&p->fp_work_lock);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_lock_bh(&p->fp_work_lock);
+ }
+ spin_unlock_bh(&p->fp_work_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+}
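+
+/*
+ * The splice-then-process idiom above keeps fp_work_lock hold times
+ * short: pending entries are moved wholesale onto the private work_list
+ * under the lock, and each completion is then processed with the lock
+ * dropped so new work can be queued concurrently.
+ */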
+
+static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+{
+ struct fc_host_statistics *bnx2fc_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_statistics_params *fw_stats;
+ int rc = 0;
+
+ fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
+ if (!fw_stats)
+ return NULL;
+
+ bnx2fc_stats = fc_get_host_stats(shost);
+
+ init_completion(&hba->stat_req_done);
+ if (bnx2fc_send_stat_req(hba))
+ return bnx2fc_stats;
+ rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
+ if (!rc) {
+ BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
+ return bnx2fc_stats;
+ }
+ bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+ bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
+ bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
+ bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
+ bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+
+ bnx2fc_stats->dumped_frames = 0;
+ bnx2fc_stats->lip_count = 0;
+ bnx2fc_stats->nos_count = 0;
+ bnx2fc_stats->loss_of_sync_count = 0;
+ bnx2fc_stats->loss_of_signal_count = 0;
+ bnx2fc_stats->prim_seq_protocol_err_count = 0;
+
+ return bnx2fc_stats;
+}
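+
+/*
+ * Note on the pattern above: the completion is (re)initialized before
+ * each stats request is sent, and the 2 * HZ wait bounds the firmware
+ * round trip; on timeout the previously accumulated host statistics are
+ * returned unchanged rather than blocking the caller.
+ */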
+
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct Scsi_Host *shost = lport->host;
+ int rc = 0;
+
+ shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
+ shost->max_lun = BNX2FC_MAX_LUN;
+ shost->max_id = BNX2FC_MAX_FCP_TGT;
+ shost->max_channel = 0;
+ if (lport->vport)
+ shost->transportt = bnx2fc_vport_xport_template;
+ else
+ shost->transportt = bnx2fc_transport_template;
+
+ /* Add the new host to SCSI-ml */
+ rc = scsi_add_host(lport->host, dev);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on scsi_add_host\n");
+ return rc;
+ }
+ if (!lport->vport)
+ fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
+ sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
+ BNX2FC_NAME, BNX2FC_VERSION,
+ hba->netdev->name);
+
+ return 0;
+}
+
+static int bnx2fc_mfs_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ u32 mfs;
+ u32 max_mfs;
+
+ mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
+ BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
+ if (mfs > max_mfs)
+ mfs = max_mfs;
+
+ /* Adjust mfs to be a multiple of 256 bytes */
+ mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
+ BNX2FC_MIN_PAYLOAD);
+ mfs = mfs + sizeof(struct fc_frame_header);
+
+ BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
+ if (fc_set_mfs(lport, mfs))
+ return -EINVAL;
+ return 0;
+}
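+
+/*
+ * Worked example of the rounding above, assuming a 14-byte FCoE header,
+ * an 8-byte CRC/EOF trailer, a 24-byte FC frame header and
+ * BNX2FC_MIN_PAYLOAD/BNX2FC_MAX_PAYLOAD of 256/2048: an MTU of 1500
+ * gives mfs = 1500 - 22 = 1478, which rounds down to
+ * ((1478 - 24) / 256) * 256 + 24 = 1304; an MTU of 2500 gives 2478,
+ * which is first capped at max_mfs = 2048 + 24 = 2072 and then passes
+ * through the rounding unchanged.
+ */
+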
+static void bnx2fc_link_speed_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+
+ if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+ if (ecmd.speed == SPEED_1000)
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ if (ecmd.speed == SPEED_10000)
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ }
+}
+
+static int bnx2fc_link_ok(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *dev = hba->phys_dev;
+ int rc = 0;
+
+ if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else {
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ rc = -1;
+ }
+ return rc;
+}
+
+/**
+ * bnx2fc_get_link_state - get network link state
+ *
+ * @hba: adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
+{
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+static int bnx2fc_net_config(struct fc_lport *lport)
+{
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ u64 wwnn, wwpn;
+
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ /* require support for get_pauseparam ethtool op. */
+ if (!hba->phys_dev->ethtool_ops ||
+ !hba->phys_dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (bnx2fc_mfs_update(lport))
+ return -EINVAL;
+
+ skb_queue_head_init(&port->fcoe_pending_queue);
+ port->fcoe_pending_queue_active = 0;
+ setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
+
+ bnx2fc_link_speed_update(lport);
+
+ if (!lport->vport) {
+ wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
+ BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
+ fc_set_wwnn(lport, wwnn);
+
+ wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
+ BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
+ fc_set_wwpn(lport, wwpn);
+ }
+
+ return 0;
+}
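+
+/*
+ * For reference, assuming libfcoe's usual MAC-to-WWN mapping:
+ * fcoe_wwn_from_mac(mac, 1, 0) builds an NAA-1 WWNN of
+ * 0x1000000000000000 | mac, and fcoe_wwn_from_mac(mac, 2, 0) an NAA-2
+ * WWPN of 0x2000000000000000 | mac; e.g. MAC 00:10:18:ab:cd:ef would
+ * yield WWNN 0x1000001018abcdef and WWPN 0x2000001018abcdef.
+ */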
+
+static void bnx2fc_destroy_timer(unsigned long data)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - "
+ "Destroy compl not received!!\n");
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+}
+
+/**
+ * bnx2fc_indicate_netevent - Generic netdev event handler
+ *
+ * @context: adapter structure pointer
+ * @event: event type
+ *
+ * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
+ * NETDEV_CHANGEMTU events
+ */
+static void bnx2fc_indicate_netevent(void *context, unsigned long event)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fc_lport *lport = hba->ctlr.lp;
+ struct fc_lport *vport;
+ u32 link_possible = 1;
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
+ hba->netdev->name, event);
+ return;
+ }
+
+ /*
+ * ASSUMPTION:
+ * indicate_netevent cannot be called from cnic unless bnx2fc
+ * does register_device
+ */
+ BUG_ON(!lport);
+
+ BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
+ hba->netdev->name, event);
+
+ switch (event) {
+ case NETDEV_UP:
+ BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
+ hba->adapter_state);
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ printk(KERN_ERR "indicate_netevent: "\
+ "adapter is not UP!!\n");
+ /* fall thru to update mfs if MTU has changed */
+ case NETDEV_CHANGEMTU:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
+ bnx2fc_mfs_update(lport);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ bnx2fc_mfs_update(vport);
+ mutex_unlock(&lport->lp_mutex);
+ break;
+
+ case NETDEV_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port down\n");
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port going down\n");
+ set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_CHANGE:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unkonwn netevent %ld", event);
+ return;
+ }
+
+ bnx2fc_link_speed_update(lport);
+
+ if (link_possible && !bnx2fc_link_ok(lport)) {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ } else {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
+ if (fcoe_ctlr_link_down(&hba->ctlr)) {
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ BNX2FC_HBA_DBG(lport, "indicate_netevent "
+ "num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 1;
+ BNX2FC_HBA_DBG(lport, "waiting for uploads to "
+ "compl proc = %s\n",
+ current->comm);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 0;
+
+ if (signal_pending(current))
+ flush_signals(current);
+ }
+ }
+}
+
+static int bnx2fc_libfc_config(struct fc_lport *lport)
+{
+
+ /* Set the function pointers set by bnx2fc driver */
+ memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
+ sizeof(struct libfc_function_template));
+ fc_elsct_init(lport);
+ fc_exch_init(lport);
+ fc_rport_init(lport);
+ fc_disc_init(lport);
+ return 0;
+}
+
+static int bnx2fc_em_config(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
+ if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
+ FCOE_MAX_XID, NULL)) {
+ printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
+ BNX2FC_MAX_XID);
+
+ if (!hba->cmd_mgr) {
+ printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
+ fc_exch_mgr_free(lport);
+ return -ENOMEM;
+ }
+ return 0;
+}
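+
+/*
+ * Two disjoint XID ranges are set up above: libfc's exchange manager
+ * owns FCOE_MIN_XID..FCOE_MAX_XID for exchanges handled in the L2 path,
+ * while the command manager owns BNX2FC_MIN_XID..BNX2FC_MAX_XID for
+ * exchanges offloaded to the firmware.
+ */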
+
+static int bnx2fc_lport_config(struct fc_lport *lport)
+{
+ lport->link_up = 0;
+ lport->qfull = 0;
+ lport->max_retry_count = 3;
+ lport->max_rport_retry_count = 3;
+ lport->e_d_tov = 2 * 1000;
+ lport->r_a_tov = 10 * 1000;
+
+ /* REVISIT: enable when supporting tape devices
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ */
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
+ lport->does_npiv = 1;
+
+ memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
+ lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
+
+ /* alloc stats structure */
+ if (fc_lport_init_stats(lport))
+ return -ENOMEM;
+
+ /* Finish fc_lport configuration */
+ fc_lport_config(lport);
+
+ return 0;
+}
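+
+/*
+ * The 2s E_D_TOV and 10s R_A_TOV programmed above are the standard FC
+ * default timeout values; the max_retry_count and max_rport_retry_count
+ * of 3 bound lport and rport login retries respectively.
+ */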
+
+/**
+ * bnx2fc_fip_recv - handle a received FIP frame.
+ *
+ * @skb: the received skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct bnx2fc_hba *hba;
+ hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
+ fcoe_ctlr_recv(&hba->ctlr, skb);
+ return 0;
+}
+
+/**
+ * bnx2fc_update_src_mac - update the FCoE source MAC address for an lport
+ *
+ * @lport: local port
+ * @addr: the granted unicast FCoE MAC address
+ *
+ * Records the MAC address assigned by the FCF as the lport's data
+ * source address.
+ */
+static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+ struct fcoe_port *port = lport_priv(lport);
+
+ memcpy(port->data_src_addr, addr, ETH_ALEN);
+}
+
+/**
+ * bnx2fc_get_src_mac - return the ethernet source address for an lport
+ *
+ * @lport: libfc port
+ */
+static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
+{
+ struct fcoe_port *port;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ return port->data_src_addr;
+}
+
+/**
+ * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
+ *
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb->dev = bnx2fc_from_ctlr(fip)->netdev;
+ dev_queue_xmit(skb);
+}
+
+static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(n_port);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct fc_lport *vn_port;
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "vn ports cannot be created on"
+ "this hba\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+ vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ if (IS_ERR(vn_port)) {
+ printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
+ netdev->name);
+ return -EIO;
+ }
+
+ if (disabled) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ } else {
+ vn_port->boot_time = jiffies;
+ fc_lport_init(vn_port);
+ fc_fabric_login(vn_port);
+ fc_vport_setlink(vn_port);
+ }
+ return 0;
+}
+
+static int bnx2fc_vport_destroy(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port = vport->dd_data;
+ struct fcoe_port *port = lport_priv(vn_port);
+
+ mutex_lock(&n_port->lp_mutex);
+ list_del(&vn_port->list);
+ mutex_unlock(&n_port->lp_mutex);
+ queue_work(bnx2fc_wq, &port->destroy_work);
+ return 0;
+}
+
+static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
+{
+ struct fc_lport *lport = vport->dd_data;
+
+ if (disable) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ fc_fabric_logoff(lport);
+ } else {
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+ fc_vport_setlink(lport);
+ }
+ return 0;
+}
+
+
+static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
+{
+ struct net_device *netdev = hba->netdev;
+ struct net_device *physdev = hba->phys_dev;
+ struct netdev_hw_addr *ha;
+ int sel_san_mac = 0;
+
+ /* Bonding devices are not supported */
+ if ((netdev->priv_flags & IFF_MASTER_ALB) ||
+ (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
+ (netdev->priv_flags & IFF_MASTER_8023AD)) {
+ return -EOPNOTSUPP;
+ }
+
+ /* setup Source MAC Address */
+ rcu_read_lock();
+ for_each_dev_addr(physdev, ha) {
+ BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
+ ha->type);
+ printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
+ ha->addr[1], ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
+
+ if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+ (is_valid_ether_addr(ha->addr))) {
+ memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+ sel_san_mac = 1;
+ BNX2FC_MISC_DBG("Found SAN MAC\n");
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sel_san_mac)
+ return -ENODEV;
+
+ hba->fip_packet_type.func = bnx2fc_fip_recv;
+ hba->fip_packet_type.type = htons(ETH_P_FIP);
+ hba->fip_packet_type.dev = netdev;
+ dev_add_pack(&hba->fip_packet_type);
+
+ hba->fcoe_packet_type.func = bnx2fc_rcv;
+ hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ hba->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&hba->fcoe_packet_type);
+
+ return 0;
+}
+
+static int bnx2fc_attach_transport(void)
+{
+ bnx2fc_transport_template =
+ fc_attach_transport(&bnx2fc_transport_function);
+
+ if (bnx2fc_transport_template == NULL) {
+ printk(KERN_ERR PFX "Failed to attach FC transport\n");
+ return -ENODEV;
+ }
+
+ bnx2fc_vport_xport_template =
+ fc_attach_transport(&bnx2fc_vport_xport_function);
+ if (bnx2fc_vport_xport_template == NULL) {
+ printk(KERN_ERR PFX
+ "Failed to attach FC transport for vport\n");
+ fc_release_transport(bnx2fc_transport_template);
+ bnx2fc_transport_template = NULL;
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void bnx2fc_release_transport(void)
+{
+ fc_release_transport(bnx2fc_transport_template);
+ fc_release_transport(bnx2fc_vport_xport_template);
+ bnx2fc_transport_template = NULL;
+ bnx2fc_vport_xport_template = NULL;
+}
+
+static void bnx2fc_interface_release(struct kref *kref)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+
+ hba = container_of(kref, struct bnx2fc_hba, kref);
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n");
+
+ netdev = hba->netdev;
+ phys_dev = hba->phys_dev;
+
+ /* tear-down FIP controller */
+ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
+ fcoe_ctlr_destroy(&hba->ctlr);
+
+ /* Free the command manager */
+ if (hba->cmd_mgr) {
+ bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+ hba->cmd_mgr = NULL;
+ }
+ dev_put(netdev);
+ module_put(THIS_MODULE);
+}
+
+static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
+{
+ kref_get(&hba->kref);
+}
+
+static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
+{
+ kref_put(&hba->kref, bnx2fc_interface_release);
+}
+
+static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
+{
+ bnx2fc_unbind_pcidev(hba);
+ kfree(hba);
+}
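+
+/*
+ * Reference-count life cycle, for clarity: kref_init() in
+ * bnx2fc_interface_setup() starts the count at 1, bnx2fc_if_create()
+ * takes an extra reference on behalf of the lport, and bnx2fc_create()
+ * drops the setup reference once creation succeeds, so the final
+ * bnx2fc_interface_put() from bnx2fc_if_destroy() ends up calling
+ * bnx2fc_interface_release().
+ */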
+
+/**
+ * bnx2fc_interface_create - create a new fcoe instance
+ *
+ * @cnic: pointer to cnic device
+ *
+ * Creates a new FCoE instance on the given device, which includes
+ * allocating the hba structure and binding it to the cnic device.
+ */
+static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
+{
+ struct bnx2fc_hba *hba;
+ int rc;
+
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+ if (!hba) {
+ printk(KERN_ERR PFX "Unable to allocate hba structure\n");
+ return NULL;
+ }
+ spin_lock_init(&hba->hba_lock);
+ mutex_init(&hba->hba_mutex);
+
+ hba->cnic = cnic;
+ rc = bnx2fc_bind_pcidev(hba);
+ if (rc)
+ goto bind_err;
+ hba->phys_dev = cnic->netdev;
+ /* will get overwritten after we do vlan discovery */
+ hba->netdev = hba->phys_dev;
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ init_waitqueue_head(&hba->destroy_wait);
+
+ return hba;
+bind_err:
+ printk(KERN_ERR PFX "create_interface: bind error\n");
+ kfree(hba);
+ return NULL;
+}
+
+static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
+ enum fip_state fip_mode)
+{
+ int rc = 0;
+ struct net_device *netdev = hba->netdev;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+
+ dev_hold(netdev);
+ kref_init(&hba->kref);
+
+ hba->flags = 0;
+
+ /* Initialize FIP */
+ memset(fip, 0, sizeof(*fip));
+ fcoe_ctlr_init(fip, fip_mode);
+ hba->ctlr.send = bnx2fc_fip_send;
+ hba->ctlr.update_mac = bnx2fc_update_src_mac;
+ hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
+
+ rc = bnx2fc_netdev_setup(hba);
+ if (rc)
+ goto setup_err;
+
+ hba->next_conn_id = 0;
+
+ memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
+ hba->num_ofld_sess = 0;
+
+ return 0;
+
+setup_err:
+ fcoe_ctlr_destroy(&hba->ctlr);
+ dev_put(netdev);
+ bnx2fc_interface_put(hba);
+ return rc;
+}
+
+/**
+ * bnx2fc_if_create - Create FCoE instance on a given interface
+ *
+ * @hba: FCoE interface to create a local port on
+ * @parent: Device pointer to be the parent in sysfs for the SCSI host
+ * @npiv: Indicates if the port is vport or not
+ *
+ * Creates a fc_lport instance and a Scsi_Host instance and configures them.
+ *
+ * Returns: Allocated fc_lport or an error pointer
+ */
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv)
+{
+ struct fc_lport *lport = NULL;
+ struct fcoe_port *port;
+ struct Scsi_Host *shost;
+ struct fc_vport *vport = dev_to_vport(parent);
+ int rc = 0;
+
+ /* Allocate Scsi_Host structure */
+ if (!npiv) {
+ lport = libfc_host_alloc(&bnx2fc_shost_template,
+ sizeof(struct fcoe_port));
+ } else {
+ lport = libfc_vport_create(vport,
+ sizeof(struct fcoe_port));
+ }
+
+ if (!lport) {
+ printk(KERN_ERR PFX "could not allocate scsi host structure\n");
+ return NULL;
+ }
+ shost = lport->host;
+ port = lport_priv(lport);
+ port->lport = lport;
+ port->priv = hba;
+ INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+ /* Configure fcoe_port */
+ rc = bnx2fc_lport_config(lport);
+ if (rc)
+ goto lp_config_err;
+
+ if (npiv) {
+ vport = dev_to_vport(parent);
+ printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
+ vport->node_name, vport->port_name);
+ fc_set_wwnn(lport, vport->node_name);
+ fc_set_wwpn(lport, vport->port_name);
+ }
+ /* Configure netdev and networking properties of the lport */
+ rc = bnx2fc_net_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
+ goto lp_config_err;
+ }
+
+ rc = bnx2fc_shost_config(lport, parent);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ hba->netdev->name);
+ goto lp_config_err;
+ }
+
+ /* Initialize the libfc library */
+ rc = bnx2fc_libfc_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ goto shost_err;
+ }
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+
+ /* Allocate exchange manager */
+ if (!npiv) {
+ rc = bnx2fc_em_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
+ goto shost_err;
+ }
+ }
+
+ bnx2fc_interface_get(hba);
+ return lport;
+
+shost_err:
+ scsi_remove_host(shost);
+lp_config_err:
+ scsi_host_put(lport->host);
+ return NULL;
+}
+
+static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
+{
+ /* Don't listen for Ethernet packets anymore */
+ __dev_remove_pack(&hba->fcoe_packet_type);
+ __dev_remove_pack(&hba->fip_packet_type);
+ synchronize_net();
+}
+
+static void bnx2fc_if_destroy(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
+ /* Stop the transmit retry timer */
+ del_timer_sync(&port->timer);
+
+ /* Free existing transmit skbs */
+ fcoe_clean_pending_queue(lport);
+
+ bnx2fc_interface_put(hba);
+
+ /* Free queued packets for the receive thread */
+ bnx2fc_clean_rx_queue(lport);
+
+ /* Detach from scsi-ml */
+ fc_remove_host(lport->host);
+ scsi_remove_host(lport->host);
+
+ /*
+ * Note that only the physical lport has an exchange manager;
+ * for vports this call is a no-op.
+ */
+ fc_exch_mgr_free(lport);
+
+ /* Free memory used by statistical counters */
+ fc_lport_free_stats(lport);
+
+ /* Release Scsi_Host */
+ scsi_host_put(lport->host);
+}
+
+/**
+ * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
+ *
+ * @netdev: The net_device the FCoE interface to be destroyed is on
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_destroy(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba = NULL;
+ struct net_device *phys_dev;
+ int rc = 0;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ mutex_lock(&bnx2fc_dev_lock);
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+#endif
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
+ goto netdev_err;
+ }
+
+ bnx2fc_netdev_cleanup(hba);
+
+ bnx2fc_stop(hba);
+
+ bnx2fc_if_destroy(hba->ctlr.lp);
+
+ destroy_workqueue(hba->timer_work_queue);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ bnx2fc_fw_destroy(hba);
+
+ clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+netdev_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+static void bnx2fc_destroy_work(struct work_struct *work)
+{
+ struct fcoe_port *port;
+ struct fc_lport *lport;
+
+ port = container_of(work, struct fcoe_port, destroy_work);
+ lport = port->lport;
+
+ BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+
+ bnx2fc_port_shutdown(lport);
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_if_destroy(lport);
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+}
+
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ bnx2fc_free_fw_resc(hba);
+ bnx2fc_free_task_ctx(hba);
+}
+
+/**
+ * bnx2fc_bind_adapter_devices - allocate task context and firmware
+ * resources for the adapter
+ *
+ * @hba: Adapter instance
+ */
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ if (bnx2fc_setup_task_ctx(hba))
+ goto mem_err;
+
+ if (bnx2fc_setup_fw_resc(hba))
+ goto mem_err;
+
+ return 0;
+mem_err:
+ bnx2fc_unbind_adapter_devices(hba);
+ return -ENOMEM;
+}
+
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
+{
+ struct cnic_dev *cnic;
+
+ if (!hba->cnic) {
+ printk(KERN_ERR PFX "cnic is NULL\n");
+ return -ENODEV;
+ }
+ cnic = hba->cnic;
+ hba->pcidev = cnic->pcidev;
+ if (hba->pcidev)
+ pci_dev_get(hba->pcidev);
+
+ return 0;
+}
+
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
+{
+ if (hba->pcidev)
+ pci_dev_put(hba->pcidev);
+ hba->pcidev = NULL;
+}
+
+
+
+/**
+ * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * This function maps adapter structure to pcidev structure and initiates
+ * firmware handshake to enable/initialize on-chip FCoE components.
+ * This bnx2fc - cnic interface API callback is invoked after the
+ * following conditions are met -
+ * a) the underlying network interface is up (marked by the NETDEV_UP
+ * event from netdev)
+ * b) the bnx2fc adapter structure is registered.
+ */
+static void bnx2fc_ulp_start(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct fc_lport *lport = hba->ctlr.lp;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ goto start_disc;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
+ bnx2fc_fw_init(hba);
+
+start_disc:
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ BNX2FC_MISC_DBG("bnx2fc started.\n");
+
+ /* Kick off Fabric discovery*/
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "ulp_init: start discovery\n");
+ lport->tt.frame_send = bnx2fc_xmit;
+ bnx2fc_start_disc(hba);
+ }
+}
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport)
+{
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ fc_fabric_logoff(lport);
+ fc_lport_destroy(lport);
+}
+
+static void bnx2fc_stop(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ struct fc_lport *vport;
+
+ BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
+ hba->init_done);
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
+ test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ lport = hba->ctlr.lp;
+ bnx2fc_port_shutdown(lport);
+ BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
+ "offloaded sessions\n",
+ hba->num_ofld_sess);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(lport);
+
+ mutex_lock(&hba->hba_mutex);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_unlock(&hba->hba_mutex);
+ }
+}
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
+{
+#define BNX2FC_INIT_POLL_TIME (1000 / HZ)
+ int rc = -1;
+ int i = HZ;
+
+ rc = bnx2fc_bind_adapter_devices(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnx2fc_send_fw_fcoe_init_msg(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
+ goto err_unbind;
+ }
+
+ /*
+ * Wait until the adapter init message is complete, and adapter
+ * state is UP.
+ */
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ msleep(BNX2FC_INIT_POLL_TIME);
+
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
+ printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
+ "Ignoring...\n",
+ hba->cnic->netdev->name);
+ rc = -1;
+ goto err_unbind;
+ }
+
+
+ /* Mark HBA to indicate that the FW INIT is done */
+ set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
+ return 0;
+
+err_unbind:
+ bnx2fc_unbind_adapter_devices(hba);
+err_out:
+ return rc;
+}
+
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
+{
+ if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
+ init_timer(&hba->destroy_timer);
+ hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
+ jiffies;
+ hba->destroy_timer.function = bnx2fc_destroy_timer;
+ hba->destroy_timer.data = (unsigned long)hba;
+ add_timer(&hba->destroy_timer);
+ wait_event_interruptible(hba->destroy_wait,
+ (hba->flags &
+ BNX2FC_FLAG_DESTROY_CMPL));
+ /* This should never happen */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&hba->destroy_timer);
+ }
+ bnx2fc_unbind_adapter_devices(hba);
+ }
+}
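+
+/*
+ * The timer above bounds the wait for the firmware DESTROY completion:
+ * if no completion arrives within BNX2FC_FW_TIMEOUT, bnx2fc_destroy_timer()
+ * sets BNX2FC_FLAG_DESTROY_CMPL itself and wakes destroy_wait, so this
+ * path cannot block indefinitely on a dead firmware.
+ */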
+
+/**
+ * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * Driver checks if adapter is already in shutdown mode, if not start
+ * the shutdown process.
+ */
+static void bnx2fc_ulp_stop(void *handle)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
+
+ printk(KERN_ERR "ULP_STOP\n");
+
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_stop(hba);
+ bnx2fc_fw_destroy(hba);
+ mutex_unlock(&bnx2fc_dev_lock);
+}
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ int wait_cnt = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* Kick off FIP/FLOGI */
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "Init not done yet\n");
+ return;
+ }
+
+ lport = hba->ctlr.lp;
+ BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
+
+ if (!bnx2fc_link_ok(lport)) {
+ BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ }
+
+ /* wait for the FCF to be selected before issuing FLOGI */
+ while (!hba->ctlr.sel_fcf) {
+ msleep(250);
+ /* give up after 3 secs */
+ if (++wait_cnt > 12)
+ break;
+ }
+ fc_lport_init(lport);
+ fc_fabric_login(lport);
+}
+
+
+/**
+ * bnx2fc_ulp_init - Initialize an adapter instance
+ *
+ * @dev : cnic device handle
+ * Called from cnic_register_driver() context to initialize all
+ * enumerated cnic devices. This routine allocates adapter structure
+ * and other device specific resources.
+ */
+static void bnx2fc_ulp_init(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+ int rc = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* bnx2fc works only when bnx2x is loaded */
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
+ " flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ /* Configure FCoE interface */
+ hba = bnx2fc_interface_create(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "hba initialization failed\n");
+ return;
+ }
+
+ /* Add HBA to the adapter list */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_add_tail(&hba->link, &adapter_list);
+ adapter_count++;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+ rc = dev->register_device(dev, CNIC_ULP_FCOE,
+ (void *) hba);
+ if (rc)
+ printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
+ else
+ set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+static int bnx2fc_disable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
+ } else {
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(hba->ctlr.lp);
+ }
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+
+static int bnx2fc_enable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
+ } else if (!bnx2fc_link_ok(hba->ctlr.lp))
+ fcoe_ctlr_link_up(&hba->ctlr);
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_create - Create bnx2fc FCoE interface
+ *
+ * @netdev: The net_device to create the FCoE interface on
+ * @fip_mode: The FIP mode to use; only FIP_MODE_FABRIC is supported
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct fc_lport *lport;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+ int vlan_id;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
+ if (fip_mode != FIP_MODE_FABRIC) {
+ printk(KERN_ERR "fip mode not FABRIC\n");
+ return -EIO;
+ }
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR "trying for rtnl_lock\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto mod_err;
+ }
+#endif
+
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -EINVAL;
+ goto mod_err;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ phys_dev = vlan_dev_real_dev(netdev);
+ vlan_id = vlan_dev_vlan_id(netdev);
+ } else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ rc = bnx2fc_fw_init(hba);
+ if (rc)
+ goto netdev_err;
+ }
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ rc = -EEXIST;
+ goto netdev_err;
+ }
+
+ /* update netdev with vlan netdev */
+ hba->netdev = netdev;
+ hba->vlan_id = vlan_id;
+ hba->vlan_enabled = 1;
+
+ rc = bnx2fc_interface_setup(hba, fip_mode);
+ if (rc) {
+ printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
+ goto ifput_err;
+ }
+
+ hba->timer_work_queue =
+ create_singlethread_workqueue("bnx2fc_timer_wq");
+ if (!hba->timer_work_queue) {
+ printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
+ rc = -EINVAL;
+ goto ifput_err;
+ }
+
+ lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
+ if (!lport) {
+ printk(KERN_ERR PFX "Failed to create interface (%s)\n",
+ netdev->name);
+ bnx2fc_netdev_cleanup(hba);
+ rc = -EINVAL;
+ goto if_create_err;
+ }
+
+ lport->boot_time = jiffies;
+
+ /* Make this master N_port */
+ hba->ctlr.lp = lport;
+
+ set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+ printk(KERN_ERR PFX "create: START DISC\n");
+ bnx2fc_start_disc(hba);
+ /*
+ * Drop the reference taken by kref_init() in bnx2fc_interface_setup();
+ * on success the lport still holds the reference taken in
+ * bnx2fc_if_create().
+ */
+ bnx2fc_interface_put(hba);
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return 0;
+
+if_create_err:
+ destroy_workqueue(hba->timer_work_queue);
+ifput_err:
+ bnx2fc_interface_put(hba);
+netdev_err:
+ module_put(THIS_MODULE);
+mod_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
+ *
+ * @cnic: Pointer to cnic device instance
+ *
+ **/
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ /* safe because 'link' is the first member of struct bnx2fc_hba */
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->cnic == cnic)
+ return hba;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ /* safe because 'link' is the first member of struct bnx2fc_hba */
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->phys_dev == phys_dev)
+ return hba;
+ }
+ printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
+ return NULL;
+}
+
+/**
+ * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
+ *
+ * @dev: cnic device handle
+ */
+static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+ hba = bnx2fc_find_hba_for_cnic(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n",
+ dev);
+ mutex_unlock(&bnx2fc_dev_lock);
+ return;
+ }
+
+ list_del_init(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ /* destroy not called yet; tear the interface down now */
+ bnx2fc_netdev_cleanup(hba);
+ bnx2fc_if_destroy(hba->ctlr.lp);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+}
+
+/**
+ * bnx2fc_fcoe_reset - Resets the fcoe
+ *
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ fc_lport_reset(lport);
+ return 0;
+}
+
+
+static bool bnx2fc_match(struct net_device *netdev)
+{
+ mutex_lock(&bnx2fc_dev_lock);
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ struct net_device *phys_dev = vlan_dev_real_dev(netdev);
+
+ if (bnx2fc_hba_lookup(phys_dev)) {
+ mutex_unlock(&bnx2fc_dev_lock);
+ return true;
+ }
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+ return false;
+}
+
+
+static struct fcoe_transport bnx2fc_transport = {
+ .name = {"bnx2fc"},
+ .attached = false,
+ .list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .match = bnx2fc_match,
+ .create = bnx2fc_create,
+ .destroy = bnx2fc_destroy,
+ .enable = bnx2fc_enable,
+ .disable = bnx2fc_disable,
+};
+
+/**
+ * bnx2fc_percpu_thread_create - Create a receive thread for an
+ * online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2fc_percpu_thread_create(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(bnx2fc_percpu, cpu);
+
+ thread = kthread_create(bnx2fc_percpu_io_thread,
+ (void *)p,
+ "bnx2fc_thread/%d", cpu);
+ /* bind thread to the cpu */
+ if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+ struct bnx2fc_work *work, *tmp;
+
+ BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
+
+ /* Prevent any new work from being queued for this CPU */
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&p->fp_work_lock);
+ thread = p->iothread;
+ p->iothread = NULL;
+
+ /* Free all work queued on this CPU's work list */
+ list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->fp_work_lock);
+
+ if (thread)
+ kthread_stop(thread);
+}
+
+/**
+ * bnx2fc_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ printk(PFX "CPU %x online: Create Rx thread\n", cpu);
+ bnx2fc_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2fc_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/**
+ * bnx2fc_mod_init - module init entry point
+ *
+ * Initialize driver wide global data structures, and register
+ * with cnic module
+ **/
+static int __init bnx2fc_mod_init(void)
+{
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ int rc = 0;
+ unsigned int cpu = 0;
+ struct bnx2fc_percpu_s *p;
+
+ printk(KERN_INFO PFX "%s", version);
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&bnx2fc_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&adapter_list);
+ mutex_init(&bnx2fc_dev_lock);
+ adapter_count = 0;
+
+ /* Attach FC transport template */
+ rc = bnx2fc_attach_transport();
+ if (rc)
+ goto detach_ft;
+
+ bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ if (!bnx2fc_wq) {
+ rc = -ENOMEM;
+ goto release_bt;
+ }
+
+ bg = &bnx2fc_global;
+ skb_queue_head_init(&bg->fcoe_rx_list);
+ l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
+ if (IS_ERR(l2_thread)) {
+ rc = PTR_ERR(l2_thread);
+ goto free_wq;
+ }
+ wake_up_process(l2_thread);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ bg->thread = l2_thread;
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->fp_work_lock);
+ }
+
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_create(cpu);
+ }
+
+ /* Initialize per CPU interrupt thread */
+ register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(bnx2fc_wq);
+release_bt:
+ bnx2fc_release_transport();
+detach_ft:
+ fcoe_transport_detach(&bnx2fc_transport);
+out:
+ return rc;
+}
+
+static void __exit bnx2fc_mod_exit(void)
+{
+ LIST_HEAD(to_be_deleted);
+ struct bnx2fc_hba *hba, *next;
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ struct sk_buff *skb;
+ unsigned int cpu = 0;
+
+ /*
+ * NOTE: Since cnic calls the register_driver routine under rtnl_lock,
+ * rtnl_lock ranks above bnx2fc_dev_lock in the locking order, so
+ * unregister_device() cannot be called with bnx2fc_dev_lock
+ * held.
+ */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_splice(&adapter_list, &to_be_deleted);
+ INIT_LIST_HEAD(&adapter_list);
+ adapter_count = 0;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Unregister with cnic */
+ list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
+ list_del_init(&hba->link);
+ printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
+ hba, atomic_read(&hba->kref.refcount));
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
+ &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+ }
+ cnic_unregister_driver(CNIC_ULP_FCOE);
+
+ /* Destroy global thread */
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ l2_thread = bg->thread;
+ bg->thread = NULL;
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ if (l2_thread)
+ kthread_stop(l2_thread);
+
+ unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ /* Destroy per cpu threads */
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_destroy(cpu);
+ }
+
+ destroy_workqueue(bnx2fc_wq);
+ /*
+ * detach from scsi transport
+ * must happen after all destroys are done
+ */
+ bnx2fc_release_transport();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&bnx2fc_transport);
+}
+
+module_init(bnx2fc_mod_init);
+module_exit(bnx2fc_mod_exit);
+
+static struct fc_function_template bnx2fc_transport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = bnx2fc_get_host_stats,
+
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+
+ .vport_create = bnx2fc_vport_create,
+ .vport_delete = bnx2fc_vport_destroy,
+ .vport_disable = bnx2fc_vport_disable,
+};
+
+static struct fc_function_template bnx2fc_vport_xport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+ .terminate_rport_io = fc_rport_terminate_io,
+};
+
+/**
+ * scsi_host_template structure used while registering with SCSI-ml
+ */
+static struct scsi_host_template bnx2fc_shost_template = {
+ .module = THIS_MODULE,
+ .name = "Broadcom Offload FCoE Initiator",
+ .queuecommand = bnx2fc_queuecommand,
+ .eh_abort_handler = bnx2fc_eh_abort, /* abts */
+ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
+ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
+ .eh_host_reset_handler = fc_eh_host_reset,
+ .slave_alloc = fc_slave_alloc,
+ .change_queue_depth = fc_change_queue_depth,
+ .change_queue_type = fc_change_queue_type,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
+ .max_sectors = 512,
+};
+
+static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+ .frame_send = bnx2fc_xmit,
+ .elsct_send = bnx2fc_elsct_send,
+ .fcp_abort_io = bnx2fc_abort_io,
+ .fcp_cleanup = bnx2fc_cleanup,
+ .rport_event_callback = bnx2fc_rport_event_handler,
+};
+
+/**
+ * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
+ * structure carrying callback function pointers
+ */
+static struct cnic_ulp_ops bnx2fc_cnic_cb = {
+ .owner = THIS_MODULE,
+ .cnic_init = bnx2fc_ulp_init,
+ .cnic_exit = bnx2fc_ulp_exit,
+ .cnic_start = bnx2fc_ulp_start,
+ .cnic_stop = bnx2fc_ulp_stop,
+ .indicate_kcqes = bnx2fc_indicate_kcqe,
+ .indicate_netevent = bnx2fc_indicate_netevent,
+};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000000..4f4096836742
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,1868 @@
+/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the low level functions that interact
+ * with the 57712 FCoE firmware.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe);
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *conn_destroy);
+
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_stat stat_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
+ stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
+ stat_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
+ stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &stat_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
+ *
+ * @hba: adapter structure pointer
+ *
+ * Send down FCoE firmware init KWQEs which initiates the initial handshake
+ * with the f/w.
+ *
+ */
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_init1 fcoe_init1;
+ struct fcoe_kwqe_init2 fcoe_init2;
+ struct fcoe_kwqe_init3 fcoe_init3;
+ struct kwqe *kwqe_arr[3];
+ int num_kwqes = 3;
+ int rc = 0;
+
+ if (!hba->cnic) {
+ printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
+ return -ENODEV;
+ }
+
+ /* fill init1 KWQE */
+ memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
+ fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
+ fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+ fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
+ fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
+ fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
+ fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
+ fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+ fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
+ fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
+ fcoe_init1.task_list_pbl_addr_hi =
+ (u32) ((u64) hba->task_ctx_bd_dma >> 32);
+ fcoe_init1.mtu = hba->netdev->mtu;
+
+ fcoe_init1.flags = (PAGE_SHIFT <<
+ FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
+
+ fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
+
+ /* fill init2 KWQE */
+ memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
+ fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
+ fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
+ fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
+ ((u64) hba->hash_tbl_pbl_dma >> 32);
+
+ fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
+ fcoe_init2.t2_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_dma >> 32);
+
+ fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
+ fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
+
+ fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
+
+ /* fill init3 KWQE */
+ memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
+ fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
+ fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ fcoe_init3.error_bit_map_lo = 0xffffffff;
+ fcoe_init3.error_bit_map_hi = 0xffffffff;
+
+
+ kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
+ kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
+ kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
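+
+/*
+ * The _lo/_hi split used above is the recurring idiom for handing 64-bit
+ * DMA addresses to the chip through 32-bit KWQE fields, e.g.:
+ *
+ *	req.addr_lo = (u32) dma_addr;
+ *	req.addr_hi = (u32) ((u64) dma_addr >> 32);
+ */
+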
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_destroy fcoe_destroy;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = -1;
+
+ /* fill destroy KWQE */
+ memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
+ fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
+ fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_hba *hba = port->priv;
+ struct kwqe *kwqe_arr[4];
+ struct fcoe_kwqe_conn_offload1 ofld_req1;
+ struct fcoe_kwqe_conn_offload2 ofld_req2;
+ struct fcoe_kwqe_conn_offload3 ofld_req3;
+ struct fcoe_kwqe_conn_offload4 ofld_req4;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 4;
+ u32 port_id;
+ int rc = 0;
+ u16 conn_id;
+
+ /* Initialize offload request 1 structure */
+ memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
+
+ ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+
+ conn_id = (u16)tgt->fcoe_conn_id;
+ ofld_req1.fcoe_conn_id = conn_id;
+
+
+ ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
+ ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
+
+ ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
+ ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
+
+ ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
+ ofld_req1.rq_first_pbe_addr_hi =
+ (u32)((u64) tgt->rq_dma >> 32);
+
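+ /*
+ * Seed the RQ producer at 0x8000: the low 15 bits carry the
+ * ring index, while the upper bit appears to serve as a wrap
+ * indicator for the firmware (see bnx2fc_return_rqe).
+ */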
+ ofld_req1.rq_prod = 0x8000;
+
+ /* Initialize offload request 2 structure */
+ memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
+
+ ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
+
+ ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
+ ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
+
+ ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
+ ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
+
+ ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
+ ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
+
+ /* Initialize offload request 3 structure */
+ memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
+
+ ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
+ ofld_req3.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req3.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
+ ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id == 0) {
+ BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Store the s_id of the initiator for later reference. It is
+ * used during disable/destroy in linkdown processing, since
+ * the port_id is reset to 0 when the lport is reset.
+ */
+ tgt->sid = port_id;
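+ /* FC IDs are 24 bits; pack them one byte at a time, LSB first */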
+ ofld_req3.s_id[0] = (port_id & 0x000000FF);
+ ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ ofld_req3.d_id[0] = (port_id & 0x000000FF);
+ ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ ofld_req3.tx_total_conc_seqs = rdata->max_seq;
+
+ ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
+ ofld_req3.rx_max_fc_pay_len = lport->mfs;
+
+ ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_open_seqs_exch_c3 = 1;
+
+ ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
+ ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
+
+ /* set mul_n_port_ids supported flag to 0, until it is supported */
+ ofld_req3.flags = 0;
+ /*
+ ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
+ */
+ /* Info from PLOGI response */
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
+
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+
+ /* vlan flag */
+ ofld_req3.flags |= (hba->vlan_enabled <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
+
+ /* C2_VALID and ACK flags are not set as they are not supported */
+
+
+ /* Initialize offload request 4 structure */
+ memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
+ ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
+ ofld_req4.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
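+ /*
+ * lport->e_d_tov is in milliseconds; the divisor suggests the
+ * firmware timer field is in 20ms units (assumption).
+ */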
+ ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
+
+
+ ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
+ ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
+ ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
+
+ ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
+ ofld_req4.confq_pbl_base_addr_hi =
+ (u32)((u64) tgt->confq_pbl_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+ kwqe_arr[2] = (struct kwqe *) &ofld_req3;
+ kwqe_arr[3] = (struct kwqe *) &ofld_req4;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct kwqe *kwqe_arr[2];
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable enbl_req;
+ struct fc_lport *lport = port->lport;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&enbl_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
+ enbl_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id != tgt->sid) {
+ printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
+ "sid = 0x%x\n", port_id, tgt->sid);
+ port_id = tgt->sid;
+ }
+ enbl_req.s_id[0] = (port_id & 0x000000FF);
+ enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ enbl_req.d_id[0] = (port_id & 0x000000FF);
+ enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ enbl_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ enbl_req.vlan_flag = hba->vlan_enabled;
+ enbl_req.context_id = tgt->context_id;
+ enbl_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &enbl_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_disable_req - initiates FCoE Session disable
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable disable_req;
+ struct kwqe *kwqe_arr[2];
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&disable_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
+ disable_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = tgt->sid;
+ disable_req.s_id[0] = (port_id & 0x000000FF);
+ disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+
+ port_id = rport->port_id;
+ disable_req.d_id[0] = (port_id & 0x000000FF);
+ disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ disable_req.context_id = tgt->context_id;
+ disable_req.conn_id = tgt->fcoe_conn_id;
+ disable_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ disable_req.vlan_tag |=
+ 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ disable_req.vlan_flag = hba->vlan_enabled;
+
+ kwqe_arr[0] = (struct kwqe *) &disable_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ struct fcoe_kwqe_conn_destroy destroy_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
+ destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
+ destroy_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ destroy_req.context_id = tgt->context_id;
+ destroy_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &destroy_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+static void bnx2fc_unsol_els_work(struct work_struct *work)
+{
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+
+ unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
+ lport = unsol_els->lport;
+ fp = unsol_els->fp;
+ fc_exch_recv(lport, fp);
+ kfree(unsol_els);
+}
+
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid)
+{
+ struct fcoe_port *port = tgt->port;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_frame_header *fh;
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+ u32 payload_len;
+ u32 crc;
+ u8 op;
+
+
+ unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
+ if (!unsol_els) {
+ BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
+ l2_oxid, frame_len);
+
+ payload_len = frame_len - sizeof(struct fc_frame_header);
+
+ fp = fc_frame_alloc(lport, payload_len);
+ if (!fp) {
+ printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+ /* don't leak the work item allocated above */
+ kfree(unsol_els);
+ return;
+ }
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, frame_len);
+
+ if (l2_oxid != FC_XID_UNKNOWN)
+ fh->fh_ox_id = htons(l2_oxid);
+
+ skb = fp_skb(fp);
+
+ if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
+ (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
+
+ if (fh->fh_type == FC_TYPE_ELS) {
+ op = fc_frame_payload_op(fp);
+ if ((op == ELS_TEST) || (op == ELS_ESTC) ||
+ (op == ELS_FAN) || (op == ELS_CSU)) {
+ /*
+ * No need to reply for these
+ * ELS requests
+ */
+ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
+ kfree_skb(skb);
+ return;
+ }
+ }
+ crc = fcoe_fc_crc(fp);
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_crc(fp) = cpu_to_le32(~crc);
+ unsol_els->lport = lport;
+ unsol_els->fp = fp;
+ INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
+ queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
+ } else {
+ BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
+ kfree_skb(skb);
+ }
+}
+
+static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ u8 num_rq;
+ struct fcoe_err_report_entry *err_entry;
+ unsigned char *rq_data;
+ unsigned char *buf = NULL, *buf1;
+ int i;
+ u16 xid;
+ u32 frame_len, len;
+ struct bnx2fc_cmd *io_req = NULL;
+ struct fcoe_task_ctx_entry *task, *task_page;
+ struct bnx2fc_hba *hba = tgt->port->priv;
+ int task_idx, index;
+ int rc = 0;
+
+
+ BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
+ switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
+ case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
+ frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
+ FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
+
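+ /* Round the frame length up to whole RQ buffers */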
+ num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
+
+ rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+ if (rq_data) {
+ buf = rq_data;
+ } else {
+ buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
+ GFP_ATOMIC);
+
+ if (!buf1) {
+ BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
+ break;
+ }
+
+ for (i = 0; i < num_rq; i++) {
+ rq_data = (unsigned char *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ len = BNX2FC_RQ_BUF_SZ;
+ memcpy(buf1, rq_data, len);
+ buf1 += len;
+ }
+ }
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
+ FC_XID_UNKNOWN);
+
+ if (buf != rq_data)
+ kfree(buf);
+ bnx2fc_return_rqe(tgt, num_rq);
+ break;
+
+ case FCOE_ERROR_DETECTION_CQE_TYPE:
+ /*
+ * In case of an error-reporting CQE, a single RQ entry
+ * is consumed.
+ */
+ spin_lock_bh(&tgt->tgt_lock);
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = err_entry->fc_hdr.ox_id;
+ BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+
+ if (xid > BNX2FC_MAX_XID) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
+ xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
+ "progress.. ignore unsol err\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ /*
+ * If ABTS is already in progress, and FW error is
+ * received after that, do not cancel the timeout_work
+ * and let the error recovery continue by explicitly
+ * logging out the target, when the ABTS eventually
+ * times out.
+ */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /*
+ * Cancel the timeout_work, as we received IO
+ * completion with FW error.
+ */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* timer hold */
+
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
+ "failed. issue cleanup\n");
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+ } else
+ printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
+ "in ABTS processing\n", xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+
+ case FCOE_WARNING_DETECTION_CQE_TYPE:
+ /*
+ * In case of a warning-reporting CQE, a single RQ entry
+ * is consumed.
+ */
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
+ BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
+ break;
+ }
+}
+
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd *io_req;
+ int task_idx, index;
+ u16 xid;
+ u8 cmd_type;
+ u8 rx_state = 0;
+ u8 num_rq;
+
+ spin_lock_bh(&tgt->tgt_lock);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= BNX2FC_MAX_TASKS) {
+ printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
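+ /* Each task-context page holds BNX2FC_TASKS_PER_PAGE entries;
+ * derive the page and the index within it from the xid. */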
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ num_rq = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+
+ if (io_req == NULL) {
+ printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+
+ /* Timestamp IO completion time */
+ cmd_type = io_req->cmd_type;
+
+ /* optimized completion path */
+ if (cmd_type == BNX2FC_SCSI_CMD) {
+ rx_state = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+
+ if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+ }
+
+ /* Process other IO completion types */
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+ bnx2fc_process_abts_compl(io_req, task, num_rq);
+ else if (rx_state ==
+ FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ else
+ printk(KERN_ERR PFX "Invalid rx state - %d\n",
+ rx_state);
+ break;
+
+ case BNX2FC_TASK_MGMT_CMD:
+ BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
+ bnx2fc_process_tm_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_ABTS:
+ /*
+ * ABTS request received by firmware. ABTS response
+ * will be delivered to the task belonging to the IO
+ * that was aborted
+ */
+ BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ case BNX2FC_ELS:
+ BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
+ bnx2fc_process_els_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
+ break;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct bnx2fc_work *work;
+ work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
+ if (!work)
+ return NULL;
+
+ INIT_LIST_HEAD(&work->list);
+ work->tgt = tgt;
+ work->wqe = wqe;
+ return work;
+}
+
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_cqe *cq;
+ u32 cq_cons;
+ struct fcoe_cqe *cqe;
+ u16 wqe;
+ bool more_cqes_found = false;
+
+ /*
+ * cq_lock is a low contention lock used to protect
+ * the CQ data structure from being freed up during
+ * the upload operation
+ */
+ spin_lock_bh(&tgt->cq_lock);
+
+ if (!tgt->cq) {
+ printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+ }
+ cq = tgt->cq;
+ cq_cons = tgt->cq_cons_idx;
+ cqe = &cq[cq_cons];
+
+ do {
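+ /*
+ * The XOR makes this outer loop run exactly twice: the
+ * first pass drains the CQ and re-arms it, the second
+ * catches any CQEs that raced with the re-arm.
+ */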
+ more_cqes_found ^= true;
+
+ while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+ (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+
+ /* new entry on the cq */
+ if (wqe & FCOE_CQE_CQE_TYPE) {
+ /* Unsolicited event notification */
+ bnx2fc_process_unsol_compl(tgt, wqe);
+ } else {
+ struct bnx2fc_work *work = NULL;
+ struct bnx2fc_percpu_s *fps = NULL;
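+ /* Spread fast-path completions across the per-CPU
+ * I/O threads, keyed by the WQE (task id). */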
+ unsigned int cpu = wqe % num_possible_cpus();
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (unlikely(!fps->iothread))
+ goto unlock;
+
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work)
+ list_add_tail(&work->list,
+ &fps->work_list);
+unlock:
+ spin_unlock_bh(&fps->fp_work_lock);
+
+ /* Pending work request completion */
+ if (fps->iothread && work)
+ wake_up_process(fps->iothread);
+ else
+ bnx2fc_process_cq_compl(tgt, wqe);
+ }
+ cqe++;
+ tgt->cq_cons_idx++;
+
+ if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+ tgt->cq_cons_idx = 0;
+ cqe = cq;
+ tgt->cq_curr_toggle_bit =
+ 1 - tgt->cq_curr_toggle_bit;
+ }
+ }
+ /* Re-arm CQ */
+ if (more_cqes_found) {
+ tgt->conn_db->cq_arm.lo = -1;
+ wmb();
+ }
+ } while (more_cqes_found);
+
+ /*
+ * Commit the tgt->cq_cons_idx change to memory; the spinlock
+ * release below implies a full memory barrier, so no smp_wmb()
+ * is needed.
+ */
+
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+}
+
+/**
+ * bnx2fc_fastpath_notification - process global event queue (KCQ)
+ *
+ * @hba: adapter structure pointer
+ * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
+ *
+ * Fast path event notification handler
+ */
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe)
+{
+ u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
+ struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
+
+ if (!tgt) {
+ printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
+ return;
+ }
+
+ bnx2fc_process_new_cqes(tgt);
+}
+
+/**
+ * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session offload completion, enable the session if offload is
+ * successful.
+ */
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ struct fcoe_port *port;
+ u32 conn_id;
+ u32 context_id;
+ int rc;
+
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
+ return;
+ }
+ BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+ port = tgt->port;
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ goto ofld_cmpl_err;
+ }
+ /*
+ * cnic has allocated a context_id for this session; use this
+ * while enabling the session.
+ */
+ tgt->context_id = context_id;
+ if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status ==
+ FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
+ printk(KERN_ERR PFX "unable to allocate FCoE context "
+ "resources\n");
+ set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
+ }
+ goto ofld_cmpl_err;
+ } else {
+
+ /* now enable the session */
+ rc = bnx2fc_send_session_enable_req(port, tgt);
+ if (rc) {
+ printk(KERN_ALERT PFX "enable session failed\n");
+ goto ofld_cmpl_err;
+ }
+ }
+ return;
+ofld_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+/**
+ * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session enable completion, mark the rport as ready
+ */
+
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+ u32 context_id;
+
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+
+ /*
+ * context_id should be the same for this target during offload
+ * and enable
+ */
+ if (tgt->context_id != context_id) {
+ printk(KERN_ALERT PFX "context id mis-match\n");
+ return;
+ }
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ goto enbl_cmpl_err;
+ }
+ if (ofld_kcqe->completion_status) {
+ goto enbl_cmpl_err;
+ } else {
+ /* enable successful - rport ready for issuing IOs */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+ }
+ return;
+
+enbl_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *disable_kcqe)
+{
+
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = disable_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);
+
+ if (disable_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
+ disable_kcqe->completion_status);
+ return;
+ } else {
+ /* disable successful */
+ BNX2FC_TGT_DBG(tgt, "disable successful\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *destroy_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = destroy_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
+
+ if (destroy_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+ destroy_kcqe->completion_status);
+ return;
+ } else {
+ /* destroy successful */
+ BNX2FC_TGT_DBG(tgt, "upload successful\n");
+ clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
+{
+ switch (err_code) {
+ case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
+ printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
+ printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
+ printk(KERN_ERR PFX "init_failure due to NIC error\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
+ }
+}
+
+/**
+ * bnx2fc_indicate_kcqe - process KCQE
+ *
+ * @context: adapter structure pointer
+ * @kcq: kcqe pointer array
+ * @num_cqe: Number of completion queue elements
+ *
+ * Generic KCQ event handler
+ */
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ int i = 0;
+ struct fcoe_kcqe *kcqe = NULL;
+
+ while (i < num_cqe) {
+ kcqe = (struct fcoe_kcqe *) kcq[i++];
+
+ switch (kcqe->op_code) {
+ case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
+ bnx2fc_fastpath_notification(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
+ bnx2fc_process_ofld_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_ENABLE_CONN:
+ bnx2fc_process_enable_conn_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_INIT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+ bnx2fc_init_failure(hba,
+ kcqe->completion_status);
+ } else {
+ set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ bnx2fc_get_link_state(hba);
+ printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
+ (u8)hba->pcidev->bus->number);
+ }
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+
+ printk(KERN_ERR PFX "DESTROY failed\n");
+ } else {
+ printk(KERN_ERR PFX "DESTROY success\n");
+ }
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+ break;
+
+ case FCOE_KCQE_OPCODE_DISABLE_CONN:
+ bnx2fc_process_conn_disable_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_CONN:
+ bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_STAT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
+ printk(KERN_ERR PFX "STAT failed\n");
+ complete(&hba->stat_req_done);
+ break;
+
+ case FCOE_KCQE_OPCODE_FCOE_ERROR:
+ /* fall thru */
+ default:
+ printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+ kcqe->op_code);
+ }
+ }
+}
+
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
+{
+ struct fcoe_sqe *sqe;
+
+ sqe = &tgt->sq[tgt->sq_prod_idx];
+
+ /* Fill SQ WQE */
+ sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
+ sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
+
+ /* Advance SQ Prod Idx */
+ if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
+ tgt->sq_prod_idx = 0;
+ tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
+ }
+}
+
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
+{
+ struct b577xx_doorbell_set_prod ev_doorbell;
+ u32 msg;
+
+ wmb();
+
+ memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
+ ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+
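+ /* prod: bits 0-14 = SQ producer index, bit 15 = toggle bit */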
+ ev_doorbell.prod = tgt->sq_prod_idx |
+ (tgt->sq_curr_toggle_bit << 15);
+ ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+ msg = *((u32 *)&ev_doorbell);
+ writel(cpu_to_le32(msg), tgt->ctx_base);
+
+ mmiowb();
+
+}
+
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
+{
+ u32 context_id = tgt->context_id;
+ struct fcoe_port *port = tgt->port;
+ u32 reg_off;
+ resource_size_t reg_base;
+ struct bnx2fc_hba *hba = port->priv;
+
+ reg_base = pci_resource_start(hba->pcidev,
+ BNX2X_DOORBELL_PCI_BAR);
+ reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
+ (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+ tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!tgt->ctx_base)
+ return -ENOMEM;
+ return 0;
+}
+
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
+
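+ /*
+ * Multi-buffer requests must be contiguous and so cannot wrap
+ * the ring; return NULL and let the caller gather the entries
+ * one at a time.
+ */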
+ if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
+ return NULL;
+
+ tgt->rq_cons_idx += num_items;
+
+ if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
+ tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
+
+ return buf;
+}
+
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ /* return the rq buffer */
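+ /*
+ * Only the low 15 bits of the producer carry the ring index,
+ * so the wrap is detected with a 0x7fff mask and the index is
+ * advanced past the ring by 0x8000 - BNX2FC_RQ_WQES_MAX.
+ */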
+ u32 next_prod_idx = tgt->rq_prod_idx + num_items;
+ if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
+ /* Wrap around RQ */
+ next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
+ }
+ tgt->rq_prod_idx = next_prod_idx;
+ tgt->conn_db->rq_prod = tgt->rq_prod_idx;
+}
+
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid)
+{
+ u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u32 context_id = tgt->context_id;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+ /* Common */
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.general.cleanup_info.task_id = orig_xid;
+
+
+}
+
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_frame_header *fc_hdr;
+ u8 task_type = 0;
+ u64 *hdr;
+ u64 temp_hdr[3];
+ u32 context_id;
+
+
+ /* Obtain task_type */
+ if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
+ (io_req->cmd_type == BNX2FC_ELS)) {
+ task_type = FCOE_TASK_TYPE_MIDPATH;
+ } else if (io_req->cmd_type == BNX2FC_ABTS) {
+ task_type = FCOE_TASK_TYPE_ABTS;
+ }
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
+ io_req->cmd_type, task_type);
+
+ /* Tx only */
+ if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
+ (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_req_bd_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_req_bd_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
+ (unsigned long long)mp_req->mp_req_bd_dma);
+ }
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Rx Write Tx Read */
+ fc_hdr = &(mp_req->req_fc_hdr);
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
+ fc_hdr->fh_rx_id = htons(0xffff);
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+ } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+ fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
+ }
+
+ /* Fill FC Header into middle path buffer */
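+ /* The 24-byte header is staged in temp_hdr and written out
+ * as three big-endian 64-bit words. */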
+ hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ /* Rx Only */
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_resp_bd_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ }
+}
+
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ u8 task_type;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct io_bdt *bd_tbl = io_req->bd_tbl;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u64 *fcp_cmnd;
+ u64 tmp_fcp_cmnd[4];
+ u32 context_id;
+ int cnt, i;
+ int bd_count;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx only */
+ if (task_type == FCOE_TASK_TYPE_WRITE) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+
+ /*Tx Write Rx Read */
+ /* Init state to NORMAL */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Set initiative ownership */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+
+ /* Set initial seq counter */
+ task->cmn.tx_low_seq_cnt = 1;
+
+ /* Set state to "waiting for the first packet" */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+
+ /* Fill FCP_CMND IU */
+ fcp_cmnd = (u64 *)
+ task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+
+ /* swap fcp_cmnd */
+ cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
+
+ for (i = 0; i < cnt; i++) {
+ *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+ fcp_cmnd++;
+ }
+
+ /* Rx Write Tx Read */
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+
+ /* Rx Only */
+ if (task_type == FCOE_TASK_TYPE_READ) {
+
+ bd_count = bd_tbl->bd_valid;
+ if (bd_count == 1) {
+
+ struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
+ fcoe_bd_tbl->buf_len;
+ task->tx_wr_rx_rd.init_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+ } else {
+
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+ }
+}
+
+/**
+ * bnx2fc_setup_task_ctx - allocate and map task context
+ *
+ * @hba: pointer to adapter structure
+ *
+ * allocate memory for task context, and associated BD table to be used
+ * by firmware
+ *
+ */
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
+{
+ int rc = 0;
+ struct regpair *task_ctx_bdt;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Allocate the task context BD table. One page of BD table can
+ * map 256 pages, and each page holds 32 task context entries,
+ * so a single BD-table page covers up to 8192 task context
+ * entries.
+ */
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
+ if (!hba->task_ctx_bd_tbl) {
+ printk(KERN_ERR PFX "unable to allocate task context BDT\n");
+ rc = -1;
+ goto out;
+ }
+ memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
+
+ /*
+ * Allocate task_ctx which is an array of pointers pointing to
+ * a page containing 32 task contexts
+ */
+ hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+ GFP_KERNEL);
+ if (!hba->task_ctx) {
+ printk(KERN_ERR PFX "unable to allocate task context array\n");
+ rc = -1;
+ goto out1;
+ }
+
+ /*
+ * Allocate task_ctx_dma which is an array of dma addresses
+ */
+ hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+ sizeof(dma_addr_t)), GFP_KERNEL);
+ if (!hba->task_ctx_dma) {
+ printk(KERN_ERR PFX "unable to alloc context mapping array\n");
+ rc = -1;
+ goto out2;
+ }
+
+ task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
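+ /* One BD (little-endian hi/lo pair) per task-context page */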
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
+ if (!hba->task_ctx[i]) {
+ printk(KERN_ERR PFX "unable to alloc task context\n");
+ rc = -1;
+ goto out3;
+ }
+ memset(hba->task_ctx[i], 0, PAGE_SIZE);
+ addr = (u64)hba->task_ctx_dma[i];
+ task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
+ task_ctx_bdt->lo = cpu_to_le32((u32)addr);
+ task_ctx_bdt++;
+ }
+ return 0;
+
+out3:
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ if (hba->task_ctx[i]) {
+
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i], hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+out2:
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+out1:
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+out:
+ return rc;
+}
+
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
+{
+ int i;
+
+ if (hba->task_ctx_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl,
+ hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+ }
+
+ if (hba->task_ctx) {
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ if (hba->task_ctx[i]) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i],
+ hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+}
+
+static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int segment_count;
+ int hash_table_size;
+ u32 *pbl;
+
+ segment_count = hba->hash_tbl_segment_count;
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ dma_addr_t dma_address;
+
+ dma_address = le32_to_cpu(*pbl);
+ ++pbl;
+ dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+ ++pbl;
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_address);
+
+ }
+
+ if (hba->hash_tbl_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->hash_tbl_pbl,
+ hba->hash_tbl_pbl_dma);
+ hba->hash_tbl_pbl = NULL;
+ }
+}
+
+static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int hash_table_size;
+ int segment_count;
+ int segment_array_size;
+ int dma_segment_array_size;
+ dma_addr_t *dma_segment_array;
+ u32 *pbl;
+
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
+ segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
+ hba->hash_tbl_segment_count = segment_count;
+
+ segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
+ hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
+ if (!hba->hash_tbl_segments) {
+ printk(KERN_ERR PFX "hash table pointers alloc failed\n");
+ return -ENOMEM;
+ }
+ dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
+ dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
+ if (!dma_segment_array) {
+ printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < segment_count; ++i) {
+ hba->hash_tbl_segments[i] =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
+ if (!hba->hash_tbl_segments[i]) {
+ printk(KERN_ERR PFX "hash segment alloc failed\n");
+ while (--i >= 0) {
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_segment_array[i]);
+ hba->hash_tbl_segments[i] = NULL;
+ }
+ kfree(dma_segment_array);
+ return -ENOMEM;
+ }
+ memset(hba->hash_tbl_segments[i], 0,
+ BNX2FC_HASH_TBL_CHUNK_SIZE);
+ }
+
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
+ if (!hba->hash_tbl_pbl) {
+ printk(KERN_ERR PFX "hash table pbl alloc failed\n");
+ kfree(dma_segment_array);
+ return -ENOMEM;
+ }
+ memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ u64 paddr = dma_segment_array[i];
+ *pbl = cpu_to_le32((u32) paddr);
+ ++pbl;
+ *pbl = cpu_to_le32((u32) (paddr >> 32));
+ ++pbl;
+ }
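+ /* Walk the PBL just written as a sanity check; the values read
+ * here are intentionally discarded (apparent debug leftover). */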
+ pbl = hba->hash_tbl_pbl;
+ i = 0;
+ while (*pbl && *(pbl + 1)) {
+ u32 lo;
+ u32 hi;
+ lo = *pbl;
+ ++pbl;
+ hi = *pbl;
+ ++pbl;
+ ++i;
+ }
+ kfree(dma_segment_array);
+ return 0;
+}
+
+/**
+ * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
+ *
+ * @hba: Pointer to adapter structure
+ *
+ */
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
+{
+ u64 addr;
+ u32 mem_size;
+ int i;
+
+ if (bnx2fc_allocate_hash_table(hba))
+ return -ENOMEM;
+
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl_ptr) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
+
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl, 0x00, mem_size);
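+ /* Link each t2 hash entry's 'next' pointer to the DMA address
+ * of the entry that follows it. */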
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ addr = (unsigned long) hba->t2_hash_tbl_dma +
+ ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
+ hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
+ hba->t2_hash_tbl[i].next.hi = addr >> 32;
+ }
+
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE, &hba->dummy_buf_dma,
+ GFP_KERNEL);
+ if (!hba->dummy_buffer) {
+ printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
+ if (!hba->stats_buffer) {
+ printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->stats_buffer, 0x00, PAGE_SIZE);
+
+ return 0;
+}
+
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
+{
+ u32 mem_size;
+
+ if (hba->stats_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->stats_buffer, hba->stats_buf_dma);
+ hba->stats_buffer = NULL;
+ }
+
+ if (hba->dummy_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer, hba->dummy_buf_dma);
+ hba->dummy_buffer = NULL;
+ }
+
+ if (hba->t2_hash_tbl_ptr) {
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl_ptr,
+ hba->t2_hash_tbl_ptr_dma);
+ hba->t2_hash_tbl_ptr = NULL;
+ }
+
+ if (hba->t2_hash_tbl) {
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
+ hba->t2_hash_tbl = NULL;
+ }
+ bnx2fc_free_hash_table(hba);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
new file mode 100644
index 000000000000..0f1dd23730db
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -0,0 +1,1833 @@
+/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * IO manager and SCSI IO processing.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index);
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req);
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq);
+
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec)
+{
+ struct bnx2fc_hba *hba = io_req->port->priv;
+
+ if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ kref_get(&io_req->refcount);
+}
+
+static void bnx2fc_cmd_timeout(struct work_struct *work)
+{
+ struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
+ timeout_work.work);
+ struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
+ u8 cmd_type = io_req->cmd_type;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ int logo_issued;
+ int rc;
+
+ BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
+ "req_flags = %lx\n", cmd_type, io_req->req_flags);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
+ clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ /*
+ * Ideally we should hold the io_req until the RRQ completes,
+ * and release io_req from timeout hold.
+ */
+ spin_unlock_bh(&tgt->tgt_lock);
+ bnx2fc_send_rrq(io_req);
+ return;
+ }
+ if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
+ goto done;
+ }
+
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
+ complete(&io_req->tm_done);
+ } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /* Handle internally generated ABTS timeout */
+ BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /* Handle IO timeout */
+ BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO completed before "
+ " timer expiry\n");
+ goto done;
+ }
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == SUCCESS)
+ goto done;
+ /*
+ * Explicitly logo the target if
+ * abts initiation fails
+ */
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ } else {
+ BNX2FC_IO_DBG(io_req, "IO already in "
+ "ABTS processing\n");
+ }
+ }
+ break;
+ case BNX2FC_ELS:
+
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicitly logo"
+ "(els)\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /*
+ * Handle ELS timeout.
+ * tgt_lock is used to sync compl path and timeout
+ * path. If els compl path is processing this IO, we
+ * have nothing to do here, just release the timer hold
+ */
+ BNX2FC_IO_DBG(io_req, "ELS timed out\n");
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &io_req->req_flags))
+ goto done;
+
+ /* Indicate the cb_func that this ELS is timed out */
+ set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+ }
+ break;
+ default:
+ printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
+ cmd_type);
+ break;
+ }
+
+done:
+ /* release the cmd that was held when timer was set */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
+{
+ /* Called with host lock held */
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+ /*
+ * active_cmd_queue may have other command types as well,
+ * and during flush operation, we want to error back only
+ * scsi commands.
+ */
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ return;
+
+ BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+ bnx2fc_unmap_sg_list(io_req);
+ io_req->sc_cmd = NULL;
+ if (!sc_cmd) {
+ printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
+ "IO(0x%x) already cleaned up\n",
+ io_req->xid);
+ return;
+ }
+ sc_cmd->result = err_code << 16;
+
+ BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
+ sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
+ sc_cmd->allowed);
+ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+}
+
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid)
+{
+ struct bnx2fc_cmd_mgr *cmgr;
+ struct io_bdt *bdt_info;
+ struct bnx2fc_cmd *io_req;
+ size_t len;
+ u32 mem_size;
+ u16 xid;
+ int i;
+ int num_ios;
+ size_t bd_tbl_sz;
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
+ printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \
+ and max_xid 0x%x\n", min_xid, max_xid);
+ return NULL;
+ }
+ BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
+
+ num_ios = max_xid - min_xid + 1;
+ len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
+ len += sizeof(struct bnx2fc_cmd_mgr);
+
+ cmgr = kzalloc(len, GFP_KERNEL);
+ if (!cmgr) {
+ printk(KERN_ERR PFX "failed to alloc cmgr\n");
+ return NULL;
+ }
+
+ cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list) {
+ printk(KERN_ERR PFX "failed to alloc free_list\n");
+ goto mem_err;
+ }
+
+ cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list_lock) {
+ printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+ goto mem_err;
+ }
+
+ cmgr->hba = hba;
+ cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ INIT_LIST_HEAD(&cmgr->free_list[i]);
+ spin_lock_init(&cmgr->free_list_lock[i]);
+ }
+
+ /* Pre-allocated pool of bnx2fc_cmds */
+ xid = BNX2FC_MIN_XID;
+ for (i = 0; i < num_ios; i++) {
+ io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
+
+ if (!io_req) {
+ printk(KERN_ERR PFX "failed to alloc io_req\n");
+ goto mem_err;
+ }
+
+ INIT_LIST_HEAD(&io_req->link);
+ INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
+
+ io_req->xid = xid++;
+ if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
+ printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
+ io_req->xid);
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[io_req->xid % num_possible_cpus()]);
+ }
+
+ /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
+ mem_size = num_ios * sizeof(struct io_bdt *);
+ cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool) {
+ printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
+ goto mem_err;
+ }
+
+ mem_size = sizeof(struct io_bdt);
+ for (i = 0; i < num_ios; i++) {
+ cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool[i]) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "io_bdt_pool[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+	/* Allocate and map fcoe_bdt_ctx structures */
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ bd_tbl_sz,
+ &bdt_info->bd_tbl_dma,
+ GFP_KERNEL);
+ if (!bdt_info->bd_tbl) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "bdt_tbl[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ return cmgr;
+
+mem_err:
+ bnx2fc_cmd_mgr_free(cmgr);
+ return NULL;
+}
+
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
+{
+ struct io_bdt *bdt_info;
+ struct bnx2fc_hba *hba = cmgr->hba;
+ size_t bd_tbl_sz;
+ u16 min_xid = BNX2FC_MIN_XID;
+ u16 max_xid = BNX2FC_MAX_XID;
+ int num_ios;
+ int i;
+
+ num_ios = max_xid - min_xid + 1;
+
+ /* Free fcoe_bdt_ctx structures */
+ if (!cmgr->io_bdt_pool)
+ goto free_cmd_pool;
+
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ if (bdt_info->bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
+ bdt_info->bd_tbl,
+ bdt_info->bd_tbl_dma);
+ bdt_info->bd_tbl = NULL;
+ }
+ }
+
+ /* Destroy io_bdt pool */
+ for (i = 0; i < num_ios; i++) {
+ kfree(cmgr->io_bdt_pool[i]);
+ cmgr->io_bdt_pool[i] = NULL;
+ }
+
+ kfree(cmgr->io_bdt_pool);
+ cmgr->io_bdt_pool = NULL;
+
+free_cmd_pool:
+ kfree(cmgr->free_list_lock);
+
+ /* Destroy cmd pool */
+ if (!cmgr->free_list)
+ goto free_cmgr;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ struct list_head *list;
+ struct list_head *tmp;
+
+ list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
+ struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+ list_del(&io_req->link);
+ kfree(io_req);
+ }
+ }
+ kfree(cmgr->free_list);
+free_cmgr:
+ /* Free command manager itself */
+ kfree(cmgr);
+}
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = tgt->max_sqes;
+ switch (type) {
+ case BNX2FC_TASK_MGMT_CMD:
+ max_sqes = BNX2FC_TM_MAX_SQES;
+ break;
+ case BNX2FC_ELS:
+ max_sqes = BNX2FC_ELS_MAX_SQES;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
+ "ios(%d):sqes(%d)\n",
+ tgt->num_active_ios.counter, tgt->max_sqes);
+ if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
+ printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+ io_req->cmd_type = type;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = BNX2FC_SCSI_MAX_SQES;
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+
+void bnx2fc_cmd_release(struct kref *ref)
+{
+ struct bnx2fc_cmd *io_req = container_of(ref,
+ struct bnx2fc_cmd, refcount);
+ struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ bnx2fc_free_mp_resc(io_req);
+ cmd_mgr->cmds[io_req->xid] = NULL;
+ /* Delete IO from retire queue */
+ list_del_init(&io_req->link);
+ /* Add it to the free list */
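+	/*
+	 * Note: this is the current CPU's free list, which may differ
+	 * from the list the command was originally allocated from; the
+	 * per-CPU pools rebalance as IOs complete.
+	 */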
+ list_add(&io_req->link,
+ &cmd_mgr->free_list[smp_processor_id()]);
+ atomic_dec(&io_req->tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+}
+
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ size_t sz = sizeof(struct fcoe_bd_ctx);
+
+ /* clear tm flags */
+ mp_req->tm_flags = 0;
+ if (mp_req->mp_req_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_req_bd,
+ mp_req->mp_req_bd_dma);
+ mp_req->mp_req_bd = NULL;
+ }
+ if (mp_req->mp_resp_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_resp_bd,
+ mp_req->mp_resp_bd_dma);
+ mp_req->mp_resp_bd = NULL;
+ }
+ if (mp_req->req_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf,
+ mp_req->req_buf_dma);
+ mp_req->req_buf = NULL;
+ }
+ if (mp_req->resp_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf,
+ mp_req->resp_buf_dma);
+ mp_req->resp_buf = NULL;
+ }
+}
+
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fcoe_bd_ctx *mp_req_bd;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ dma_addr_t addr;
+ size_t sz;
+
+ mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+ memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ mp_req->req_len = sizeof(struct fcp_cmnd);
+ io_req->data_xfer_len = mp_req->req_len;
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->req_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->req_buf) {
+ printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->resp_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->resp_buf) {
+ printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ memset(mp_req->req_buf, 0, PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, PAGE_SIZE);
+
+ /* Allocate and map mp_req_bd and mp_resp_bd */
+ sz = sizeof(struct fcoe_bd_ctx);
+ mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_req_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_req_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_resp_bd_dma,
+ GFP_ATOMIC);
+	if (!mp_req->mp_resp_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ /* Fill bd table */
+ addr = mp_req->req_buf_dma;
+ mp_req_bd = mp_req->mp_req_bd;
+ mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->flags = 0;
+
+ /*
+ * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd
+ * entry in the bd table
+ */
+ mp_resp_bd = mp_req->mp_resp_bd;
+ addr = mp_req->resp_buf_dma;
+ mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->flags = 0;
+
+ return SUCCESS;
+}
+
+static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+{
+ struct fc_lport *lport;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_rport_libfc_priv *rp;
+ struct fcoe_port *port;
+ struct bnx2fc_hba *hba;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ struct bnx2fc_mp_req *tm_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct Scsi_Host *host = sc_cmd->device->host;
+ struct fc_frame_header *fc_hdr;
+ struct fcp_cmnd *fcp_cmnd;
+ int task_idx, index;
+ int rc = SUCCESS;
+ u16 xid;
+ u32 sid, did;
+ unsigned long start = jiffies;
+
+ lport = shost_priv(host);
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "device_reset: link is not ready\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+	/* rport and tgt are allocated together, so tgt should be non-NULL */
+	rp = rport->dd_data;
+	tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+retry_tmf:
+ io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
+ if (!io_req) {
+ if (time_after(jiffies, start + HZ)) {
+ printk(KERN_ERR PFX "tmf: Failed TMF");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ msleep(20);
+ goto retry_tmf;
+ }
+ /* Initialize rest of io_req fields */
+ io_req->sc_cmd = sc_cmd;
+ io_req->port = port;
+ io_req->tgt = tgt;
+
+ tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+
+ rc = bnx2fc_init_mp_req(io_req);
+ if (rc == FAILED) {
+ printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ goto tmf_err;
+ }
+
+ /* Set TM flags */
+ io_req->io_req_flags = 0;
+ tm_req->tm_flags = tm_flags;
+
+ /* Fill FCP_CMND */
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
+ fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
+ memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
+ fcp_cmnd->fc_dl = 0;
+
+ /* Fill FC header */
+ fc_hdr = &(tm_req->req_fc_hdr);
+ sid = tgt->sid;
+ did = rport->port_id;
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ /* Obtain exchange id */
+ xid = io_req->xid;
+
+ BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(io_req, task);
+
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ /* Obtain free SQ entry */
+ spin_lock_bh(&tgt->tgt_lock);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_tm_queue */
+ io_req->on_tmf_queue = 1;
+ list_add_tail(&io_req->link, &tgt->active_tm_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_TM_TIMEOUT * HZ);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ io_req->wait_for_comp = 0;
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!rc) {
+ printk(KERN_ERR PFX "task mgmt command failed...\n");
+ rc = FAILED;
+ } else {
+ printk(KERN_ERR PFX "task mgmt command success...\n");
+ rc = SUCCESS;
+ }
+tmf_err:
+ return rc;
+}
+
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *abts_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fc_frame_header *fc_hdr;
+ struct bnx2fc_mp_req *abts_req;
+ int task_idx, index;
+ u32 sid, did;
+ u16 xid;
+ int rc = SUCCESS;
+ u32 r_a_tov = rdata->r_a_tov;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+ if (!abts_io_req) {
+ printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ abts_io_req->sc_cmd = NULL;
+ abts_io_req->port = port;
+ abts_io_req->tgt = tgt;
+ abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+ abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+ memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ /* Fill FC header */
+ fc_hdr = &(abts_req->req_fc_hdr);
+
+ /* Obtain oxid and rxid for the original exchange to be aborted */
+ fc_hdr->fh_ox_id = htons(io_req->xid);
+ fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
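+	/*
+	 * The ABTS carries the OX_ID/RX_ID of the exchange being aborted
+	 * so the target can match it to the original IO.
+	 */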
+
+ sid = tgt->sid;
+ did = rport->port_id;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+ FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ xid = abts_io_req->xid;
+ BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(abts_io_req, task);
+
+ /*
+ * ABTS task is a temporary task that will be cleaned up
+ * irrespective of ABTS response. We need to start the timer
+ * for the original exchange, as the CQE is posted for the original
+ * IO request.
+ *
+ * Timer for ABTS is started only when it is originated by a
+ * TM request. For the ABTS issued as part of ULP timeout,
+ * scsi-ml maintains the timers.
+ */
+
+ /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
+ bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+abts_err:
+ return rc;
+}
+
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *cleanup_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int task_idx, index;
+ u16 xid, orig_xid;
+ int rc = 0;
+
+ /* ASSUMPTION: called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
+ if (!cleanup_io_req) {
+ printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ rc = -1;
+ goto cleanup_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ cleanup_io_req->sc_cmd = NULL;
+ cleanup_io_req->port = port;
+ cleanup_io_req->tgt = tgt;
+ cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = cleanup_io_req->xid;
+
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ orig_xid = io_req->xid;
+
+ BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
+
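+	/*
+	 * The cleanup task carries the xid of the original IO so the
+	 * firmware can tear down that task context.
+	 */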
+ bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+cleanup_err:
+ return rc;
+}
+
+/**
+ * bnx2fc_eh_target_reset - Reset a target
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from the SCSI host template to send a task management command
+ * to the target and wait for the response.
+ */
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+/**
+ * bnx2fc_eh_device_reset - Reset a single LUN
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from the SCSI host template to send a task management command
+ * to the target and wait for the response.
+ */
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+/**
+ * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
+ * SCSI command
+ *
+ * @sc_cmd: SCSI_ML command pointer
+ *
+ * SCSI abort request handler
+ */
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_cmd *io_req;
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt;
+ int rc = FAILED;
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ lport = shost_priv(sc_cmd->device->host);
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+ return rc;
+ }
+
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ if (!io_req) {
+ /* Command might have just completed */
+ printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+
+ /* Hold IO request across abort processing */
+ kref_get(&io_req->refcount);
+
+ BUG_ON(tgt != io_req->tgt);
+
+ /* Remove the io_req from the active_q. */
+ /*
+ * Task Mgmt functions (LUN RESET & TGT RESET) will not
+ * issue an ABTS on this particular IO req, as the
+ * io_req is no longer in the active_q.
+ */
+ if (tgt->flush_in_prog) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "flush in progress\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ if (io_req->on_active_queue == 0) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "not on active_q\n", io_req->xid);
+		/*
+		 * This condition can only happen due to a FW bug,
+		 * where we do not receive a cleanup response from
+		 * the FW. Handle this case gracefully by erroring
+		 * the IO request back to SCSI-ml.
+		 */
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ /*
+ * Only eh_abort processing will remove the IO from
+ * active_cmd_q before processing the request. this is
+ * done to avoid race conditions between IOs aborted
+ * as part of task management completion and eh_abort
+ * processing
+ */
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
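+	/*
+	 * Once BNX2FC_FLAG_EH_ABORT is set, the ABTS/cleanup completion
+	 * path is expected to complete tm_done; we drop tgt_lock and
+	 * block on it below.
+	 */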
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ rc = bnx2fc_initiate_abts(io_req);
+ } else {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "already in abts processing\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ if (rc == FAILED) {
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+ /* Let the scsi-ml try to recover this command */
+ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
+ io_req->xid);
+ rc = FAILED;
+ } else {
+ /*
+ * We come here even when there was a race condition
+ * between timeout and abts completion, and abts
+ * completion happens just in time.
+ */
+ BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+ rc = SUCCESS;
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+
+ /* release the reference taken in eh_abort */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+}
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ u32 r_ctl;
+ u32 r_a_tov = FC_DEF_R_A_TOV;
+ u8 issue_rrq = 0;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+
+ BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x"
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->xid,
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+ " this io\n");
+ return;
+ }
+
+	/* Do not issue RRQ as this IO is already cleaned up */
+ if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags))
+ goto io_compl;
+
+ /*
+ * For ABTS issued due to SCSI eh_abort_handler, timeout
+ * values are maintained by scsi-ml itself. Cancel timeout
+ * in case ABTS issued as part of task management function
+ * or due to FW error.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+
+ switch (r_ctl) {
+ case FC_RCTL_BA_ACC:
+		/*
+		 * Don't release this cmd yet. It will be released
+		 * after we get the RRQ response.
+		 */
+ BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+ issue_rrq = 1;
+ break;
+
+ case FC_RCTL_BA_RJT:
+ BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+ break;
+ default:
+ printk(KERN_ERR PFX "Unknown ABTS response\n");
+ break;
+ }
+
+ if (issue_rrq) {
+ BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+ set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+ set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ bnx2fc_cmd_timer_set(io_req, r_a_tov);
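+	/*
+	 * Hold the aborted exchange for R_A_TOV before retiring the
+	 * OX_ID; when the timer fires, bnx2fc_cmd_timeout is expected to
+	 * issue the RRQ (if flagged) and release the command.
+	 */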
+
+io_compl:
+ if (io_req->wait_for_comp) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ } else {
+ /*
+ * We end up here when ABTS is issued as
+ * in asynchronous context, i.e., as part
+ * of task management completion, or
+ * when FW error is received or when the
+ * ABTS is issued when the IO is timed
+ * out.
+ */
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ }
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+}
+
+static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int tm_lun = sc_cmd->device->lun;
+ int rc = 0;
+ int lun;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
+	/*
+	 * Walk through the active_cmd_queue and abort each IO
+	 * that matches the LUN that was reset.
+	 */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ lun = cmd->sc_cmd->device->lun;
+ if (lun == tm_lun) {
+ /* Initiate ABTS on this cmd */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+				if (cancel_delayed_work(&cmd->timeout_work))
+					kref_put(&cmd->refcount,
+						bnx2fc_cmd_release);
+ /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+				/* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+ } else
+ printk(KERN_ERR PFX "lun_rst: abts already in"
+ " progress for this IO 0x%x\n",
+ cmd->xid);
+ }
+ }
+}
+
+static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int rc = 0;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
+	/*
+	 * Walk through the active_cmd_queue and abort every
+	 * outstanding IO to this target.
+	 */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ /* Initiate ABTS */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+			if (cancel_delayed_work(&cmd->timeout_work))
+				kref_put(&cmd->refcount,
+					 bnx2fc_cmd_release); /* timer hold */
+ rc = bnx2fc_initiate_abts(cmd);
+			/* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+
+ } else
+ printk(KERN_ERR PFX "tgt_rst: abts already in progress"
+ " for this IO 0x%x\n", cmd->xid);
+ }
+}
+
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *tm_req;
+ struct fc_frame_header *fc_hdr;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ u64 *hdr;
+ u64 *temp_hdr;
+ void *rsp_buf;
+
+ /* Called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
+
+ if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
+ else {
+		/* The TM has already timed out and this is a
+		 * delayed completion. Ignore completion
+		 * processing.
+		 */
+ return;
+ }
+
+ tm_req = &(io_req->mp_req);
+ fc_hdr = &(tm_req->resp_fc_hdr);
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
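+	/* Copy the 24-byte FC frame header out of the task context as
+	 * three 64-bit words, converting to big-endian wire order.
+	 */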
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ rsp_buf = tm_req->resp_buf;
+
+ if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ bnx2fc_parse_fcp_rsp(io_req,
+ (struct fcoe_fcp_rsp_payload *)
+ rsp_buf, num_rq);
+ if (io_req->fcp_rsp_code == 0) {
+ /* TM successful */
+ if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
+ bnx2fc_lun_reset_cmpl(io_req);
+ else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
+ bnx2fc_tgt_reset_cmpl(io_req);
+ }
+ } else {
+ printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
+ fc_hdr->fh_r_ctl);
+ }
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
+ return;
+ }
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+
+ default:
+ BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+
+ sc_cmd = io_req->sc_cmd;
+ io_req->sc_cmd = NULL;
+
+ /* check if the io_req exists in tgt's tmf_q */
+	if (io_req->on_tmf_queue) {
+		list_del_init(&io_req->link);
+		io_req->on_tmf_queue = 0;
+	} else {
+		printk(KERN_ALERT PFX "Command not on active_tm_queue!\n");
+		return;
+	}
+
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ if (io_req->wait_for_comp) {
+ BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
+ complete(&io_req->tm_done);
+ }
+}
+
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int frag_size, sg_frags;
+
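+	/*
+	 * Split a scatterlist element that exceeds the maximum BD length
+	 * into chunks of at most BNX2FC_BD_SPLIT_SZ bytes; the BD
+	 * buf_len field is only 16 bits wide.
+	 */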
+ sg_frags = 0;
+ while (sg_len) {
+ if (sg_len >= BNX2FC_BD_SPLIT_SZ)
+ frag_size = BNX2FC_BD_SPLIT_SZ;
+ else
+ frag_size = sg_len;
+ bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
+ bd[bd_index + sg_frags].buf_len = (u16)frag_size;
+ bd[bd_index + sg_frags].flags = 0;
+
+ addr += (u64) frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+
+}
+
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int sg_count = 0;
+ int bd_count = 0;
+ int sg_frags;
+ unsigned int sg_len;
+ u64 addr;
+ int i;
+
+ sg_count = scsi_dma_map(sc);
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+ if (sg_len > BNX2FC_MAX_BD_LEN) {
+ sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
+ bd_count);
+		} else {
+			sg_frags = 1;
+ bd[bd_count].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_count].buf_addr_hi = addr >> 32;
+ bd[bd_count].buf_len = (u16)sg_len;
+ bd[bd_count].flags = 0;
+ }
+ bd_count += sg_frags;
+ byte_count += sg_len;
+ }
+ if (byte_count != scsi_bufflen(sc))
+ printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
+ "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
+ io_req->xid);
+ return bd_count;
+}
+
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int bd_count;
+
+ if (scsi_sg_count(sc))
+ bd_count = bnx2fc_map_sg(io_req);
+ else {
+ bd_count = 0;
+ bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
+ bd[0].buf_len = bd[0].flags = 0;
+ }
+ io_req->bd_tbl->bd_valid = bd_count;
+}
+
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+
+ if (io_req->bd_tbl->bd_valid && sc) {
+ scsi_dma_unmap(sc);
+ io_req->bd_tbl->bd_valid = 0;
+ }
+}
+
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ char tag[2];
+
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+
+ int_to_scsilun(sc_cmd->device->lun,
+ (struct scsi_lun *) fcp_cmnd->fc_lun);
+
+ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_flags = io_req->io_req_flags;
+
+ if (scsi_populate_tag_msg(sc_cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
+ break;
+ default:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+ break;
+ }
+ } else {
+ fcp_cmnd->fc_pri_ta = 0;
+ }
+}
+
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u8 rsp_flags = fcp_rsp->fcp_flags.flags;
+ u32 rq_buff_len = 0;
+ int i;
+ unsigned char *rq_data;
+ unsigned char *dummy;
+ int fcp_sns_len = 0;
+ int fcp_rsp_len = 0;
+
+ io_req->fcp_status = FC_GOOD;
+ io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+ io_req->scsi_comp_flags = rsp_flags;
+ CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+ fcp_rsp->scsi_status_code;
+
+ /* Fetch fcp_rsp_info and fcp_sns_info if available */
+ if (num_rq) {
+
+		/*
+		 * We do not anticipate num_rq > 1, as the Linux-defined
+		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of
+		 * FCP_RSP_INFO; a single 256-byte RQ buffer is good
+		 * enough to hold this.
+		 */
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
+ fcp_rsp_len = rq_buff_len
+ = fcp_rsp->fcp_rsp_len;
+ }
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
+ fcp_sns_len = fcp_rsp->fcp_sns_len;
+ rq_buff_len += fcp_rsp->fcp_sns_len;
+ }
+
+ io_req->fcp_rsp_len = fcp_rsp_len;
+ io_req->fcp_sns_len = fcp_sns_len;
+
+ if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
+			/* Invalid sense length. */
+ printk(KERN_ALERT PFX "invalid sns length %d\n",
+ rq_buff_len);
+ /* reset rq_buff_len */
+ rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
+ }
+
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ dummy = bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ /* fetch fcp_rsp_code */
+ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+ /* Only for task management function */
+ io_req->fcp_rsp_code = rq_data[3];
+ printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+ io_req->fcp_rsp_code);
+ }
+
+ /* fetch sense data */
+ rq_data += fcp_rsp_len;
+
+ if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+ printk(KERN_ERR PFX "Truncating sense buffer\n");
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+ }
+}
+
+/**
+ * bnx2fc_queuecommand - Queuecommand function of the scsi template
+ *
+ * @host: The Scsi_Host the command was issued to
+ * @sc_cmd: struct scsi_cmnd to be executed
+ *
+ * This is the IO strategy routine, called by SCSI-ML
+ **/
+int bnx2fc_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ int rc = 0;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ /*
+ * Session is not offloaded yet. Let SCSI-ml retry
+ * the command.
+ */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+
+ io_req = bnx2fc_cmd_alloc(tgt);
+ if (!io_req) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+ io_req->sc_cmd = sc_cmd;
+
+ if (bnx2fc_post_io_req(tgt, io_req)) {
+ printk(KERN_ERR PFX "Unable to post io_req\n");
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+exit_qcmd:
+ return rc;
+}
+
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ struct fcoe_fcp_rsp_payload *fcp_rsp;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct scsi_cmnd *sc_cmd;
+
+ /* scsi_cmd_cmpl is called with tgt lock held */
+
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+ /* we will not receive ABTS response for this IO */
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing "
+ "this scsi cmd\n");
+ }
+
+ /* Cancel the timeout_work, as we received IO completion */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ sc_cmd = io_req->sc_cmd;
+ if (sc_cmd == NULL) {
+ printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
+ return;
+ }
+
+ /* Fetch fcp_rsp from task context and perform cmd completion */
+ fcp_rsp = (struct fcoe_fcp_rsp_payload *)
+ &(task->cmn.general.rsp_info.fcp_rsp.payload);
+
+ /* parse fcp_rsp and obtain sense data from RQ if available */
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ return;
+ }
+ io_req->sc_cmd = NULL;
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ } else {
+ /* This should not happen, but could have been pulled
+ * by bnx2fc_flush_active_ios(), or during a race
+ * between command abort and (late) completion.
+ */
+ BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
+ if (io_req->wait_for_comp)
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ }
+
+ bnx2fc_unmap_sg_list(io_req);
+
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
+ " fcp_resid = 0x%x\n",
+ io_req->cdb_status, io_req->fcp_resid);
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+ default:
+ printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_lport *lport = port->lport;
+ struct fcoe_dev_stats *stats;
+ int task_idx, index;
+ u16 xid;
+
+ /* Initialize rest of io_req fields */
+ io_req->cmd_type = BNX2FC_SCSI_CMD;
+ io_req->port = port;
+ io_req->tgt = tgt;
+ io_req->data_xfer_len = scsi_bufflen(sc_cmd);
+ sc_cmd->SCp.ptr = (char *)io_req;
+
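+	/* Update the per-CPU libfc stats; get_cpu() keeps us on this CPU
+	 * (preemption disabled) until the matching put_cpu() below.
+	 */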
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ io_req->io_req_flags = BNX2FC_READ;
+ stats->InputRequests++;
+ stats->InputBytes += io_req->data_xfer_len;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ io_req->io_req_flags = BNX2FC_WRITE;
+ stats->OutputRequests++;
+ stats->OutputBytes += io_req->data_xfer_len;
+ } else {
+ io_req->io_req_flags = 0;
+ stats->ControlRequests++;
+ }
+ put_cpu();
+
+ xid = io_req->xid;
+
+ /* Build buffer descriptor list for firmware from sg list */
+ bnx2fc_build_bd_list_from_sg(io_req);
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_task(io_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (tgt->flush_in_prog) {
+ printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "Session not ready...post_io\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ /* Time IO req */
+ bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_cmd_queue */
+
+ io_req->on_active_queue = 1;
+ /* move io_req from pending_queue to active_queue */
+ list_add_tail(&io_req->link, &tgt->active_cmd_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return 0;
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
new file mode 100644
index 000000000000..7ea93af60260
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -0,0 +1,844 @@
+/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static void bnx2fc_upld_timer(unsigned long data);
+static void bnx2fc_ofld_timer(unsigned long data);
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata);
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
+
+static void bnx2fc_upld_timer(unsigned long data)
+{
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
+ /* fake upload completion */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+}
+
+static void bnx2fc_ofld_timer(unsigned long data)
+{
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
+	/* NOTE: This function should never be called, as
+	 * offload should never time out
+	 */
+ /*
+ * If the timer has expired, this session is dead
+ * Clear offloaded flag and logout of this device.
+ * Since OFFLOADED flag is cleared, this case
+ * will be considered as offload error and the
+ * port will be logged off, and conn_id, session
+ * resources are freed up in bnx2fc_offload_session
+ */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_offload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+ int rval;
+ int i = 0;
+
+ /* Initialize bnx2fc_rport */
+ /* NOTE: tgt is already bzero'd */
+ rval = bnx2fc_init_tgt(tgt, port, rdata);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate conn id for "
+ "port_id (%6x)\n", rport->port_id);
+ goto ofld_err;
+ }
+
+ /* Allocate session resources */
+ rval = bnx2fc_alloc_session_resc(hba, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate resources\n");
+ goto ofld_err;
+ }
+
+	/*
+	 * Initialize the FCoE session offload process. Upon completion
+	 * of the offload process, add the rport to the list of rports.
+	 */
+retry_ofld:
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_ofld_req(port, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "ofld_req failed\n");
+ goto ofld_err;
+ }
+
+	/*
+	 * Wait for the session to be offloaded and enabled. 3 seconds
+	 * should be ample time for this process to complete.
+	 */
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->ofld_timer);
+
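+	/*
+	 * If the firmware failed the offload due to a context allocation
+	 * failure, retry the offload request up to three times, one
+	 * second apart, before giving up.
+	 */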
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
+ &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
+ "retry ofld..%d\n", i++);
+ msleep_interruptible(1000);
+ if (i > 3) {
+ i = 0;
+ goto ofld_err;
+ }
+ goto retry_ofld;
+ }
+ goto ofld_err;
+ }
+ if (bnx2fc_map_doorbell(tgt)) {
+ printk(KERN_ERR PFX "map doorbell failed - no mem\n");
+ /* upload will take care of cleaning up sess resc */
+ lport->tt.rport_logoff(rdata);
+ }
+ return;
+
+ofld_err:
+ /* couldn't offload the session. log off from this rport */
+ BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ lport->tt.rport_logoff(rdata);
+ /* Free session resources */
+ bnx2fc_free_session_resc(hba, tgt);
+ if (tgt->fcoe_conn_id != -1)
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+}
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_cmd *io_req;
+ struct list_head *list;
+ struct list_head *tmp;
+ int rc;
+ int i = 0;
+ BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
+ tgt->num_active_ios.counter);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 1;
+
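+	/*
+	 * Three lists are drained under tgt_lock: active SCSI commands,
+	 * outstanding ELS requests, and the retire queue. Afterwards we
+	 * poll (up to BNX2FC_WAIT_CNT times, 25ms apart) for
+	 * num_active_ios to drop to zero.
+	 */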
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "cleaned up\n");
+ complete(&io_req->tm_done);
+ }
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ }
+
+ set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->els_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+
+ BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+
+ BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+
+ BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
+ i = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+ /* wait for active_ios to go to 0 */
+ while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
+ msleep(25);
+ if (tgt->num_active_ios.counter != 0)
+ printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
+ " active_ios = %d\n",
+ tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_upload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
+ tgt->num_active_ios.counter);
+
+ /*
+ * Called with hba->hba_mutex held.
+ * This is a blocking call
+ */
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_disable_req(port, tgt);
+
+	/*
+	 * Wait for the upload to complete. 3 seconds should be
+	 * sufficient time for this process to complete.
+	 */
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+	/*
+	 * Traverse the active_q and tmf_q and clean up
+	 * the IOs on these lists.
+	 */
+ BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
+ tgt->flags);
+ bnx2fc_flush_active_ios(tgt);
+
+ /* Issue destroy KWQE */
+ if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "send destroy req\n");
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_destroy_req(hba, tgt);
+
+ /* wait for destroy to complete */
+ setup_timer(&tgt->upld_timer,
+ bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
+ printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
+
+ BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
+ tgt->flags);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+ } else
+ printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
+ " not sent to FW\n");
+
+ /* Free session resources */
+ spin_lock_bh(&tgt->cq_lock);
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ spin_unlock_bh(&tgt->cq_lock);
+}
+
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+
+ tgt->rport = rport;
+ tgt->rdata = rdata;
+ tgt->port = port;
+
+ if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
+ BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
+ tgt->fcoe_conn_id = -1;
+ return -1;
+ }
+
+ tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
+ if (tgt->fcoe_conn_id == -1)
+ return -1;
+
+ BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
+
+ tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
+ tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
+ tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+
+ /* Initialize the toggle bit */
+ tgt->sq_curr_toggle_bit = 1;
+ tgt->cq_curr_toggle_bit = 1;
+ tgt->sq_prod_idx = 0;
+ tgt->cq_cons_idx = 0;
+ tgt->rq_prod_idx = 0x8000;
+ tgt->rq_cons_idx = 0;
+ atomic_set(&tgt->num_active_ios, 0);
+
+ tgt->work_time_slice = 2;
+
+ spin_lock_init(&tgt->tgt_lock);
+ spin_lock_init(&tgt->cq_lock);
+
+ /* Initialize active_cmd_queue list */
+ INIT_LIST_HEAD(&tgt->active_cmd_queue);
+
+ /* Initialize IO retire queue */
+ INIT_LIST_HEAD(&tgt->io_retire_queue);
+
+ INIT_LIST_HEAD(&tgt->els_queue);
+
+ /* Initialize active_tm_queue list */
+ INIT_LIST_HEAD(&tgt->active_tm_queue);
+
+ init_waitqueue_head(&tgt->ofld_wait);
+ init_waitqueue_head(&tgt->upld_wait);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rport_event_handler - Handle rport events from libfc
+ *
+ * This event_callback is called after successful completion of libfc
+ * initiated target login. bnx2fc can proceed with initiating the session
+ * establishment.
+ */
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = rdata->rport;
+ struct fc_rport_libfc_priv *rp;
+ struct bnx2fc_rport *tgt;
+ u32 port_id;
+
+ BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
+ event, rdata->ids.port_id);
+ switch (event) {
+ case RPORT_EV_READY:
+ if (!rport) {
+ printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+ break;
+ }
+
+ rp = rport->dd_data;
+ if (rport->port_id == FC_FID_DIR_SERV) {
+			/*
+			 * A bnx2fc_rport structure doesn't exist for
+			 * the directory server.
+			 * We should not come here, as lport will
+			 * take care of fabric login.
+			 */
+ printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+ rdata->ids.port_id);
+ break;
+ }
+
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ BNX2FC_HBA_DBG(lport, "not FCP type target."
+ " not offloading\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
+ " not offloading\n");
+ break;
+ }
+
+		/*
+		 * The offload process is protected by the hba mutex.
+		 * Use the same mutex for the upload process too.
+		 */
+ mutex_lock(&hba->hba_mutex);
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ /* This can happen when ADISC finds the same target */
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "already offloaded\n");
+ mutex_unlock(&hba->hba_mutex);
+ return;
+ }
+
+ /*
+ * Offload the session. This is a blocking call, and will
+ * wait until the session is offloaded.
+ */
+ bnx2fc_offload_session(port, tgt, rdata);
+
+ BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ /*
+ * Session is offloaded and enabled. Map
+ * doorbell register for this target
+ */
+ BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
+ /* This counter is protected with hba mutex */
+ hba->num_ofld_sess++;
+
+ set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ } else {
+ /*
+ * Offload or enable would have failed.
+ * In offload/enable completion path, the
+ * rport would have already been removed
+ */
+ BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
+ "offloaded flag not set\n");
+ }
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ port_id = rdata->ids.port_id;
+ if (port_id == FC_FID_DIR_SERV)
+ break;
+
+ if (!rport) {
+ printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
+ port_id);
+ break;
+ }
+ rp = rport->dd_data;
+ mutex_lock(&hba->hba_mutex);
+ /*
+ * Perform session upload. Note that rdata->peers is already
+ * removed from disc->rports list before we get this event.
+ */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ }
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+
+ bnx2fc_upload_session(port, tgt);
+ hba->num_ofld_sess--;
+ BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+		/*
+		 * Try to wake up the linkdown wait thread. If num_ofld_sess
+		 * is 0, the waiting thread wakes up.
+		 */
+ if ((hba->wait_for_link_down) &&
+ (hba->num_ofld_sess == 0)) {
+ wake_up_interruptible(&hba->shutdown_wait);
+ }
+ if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
+ printk(KERN_ERR PFX "Relogin to the tgt\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_login(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ mutex_unlock(&hba->hba_mutex);
+
+ break;
+
+ case RPORT_EV_NONE:
+ break;
+ }
+}
+
+/**
+ * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
+ *
+ * @port: fcoe_port struct to lookup the target port on
+ * @port_id: The remote port ID to look up
+ */
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ struct fc_rport_priv *rdata;
+ int i;
+
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if ((tgt) && (tgt->port == port)) {
+ rdata = tgt->rdata;
+ if (rdata->ids.port_id == port_id) {
+ if (rdata->rp_state != RPORT_ST_DELETE) {
+ BNX2FC_TGT_DBG(tgt, "rport "
+ "obtained\n");
+ return tgt;
+ } else {
+ printk(KERN_ERR PFX "rport 0x%x "
+ "is in DELETED state\n",
+ rdata->ids.port_id);
+ return NULL;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * bnx2fc_alloc_conn_id - allocates FCOE Connection id
+ *
+ * @hba: pointer to adapter structure
+ * @tgt: pointer to bnx2fc_rport structure
+ */
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ u32 conn_id, next;
+
+ /* called with hba mutex held */
+
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both the hba mutex and the hba lock. At least one of
+ * them must be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ next = hba->next_conn_id;
+ conn_id = hba->next_conn_id++;
+ if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
+ hba->next_conn_id = 0;
+
+ while (hba->tgt_ofld_list[conn_id] != NULL) {
+ conn_id++;
+ if (conn_id == BNX2FC_NUM_MAX_SESS)
+ conn_id = 0;
+
+ if (conn_id == next) {
+ /* No free conn_ids are available */
+ spin_unlock_bh(&hba->hba_lock);
+ return -1;
+ }
+ }
+ hba->tgt_ofld_list[conn_id] = tgt;
+ tgt->fcoe_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+ return conn_id;
+}
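
The allocator above is a circular scan over tgt_ofld_list: it starts one past the most recent allocation and wraps until it finds a free slot or returns to its starting point. A minimal standalone sketch of the same scheme follows; the table size, the names, and the absence of locking (the driver holds hba_lock around the scan) are illustrative assumptions, not the driver's code.

    #include <stddef.h>

    #define MAX_SESS 128                /* stand-in for BNX2FC_NUM_MAX_SESS */

    static void *ofld_list[MAX_SESS];   /* NULL means the slot is free */
    static unsigned int next_id;        /* where the next scan starts */

    /* Returns a free slot index, or -1 if every slot is occupied. */
    static int alloc_id(void *owner)
    {
        unsigned int id = next_id;
        unsigned int start = id;

        do {
            if (!ofld_list[id]) {
                ofld_list[id] = owner;
                next_id = (id + 1) % MAX_SESS;
                return (int)id;
            }
            id = (id + 1) % MAX_SESS;
        } while (id != start);

        return -1;                      /* table full; caller must bail out */
    }

    static void free_id(unsigned int id)
    {
        ofld_list[id] = NULL;
        next_id = id;   /* reuse the freed slot first, as the driver does */
    }

Resetting next_id to the freed slot mirrors bnx2fc_free_conn_id(), so recently released ids are handed out again before the scan has to walk the whole table.
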
+
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
+{
+ /* called with hba mutex held */
+ spin_lock_bh(&hba->hba_lock);
+ hba->tgt_ofld_list[conn_id] = NULL;
+ hba->next_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+}
+
+/**
+ * bnx2fc_alloc_session_resc - Allocate QP resources for the session
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ dma_addr_t page;
+ int num_pages;
+ u32 *pbl;
+
+ /* Allocate and map SQ */
+ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
+ if (!tgt->sq) {
+ printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
+ tgt->sq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->sq, 0, tgt->sq_mem_size);
+
+ /* Allocate and map CQ */
+ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
+ if (!tgt->cq) {
+ printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
+ tgt->cq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->cq, 0, tgt->cq_mem_size);
+
+ /* Allocate and map RQ and RQ PBL */
+ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
+ if (!tgt->rq) {
+ printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
+ tgt->rq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->rq, 0, tgt->rq_mem_size);
+
+ tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
+ if (!tgt->rq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
+ tgt->rq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
+ num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ page = tgt->rq_dma;
+ pbl = (u32 *)tgt->rq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map XFERQ */
+ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ &tgt->xferq_dma, GFP_KERNEL);
+ if (!tgt->xferq) {
+ printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
+ tgt->xferq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->xferq, 0, tgt->xferq_mem_size);
+
+ /* Allocate and map CONFQ & CONFQ PBL */
+ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ &tgt->confq_dma, GFP_KERNEL);
+ if (!tgt->confq) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+ tgt->confq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->confq, 0, tgt->confq_mem_size);
+
+ tgt->confq_pbl_size =
+ (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->confq_pbl_size =
+ (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
+ if (!tgt->confq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+ tgt->confq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
+ num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ page = tgt->confq_dma;
+ pbl = (u32 *)tgt->confq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map ConnDB */
+ tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
+
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
+ if (!tgt->conn_db) {
+ printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+ tgt->conn_db_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
+
+ /* Allocate and map LCQ */
+ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
+
+ if (!tgt->lcq) {
+ printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+ tgt->lcq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->lcq, 0, tgt->lcq_mem_size);
+
+ /* Arm CQ */
+ tgt->conn_db->cq_arm.lo = -1;
+ tgt->conn_db->rq_prod = 0x8000;
+
+ return 0;
+
+mem_alloc_failure:
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ return -ENOMEM;
+}
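
Both PBL loops above store each queue page's DMA address as two consecutive 32-bit words, low half first, so the firmware can walk the queue page by page. A standalone sketch of that layout, with PAGE_SZ standing in for PAGE_SIZE and the function name made up for illustration:

    #include <stddef.h>
    #include <stdint.h>

    #define PAGE_SZ 4096u   /* illustrative; the driver uses PAGE_SIZE */

    /* Write one (lo32, hi32) pair per page of the DMA region into pbl. */
    static void fill_pbl(uint32_t *pbl, uint64_t dma_base, size_t mem_size)
    {
        size_t num_pages = mem_size / PAGE_SZ;
        uint64_t page = dma_base;

        while (num_pages--) {
            *pbl++ = (uint32_t)page;           /* low 32 bits of the address */
            *pbl++ = (uint32_t)(page >> 32);   /* high 32 bits */
            page += PAGE_SZ;
        }
    }
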
+
+/**
+ * bnx2fc_free_session_resc - free QP resources for the session
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ *
+ * Free QP resources - SQ/RQ/CQ/XFERQ/CONFQ memory, PBLs, conn DB and LCQ
+ */
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+ if (tgt->ctx_base) {
+ iounmap(tgt->ctx_base);
+ tgt->ctx_base = NULL;
+ }
+ /* Free LCQ */
+ if (tgt->lcq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ tgt->lcq, tgt->lcq_dma);
+ tgt->lcq = NULL;
+ }
+ /* Free connDB */
+ if (tgt->conn_db) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
+ tgt->conn_db, tgt->conn_db_dma);
+ tgt->conn_db = NULL;
+ }
+ /* Free confq and confq pbl */
+ if (tgt->confq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
+ tgt->confq_pbl, tgt->confq_pbl_dma);
+ tgt->confq_pbl = NULL;
+ }
+ if (tgt->confq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ tgt->confq, tgt->confq_dma);
+ tgt->confq = NULL;
+ }
+ /* Free XFERQ */
+ if (tgt->xferq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ tgt->xferq, tgt->xferq_dma);
+ tgt->xferq = NULL;
+ }
+ /* Free RQ PBL and RQ */
+ if (tgt->rq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ tgt->rq_pbl, tgt->rq_pbl_dma);
+ tgt->rq_pbl = NULL;
+ }
+ if (tgt->rq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ tgt->rq, tgt->rq_dma);
+ tgt->rq = NULL;
+ }
+ /* Free CQ */
+ if (tgt->cq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ tgt->cq, tgt->cq_dma);
+ tgt->cq = NULL;
+ }
+ /* Free SQ */
+ if (tgt->sq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ tgt->sq, tgt->sq_dma);
+ tgt->sq = NULL;
+ }
+}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index e1ca5fe7e6bb..cfd59023227b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -360,7 +360,7 @@ struct bnx2i_hba {
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_INIT_FAILED 31
unsigned int mtu_supported;
- #define BNX2I_MAX_MTU_SUPPORTED 1500
+ #define BNX2I_MAX_MTU_SUPPORTED 9000
struct Scsi_Host *shost;
@@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
struct bnx2i_cmd *cmnd);
extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 96505e3ab986..1da34c019b8a 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -445,6 +445,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
}
/**
+ * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
+ * @bnx2i_conn: iscsi connection
+ * @mtask: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * Prepares and posts an iSCSI Text request WQE to the CNIC firmware.
+ */
+int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *mtask)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_text_request *text_wqe;
+ struct iscsi_text *text_hdr;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+ text_hdr = (struct iscsi_text *)mtask->hdr;
+ text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
+
+ memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
+
+ text_wqe->op_code = text_hdr->opcode;
+ text_wqe->op_attr = text_hdr->flags;
+ text_wqe->data_length = ntoh24(text_hdr->dlength);
+ text_wqe->itt = mtask->itt |
+ (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
+ text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
+
+ text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+
+ text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+ text_wqe->resp_bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+ dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
+ (bnx2i_conn->gen_pdu.resp_buf_size <<
+ ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+ text_wqe->resp_buffer = dword;
+ text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+ text_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+ text_wqe->num_bds = 1;
+ text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
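
The itt field of the WQE above carries more than the task tag: the task type is OR'ed into the upper bits via ISCSI_TEXT_REQUEST_TYPE_SHIFT. A hedged sketch of that packing; the shift value and names here are assumptions for illustration, not the firmware interface:

    #include <stdint.h>

    #define TYPE_SHIFT 14u   /* illustrative stand-in for the real shift */

    /* Combine a task tag with a task-type code in the upper bits. */
    static uint32_t pack_itt(uint16_t itt, uint8_t task_type)
    {
        return (uint32_t)itt | ((uint32_t)task_type << TYPE_SHIFT);
    }

On completion the tag is recovered by masking the type bits back off, which is what the ISCSI_LOGIN_RESPONSE_INDEX mask does in bnx2i_process_text_resp() further down.
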
+
+
+/**
* bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
@@ -490,15 +540,18 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+
+ memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));
+
nopout_wqe->op_code = nopout_hdr->opcode;
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_hdr->lun[0];
+ u32 tmp = nopout_wqe->lun[0];
/* 57710 requires LUN field to be swapped */
- nopout_hdr->lun[0] = nopout_hdr->lun[1];
- nopout_hdr->lun[1] = tmp;
+ nopout_wqe->lun[0] = nopout_wqe->lun[1];
+ nopout_wqe->lun[1] = tmp;
}
nopout_wqe->itt = ((u16)task->itt |
@@ -1425,6 +1478,68 @@ done:
return 0;
}
+
+/**
+ * bnx2i_process_text_resp - handle iSCSI text response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * Processes an iSCSI Text Response CQE and completes it to the open-iscsi user daemon.
+ */
+static int bnx2i_process_text_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_text_response *text;
+ struct iscsi_text_rsp *resp_hdr;
+ int pld_len;
+ int pad_len;
+
+ text = (struct bnx2i_text_response *) cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = text->op_code;
+ resp_hdr->flags = text->response_flags;
+ resp_hdr->hlength = 0;
+
+ hton24(resp_hdr->dlength, text->data_length);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->ttt = cpu_to_be32(text->ttt);
+ resp_hdr->statsn = task->hdr->exp_statsn;
+ resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
+ pld_len = text->data_length;
+ bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
+ pld_len;
+ pad_len = 0;
+ if (pld_len & 0x3)
+ pad_len = 4 - (pld_len % 4);
+
+ if (pad_len) {
+ int i;
+ for (i = 0; i < pad_len; i++) {
+ bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+ bnx2i_conn->gen_pdu.resp_wr_ptr++;
+ }
+ }
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_wr_ptr -
+ bnx2i_conn->gen_pdu.resp_buf);
+done:
+ spin_unlock(&session->lock);
+ return 0;
+}
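
iSCSI PDU payloads are padded to a 4-byte boundary, which is why the loop above appends up to three zero bytes past the payload. The computation can be sketched on its own; buf and len are illustrative, and the caller must guarantee room for the pad bytes:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Zero-pad a payload to the next 4-byte boundary; returns padded length. */
    static size_t pad_to_dword(uint8_t *buf, size_t len)
    {
        size_t pad = (len & 0x3) ? 4 - (len % 4) : 0;

        memset(buf + len, 0, pad);   /* same effect as the driver's byte loop */
        return len + pad;
    }
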
+
+
/**
* bnx2i_process_tmf_resp - this function handles iscsi TMF response
* @session: iscsi session pointer
@@ -1766,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
bnx2i_process_tmf_resp(session, bnx2i_conn,
qp->cq_cons_qe);
break;
+ case ISCSI_OP_TEXT_RSP:
+ bnx2i_process_text_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
case ISCSI_OP_LOGOUT_RSP:
bnx2i_process_logout_resp(session, bnx2i_conn,
qp->cq_cons_qe);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 72a7b2d4a439..1d24a2819736 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.6.2.2"
-#define DRV_MODULE_RELDATE "Nov 23, 2010"
+#define DRV_MODULE_VERSION "2.6.2.3"
+#define DRV_MODULE_RELDATE "Dec 31, 2010"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -29,7 +29,7 @@ static char version[] __devinitdata =
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
"Eddie Wai <eddie.wai@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
" iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
(hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712E)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
else
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
@@ -161,6 +163,21 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
+ if (!hba->cnic->max_iscsi_conn) {
+ printk(KERN_ALERT "bnx2i: dev %s does not support "
+ "iSCSI\n", hba->netdev->name);
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ mutex_lock(&bnx2i_dev_lock);
+ list_del_init(&hba->link);
+ adapter_count--;
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ mutex_unlock(&bnx2i_dev_lock);
+ bnx2i_free_hba(hba);
+ }
+ return;
+ }
bnx2i_send_fw_iscsi_init_msg(hba);
while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f0dce26593eb..1809f9ccc4ce 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
case ISCSI_OP_SCSI_TMFUNC:
rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
break;
+ case ISCSI_OP_TEXT:
+ rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
+ break;
default:
iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
"send_gen: unsupported op 0x%x\n",
@@ -1455,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
/**
- * bnx2i_conn_get_param - return iscsi connection parameter to caller
- * @cls_conn: pointer to iscsi cls conn
+ * bnx2i_ep_get_param - return iscsi ep parameter to caller
+ * @ep: pointer to iscsi endpoint
* @param: parameter type identifier
* @buf: buffer pointer
*
- * returns iSCSI connection parameters
+ * returns iSCSI ep parameters
*/
-static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct bnx2i_conn *bnx2i_conn = conn->dd_data;
- int len = 0;
+ struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+ struct bnx2i_hba *hba = bnx2i_ep->hba;
+ int len = -ENOTCONN;
- if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
- goto out;
+ if (!hba)
+ return -ENOTCONN;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%hu\n",
- bnx2i_conn->ep->cm_sk->dst_port);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
+ mutex_unlock(&hba->net_dev_lock);
break;
case ISCSI_PARAM_CONN_ADDRESS:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%pI4\n",
- &bnx2i_conn->ep->cm_sk->dst_ip);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
+ mutex_unlock(&hba->net_dev_lock);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
-out:
+
return len;
}
@@ -1935,13 +1936,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
cnic_dev_10g = 1;
switch (bnx2i_ep->state) {
- case EP_STATE_CONNECT_FAILED:
case EP_STATE_CLEANUP_FAILED:
case EP_STATE_OFLD_FAILED:
case EP_STATE_DISCONN_TIMEDOUT:
ret = 0;
break;
case EP_STATE_CONNECT_START:
+ case EP_STATE_CONNECT_FAILED:
case EP_STATE_CONNECT_COMPL:
case EP_STATE_ULP_UPDATE_START:
case EP_STATE_ULP_UPDATE_COMPL:
@@ -2167,7 +2168,8 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.name = "bnx2i",
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
CAP_MULTI_R2T | CAP_DATADGST |
- CAP_DATA_PATH_OFFLOAD,
+ CAP_DATA_PATH_OFFLOAD |
+ CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH |
ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN |
@@ -2200,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.bind_conn = bnx2i_conn_bind,
.destroy_conn = bnx2i_conn_destroy,
.set_param = iscsi_set_param,
- .get_conn_param = bnx2i_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = bnx2i_host_get_param,
.start_conn = bnx2i_conn_start,
@@ -2209,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.xmit_task = bnx2i_task_xmit,
.get_stats = bnx2i_conn_get_stats,
/* TCP connect - disconnect - option-2 interface calls */
+ .get_ep_param = bnx2i_ep_get_param,
.ep_connect = bnx2i_ep_connect,
.ep_poll = bnx2i_ep_poll,
.ep_disconnect = bnx2i_ep_disconnect,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index a129a170b47b..fc2cdb62f53b 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* owner and name should be set already */
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1108,10 +1109,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
csk, idx, npods, gl);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
- /* hold on to the skb until we clear the ddp mapping */
- skb_get(skb);
+ if (!skb)
+ return -ENOMEM;
ulp_mem_io_set_hdr(skb, pm_addr);
cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
@@ -1136,56 +1138,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
cdev, idx, npods, tag);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
if (!skb) {
- pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
+ pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
tag, idx, i, npods);
continue;
}
- ddp->gl_skb[idx] = NULL;
- memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
ulp_mem_io_set_hdr(skb, pm_addr);
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(cdev->lldev, skb);
}
}
-static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++, idx++)
- if (ddp->gl_skb[idx]) {
- kfree_skb(ddp->gl_skb[idx]);
- ddp->gl_skb[idx] = NULL;
- }
-}
-
-static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
- int cnt, gfp_t gfp)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++) {
- struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
- PPOD_SIZE, 0, gfp);
- if (skb)
- ddp->gl_skb[idx + i] = skb;
- else {
- ddp_free_gl_skb(ddp, idx, i);
- return -ENOMEM;
- }
- }
- return 0;
-}
-
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
unsigned int tid, int pg_idx, bool reply)
{
@@ -1316,8 +1282,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
}
tdev->ulp_iscsi = ddp;
- cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
- cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
index 5f5e3394b594..20593fd69d8f 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -24,10 +24,21 @@
extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
-#define cxgb3i_get_private_ipv4addr(ndev) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
-#define cxgb3i_set_private_ipv4addr(ndev, addr) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
+static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev)
+{
+ return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr;
+}
+
+static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev,
+ unsigned int addr)
+{
+ struct port_info *pi = (struct port_info *)netdev_priv(ndev);
+
+ pi->iscsic.flags = addr ? 1 : 0;
+ pi->iscsi_ipv4addr = addr;
+ if (addr)
+ memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN);
+}
struct cpl_iscsi_hdr_norss {
union opcode_tid ot;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 8c04fada710b..f3a4cd7cf782 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.name = DRV_MODULE_NAME,
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1425,8 +1426,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
cxgbi_ddp_page_size_factor(pgsz_factor);
cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
- cdev->csk_ddp_free_gl_skb = NULL;
- cdev->csk_ddp_alloc_gl_skb = NULL;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index a24dff9f9163..de764ea7419d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -530,6 +530,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk->dst = dst;
csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
csk->daddr.sin_port = daddr->sin_port;
+ csk->daddr.sin_family = daddr->sin_family;
csk->saddr.sin_addr.s_addr = rt->rt_src;
return csk;
@@ -1264,12 +1265,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
return idx;
}
- if (cdev->csk_ddp_alloc_gl_skb) {
- err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
- if (err < 0)
- goto unmark_entries;
- }
-
tag = cxgbi_ddp_tag_base(tformat, sw_tag);
tag |= idx << PPOD_IDX_SHIFT;
@@ -1280,11 +1275,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
hdr.page_offset = htonl(gl->offset);
err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
- if (err < 0) {
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
+ if (err < 0)
goto unmark_entries;
- }
ddp->idx_last = idx;
log_debug(1 << CXGBI_DBG_DDP,
@@ -1350,8 +1342,6 @@ static void ddp_destroy(struct kref *kref)
>> PPOD_PAGES_SHIFT;
pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
kfree(gl);
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, i, npods);
i += npods;
} else
i++;
@@ -1394,8 +1384,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev,
return -ENOMEM;
}
ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
- ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
- ppmax * sizeof(struct cxgbi_gather_list *));
cdev->ddp = ddp;
spin_lock_init(&ddp->map_lock);
@@ -1895,13 +1883,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
- u8 submode = 0;
+ if (hcrc || dcrc) {
+ u8 submode = 0;
- if (hcrc)
- submode |= 1;
- if (dcrc)
- submode |= 2;
- cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ if (hcrc)
+ submode |= 1;
+ if (dcrc)
+ submode |= 2;
+ cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ } else
+ cxgbi_skcb_ulp_mode(skb) = 0;
}
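
The refactored tx_skb_setmode() above packs the ULP mode into the high nibble of one byte and the CRC submode flags into the low nibble (bit 0 header digest, bit 1 data digest), writing 0 when neither digest is on. A standalone sketch of that byte layout; the ULP2_MODE_ISCSI value is assumed for illustration:

    #include <stdint.h>

    #define ULP2_MODE_ISCSI 2u   /* assumed value, for illustration only */

    static uint8_t ulp_mode_byte(int hcrc, int dcrc)
    {
        uint8_t submode = 0;

        if (hcrc)
            submode |= 1;   /* header digest offload */
        if (dcrc)
            submode |= 2;   /* data digest offload */
        return submode ? (uint8_t)((ULP2_MODE_ISCSI << 4) | submode) : 0;
    }
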
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
@@ -2197,32 +2188,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf)
{
- struct iscsi_conn *iconn = cls_conn->dd_data;
+ struct cxgbi_endpoint *cep = ep->dd_data;
+ struct cxgbi_sock *csk;
int len;
log_debug(1 << CXGBI_DBG_ISCSI,
- "cls_conn 0x%p, param %d.\n", cls_conn, param);
+ "cls_conn 0x%p, param %d.\n", ep, param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%hu\n", iconn->portal_port);
- spin_unlock_bh(&iconn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%s\n", iconn->portal_address);
- spin_unlock_bh(&iconn->session->lock);
- break;
+ if (!cep)
+ return -ENOTCONN;
+
+ csk = cep->csk;
+ if (!csk)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &csk->daddr, param, buf);
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
-EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
+EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
@@ -2289,11 +2282,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
cxgbi_conn_max_xmit_dlength(conn);
cxgbi_conn_max_recv_dlength(conn);
- spin_lock_bh(&conn->session->lock);
- sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
- conn->portal_port = ntohs(csk->daddr.sin_port);
- spin_unlock_bh(&conn->session->lock);
-
log_debug(1 << CXGBI_DBG_ISCSI,
"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
cls_session, cls_conn, ep, cconn, csk);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index c57d59db000c..0a20fd5f7102 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -131,7 +131,6 @@ struct cxgbi_ddp_info {
unsigned int rsvd_tag_mask;
spinlock_t map_lock;
struct cxgbi_gather_list **gl_map;
- struct sk_buff **gl_skb;
};
#define DDP_PGIDX_MAX 4
@@ -536,8 +535,6 @@ struct cxgbi_device {
struct cxgbi_ddp_info *ddp;
void (*dev_ddp_cleanup)(struct cxgbi_device *);
- void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
- int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
unsigned int, unsigned int,
struct cxgbi_gather_list *);
@@ -715,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
struct iscsi_cls_conn *, u64, int);
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index b837c5b3c8f9..564e6ecd17c2 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -25,16 +25,9 @@
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
-struct scsi_dh_devinfo_list {
- struct list_head node;
- char vendor[9];
- char model[17];
- struct scsi_device_handler *handler;
-};
-
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
-static LIST_HEAD(scsi_dh_dev_list);
+static int scsi_dh_list_idx = 1;
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
-
-static struct scsi_device_handler *
-scsi_dh_cache_lookup(struct scsi_device *sdev)
+static struct scsi_device_handler *get_device_handler_by_idx(int idx)
{
- struct scsi_dh_devinfo_list *tmp;
- struct scsi_device_handler *found_dh = NULL;
+ struct scsi_device_handler *tmp, *found = NULL;
spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
- if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
- !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
- found_dh = tmp->handler;
+ list_for_each_entry(tmp, &scsi_dh_list, list) {
+ if (tmp->idx == idx) {
+ found = tmp;
break;
}
}
spin_unlock(&list_lock);
-
- return found_dh;
-}
-
-static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
- struct scsi_device *sdev)
-{
- int i, found = 0;
-
- for(i = 0; scsi_dh->devlist[i].vendor; i++) {
- if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
- strlen(scsi_dh->devlist[i].vendor)) &&
- !strncmp(sdev->model, scsi_dh->devlist[i].model,
- strlen(scsi_dh->devlist[i].model))) {
- found = 1;
- break;
- }
- }
return found;
}
@@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device *sdev)
{
struct scsi_device_handler *found_dh = NULL;
- struct scsi_dh_devinfo_list *tmp;
+ int idx;
- found_dh = scsi_dh_cache_lookup(sdev);
- if (found_dh)
- return found_dh;
+ idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
+ SCSI_DEVINFO_DH);
+ found_dh = get_device_handler_by_idx(idx);
- if (scsi_dh) {
- if (scsi_dh_handler_lookup(scsi_dh, sdev))
- found_dh = scsi_dh;
- } else {
- struct scsi_device_handler *tmp_dh;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
- if (scsi_dh_handler_lookup(tmp_dh, sdev))
- found_dh = tmp_dh;
- }
- spin_unlock(&list_lock);
- }
-
- if (found_dh) { /* If device is found, add it to the cache */
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (tmp) {
- strncpy(tmp->vendor, sdev->vendor, 8);
- strncpy(tmp->model, sdev->model, 16);
- tmp->vendor[8] = '\0';
- tmp->model[16] = '\0';
- tmp->handler = found_dh;
- spin_lock(&list_lock);
- list_add(&tmp->node, &scsi_dh_dev_list);
- spin_unlock(&list_lock);
- } else {
- found_dh = NULL;
- }
- }
+ if (scsi_dh && found_dh != scsi_dh)
+ found_dh = NULL;
return found_dh;
}
@@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
+ int i;
+
if (get_device_handler(scsi_dh->name))
return -EBUSY;
spin_lock(&list_lock);
+ scsi_dh->idx = scsi_dh_list_idx++;
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
+
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_add_keyed(0,
+ scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ NULL,
+ scsi_dh->idx,
+ SCSI_DEVINFO_DH);
+ }
+
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
@@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- struct scsi_dh_devinfo_list *tmp, *pos;
+ int i;
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
@@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ SCSI_DEVINFO_DH);
+ }
+
spin_lock(&list_lock);
list_del(&scsi_dh->list);
- list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
- if (pos->handler == scsi_dh) {
- list_del(&pos->node);
- kfree(pos);
- }
- }
spin_unlock(&list_lock);
printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
@@ -576,6 +533,10 @@ static int __init scsi_dh_init(void)
{
int r;
+ r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
+ if (r)
+ return r;
+
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
@@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+ scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}
module_init(scsi_dh_init);
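
The scsi_dh change above drops the private vendor/model cache in favor of the keyed device-info table: each handler's (vendor, model) pairs are registered under SCSI_DEVINFO_DH with the handler's index as the value, a match returns that index, and the index is then resolved back to a handler. A simplified sketch of the index-to-handler half of that lookup, with structures and locking reduced to the bare minimum:

    #include <stddef.h>

    struct handler {
        struct handler *next;   /* singly linked registry, for the sketch */
        int idx;                /* index handed to the devinfo table */
        const char *name;
    };

    static struct handler *handlers;   /* head of the registered-handler list */

    static struct handler *handler_by_idx(int idx)
    {
        struct handler *h;

        for (h = handlers; h; h = h->next)
            if (h->idx == idx)
                return h;
        return NULL;   /* idx 0 (no devinfo match) always lands here */
    }

Starting the index counter at 1, as the patch does, keeps 0 free to mean "no devinfo match".
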
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6b729324b8d3..7cae0bc85390 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error)
{
struct alua_dh_data *h = req->end_io_data;
struct scsi_sense_hdr sense_hdr;
- unsigned err = SCSI_DH_IO;
+ unsigned err = SCSI_DH_OK;
if (error || host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE)
+ msg_byte(req->errors) != COMMAND_COMPLETE) {
+ err = SCSI_DH_IO;
goto done;
+ }
- if (err == SCSI_DH_IO && h->senselen > 0) {
+ if (h->senselen > 0) {
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
&sense_hdr);
if (!err) {
@@ -285,7 +287,8 @@ static void stpg_endio(struct request *req, int error)
print_alua_state(h->state));
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -303,7 +306,6 @@ done:
static unsigned submit_stpg(struct alua_dh_data *h)
{
struct request *rq;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
int stpg_len = 8;
struct scsi_device *sdev = h->sdev;
@@ -332,7 +334,7 @@ static unsigned submit_stpg(struct alua_dh_data *h)
rq->end_io_data = h;
blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
- return err;
+ return SCSI_DH_OK;
}
/*
@@ -730,7 +732,9 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
{"NETAPP", "LUN"},
+ {"NETAPP", "LUN C-Mode"},
{"AIX", "NVDISK"},
+ {"Promise", "VTrak"},
{NULL, NULL}
};
@@ -759,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err = SCSI_DH_OK;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6faf472f7537..48441f6908a4 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e3916641e627..b479f1eef968 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error)
}
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -338,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int ret;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ + sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
HP_SW_NAME);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 5be3ae15cb71..293c183dfe6d 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
}
static struct request *rdac_failover_get(struct scsi_device *sdev,
- struct rdac_dh_data *h)
+ struct rdac_dh_data *h, struct list_head *list)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
+ struct rdac_queue_data *qdata;
+ u8 *lun_table;
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
@@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
rdac_pg->subpage_code = 0x1;
rdac_pg->page_len[0] = 0x01;
rdac_pg->page_len[1] = 0x28;
+ lun_table = rdac_pg->lun_table;
} else {
struct rdac_pg_legacy *rdac_pg;
@@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
rdac_pg->page_len = 0x68;
+ lun_table = rdac_pg->lun_table;
}
common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
common->rdac_options = RDAC_FORCED_QUIESENCE;
+ list_for_each_entry(qdata, list, entry) {
+ lun_table[qdata->h->lun] = 0x81;
+ }
+
/* get request for block layer packet command */
rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
if (!rq)
@@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work)
int err, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
- u8 *lun_table;
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
- if (ctlr->use_ms10)
- lun_table = ctlr->mode_select.expanded.lun_table;
- else
- lun_table = ctlr->mode_select.legacy.lun_table;
-
retry:
err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev, h);
+ rq = rdac_failover_get(sdev, h, &list);
if (!rq)
goto done;
- list_for_each_entry(qdata, &list, entry) {
- lun_table[qdata->h->lun] = 0x81;
- }
-
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"%s MODE_SELECT command",
(char *) h->ctlr->array_name, h->ctlr->index,
@@ -769,6 +767,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"DELL", "MD32xx"},
{"DELL", "MD32xxi"},
{"DELL", "MD36xxi"},
+ {"DELL", "MD36xxf"},
{"LSI", "INF-01-00"},
{"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
@@ -800,7 +799,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
int err;
char array_name[ARRAY_LABEL_LEN];
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
@@ -906,4 +905,5 @@ module_exit(rdac_exit);
MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index 950f27615c76..f6d37d0271f7 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
+
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 3becc6a20a4f..bde6ee5333eb 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,7 @@
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
+#include <linux/workqueue.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -58,6 +59,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
DEFINE_MUTEX(fcoe_config_mutex);
+static struct workqueue_struct *fcoe_wq;
+
/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);
@@ -72,7 +75,6 @@ static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
-static void fcoe_clean_pending_queue(struct fc_lport *);
static void fcoe_percpu_clean(struct fc_lport *);
static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
@@ -80,7 +82,6 @@ static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
-static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
@@ -101,10 +102,11 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
-static int fcoe_create(const char *, struct kernel_param *);
-static int fcoe_destroy(const char *, struct kernel_param *);
-static int fcoe_enable(const char *, struct kernel_param *);
-static int fcoe_disable(const char *, struct kernel_param *);
+static bool fcoe_match(struct net_device *netdev);
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
+static int fcoe_destroy(struct net_device *netdev);
+static int fcoe_enable(struct net_device *netdev);
+static int fcoe_disable(struct net_device *netdev);
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
@@ -117,24 +119,6 @@ static void fcoe_recv_frame(struct sk_buff *skb);
static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
-__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
-module_param_call(create_vn2vn, fcoe_create, NULL,
- (void *)FIP_MODE_VN2VN, S_IWUSR);
-__MODULE_PARM_TYPE(create_vn2vn, "string");
-MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
- "on an Ethernet interface");
-module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
-module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(enable, "string");
-MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
-module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(disable, "string");
-MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
@@ -145,8 +129,8 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
-static struct scsi_transport_template *fcoe_transport_template;
-static struct scsi_transport_template *fcoe_vport_transport_template;
+static struct scsi_transport_template *fcoe_nport_scsi_transport;
+static struct scsi_transport_template *fcoe_vport_scsi_transport;
static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
@@ -163,7 +147,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_transport_function = {
+struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -203,7 +187,7 @@ struct fc_function_template fcoe_transport_function = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_transport_function = {
+struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -354,10 +338,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
struct fcoe_interface *fcoe;
int err;
+ if (!try_module_get(THIS_MODULE)) {
+ FCOE_NETDEV_DBG(netdev,
+ "Could not get a reference to the module\n");
+ fcoe = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
if (!fcoe) {
FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
- return NULL;
+ fcoe = ERR_PTR(-ENOMEM);
+ goto out_nomod;
}
dev_hold(netdev);
@@ -376,9 +368,15 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
- return NULL;
+ fcoe = ERR_PTR(err);
+ goto out_nomod;
}
+ goto out;
+
+out_nomod:
+ module_put(THIS_MODULE);
+out:
return fcoe;
}
@@ -440,6 +438,7 @@ static void fcoe_interface_release(struct kref *kref)
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
+ module_put(THIS_MODULE);
}
/**
@@ -503,7 +502,7 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
@@ -559,17 +558,6 @@ static int fcoe_lport_config(struct fc_lport *lport)
}
/**
- * fcoe_queue_timer() - The fcoe queue timer
- * @lport: The local port
- *
- * Calls fcoe_check_wait_queue on timeout
- */
-static void fcoe_queue_timer(ulong lport)
-{
- fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
-}
-
-/**
* fcoe_get_wwn() - Get the world wide name from LLD if it supports it
* @netdev: the associated net device
* @wwn: the output WWN
@@ -648,7 +636,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
/*
* Determine max frame size based on underlying device and optional
@@ -706,9 +694,9 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
if (lport->vport)
- lport->host->transportt = fcoe_vport_transport_template;
+ lport->host->transportt = fcoe_vport_scsi_transport;
else
- lport->host->transportt = fcoe_transport_template;
+ lport->host->transportt = fcoe_nport_scsi_transport;
/* add the new host to the SCSI-ml */
rc = scsi_add_host(lport->host, dev);
@@ -758,7 +746,7 @@ bool fcoe_oem_match(struct fc_frame *fp)
static inline int fcoe_em_config(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_interface *oldfcoe = NULL;
struct net_device *old_real_dev, *cur_real_dev;
u16 min_xid = FCOE_MIN_XID;
@@ -842,7 +830,7 @@ skip_oem:
static void fcoe_if_destroy(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
@@ -884,7 +872,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Release the Scsi_Host */
scsi_host_put(lport->host);
- module_put(THIS_MODULE);
}
/**
@@ -939,8 +926,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
struct net_device *netdev = fcoe->netdev;
- struct fc_lport *lport = NULL;
+ struct fc_lport *lport, *n_port;
struct fcoe_port *port;
+ struct Scsi_Host *shost;
int rc;
/*
* parent is only a vport if npiv is 1,
@@ -950,13 +938,11 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
FCOE_NETDEV_DBG(netdev, "Create Interface\n");
- if (!npiv) {
- lport = libfc_host_alloc(&fcoe_shost_template,
- sizeof(struct fcoe_port));
- } else {
- lport = libfc_vport_create(vport,
- sizeof(struct fcoe_port));
- }
+ if (!npiv)
+ lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
+ else
+ lport = libfc_vport_create(vport, sizeof(*port));
+
if (!lport) {
FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
rc = -ENOMEM;
@@ -964,7 +950,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
port = lport_priv(lport);
port->lport = lport;
- port->fcoe = fcoe;
+ port->priv = fcoe;
+ port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
+ port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
/* configure a fc_lport including the exchange manager */
@@ -1007,24 +995,27 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
- if (!npiv) {
- /*
- * fcoe_em_alloc() and fcoe_hostlist_add() both
- * need to be atomic with respect to other changes to the
- * hostlist since fcoe_em_alloc() looks for an existing EM
- * instance on host list updated by fcoe_hostlist_add().
- *
- * This is currently handled through the fcoe_config_mutex
- * begin held.
- */
-
+ /*
+ * fcoe_em_alloc() and fcoe_hostlist_add() both
+ * need to be atomic with respect to other changes to the
+ * hostlist since fcoe_em_alloc() looks for an existing EM
+ * instance on host list updated by fcoe_hostlist_add().
+ *
+ * This is currently handled through the fcoe_config_mutex
+ * begin held.
+ */
+ if (!npiv)
/* lport exch manager allocation */
rc = fcoe_em_config(lport);
- if (rc) {
- FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
- "for the interface\n");
- goto out_lp_destroy;
- }
+ else {
+ shost = vport_to_shost(vport);
+ n_port = shost_priv(shost);
+ rc = fc_exch_mgr_list_clone(n_port, lport);
+ }
+
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
+ goto out_lp_destroy;
}
fcoe_interface_get(fcoe);
@@ -1048,11 +1039,12 @@ out:
static int __init fcoe_if_init(void)
{
/* attach to scsi transport */
- fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
- fcoe_vport_transport_template =
- fc_attach_transport(&fcoe_vport_transport_function);
+ fcoe_nport_scsi_transport =
+ fc_attach_transport(&fcoe_nport_fc_functions);
+ fcoe_vport_scsi_transport =
+ fc_attach_transport(&fcoe_vport_fc_functions);
- if (!fcoe_transport_template) {
+ if (!fcoe_nport_scsi_transport) {
printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
return -ENODEV;
}
@@ -1069,10 +1061,10 @@ static int __init fcoe_if_init(void)
*/
int __exit fcoe_if_exit(void)
{
- fc_release_transport(fcoe_transport_template);
- fc_release_transport(fcoe_vport_transport_template);
- fcoe_transport_template = NULL;
- fcoe_vport_transport_template = NULL;
+ fc_release_transport(fcoe_nport_scsi_transport);
+ fc_release_transport(fcoe_vport_scsi_transport);
+ fcoe_nport_scsi_transport = NULL;
+ fcoe_vport_scsi_transport = NULL;
return 0;
}
@@ -1359,108 +1351,22 @@ err2:
}
/**
- * fcoe_start_io() - Start FCoE I/O
- * @skb: The packet to be transmitted
- *
- * This routine is called from the net device to start transmitting
- * FCoE packets.
- *
- * Returns: 0 for success
- */
-static inline int fcoe_start_io(struct sk_buff *skb)
-{
- struct sk_buff *nskb;
- int rc;
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- rc = dev_queue_xmit(nskb);
- if (rc != 0)
- return rc;
- kfree_skb(skb);
- return 0;
-}
-
-/**
- * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
* @skb: The packet to be transmitted
* @tlen: The total length of the trailer
*
- * This routine allocates a page for frame trailers. The page is re-used if
- * there is enough room left on it for the current trailer. If there isn't
- * enough buffer left a new page is allocated for the trailer. Reference to
- * the page from this function as well as the skbs using the page fragments
- * ensure that the page is freed at the appropriate time.
- *
* Returns: 0 for success
*/
-static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
- struct page *page;
+ int rc;
fps = &get_cpu_var(fcoe_percpu);
- page = fps->crc_eof_page;
- if (!page) {
- page = alloc_page(GFP_ATOMIC);
- if (!page) {
- put_cpu_var(fcoe_percpu);
- return -ENOMEM;
- }
- fps->crc_eof_page = page;
- fps->crc_eof_offset = 0;
- }
-
- get_page(page);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
- fps->crc_eof_offset, tlen);
- skb->len += tlen;
- skb->data_len += tlen;
- skb->truesize += tlen;
- fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
-
- if (fps->crc_eof_offset >= PAGE_SIZE) {
- fps->crc_eof_page = NULL;
- fps->crc_eof_offset = 0;
- put_page(page);
- }
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
put_cpu_var(fcoe_percpu);
- return 0;
-}
-/**
- * fcoe_fc_crc() - Calculates the CRC for a given frame
- * @fp: The frame to be checksumed
- *
- * This uses crc32() routine to calculate the CRC for a frame
- *
- * Return: The 32 bit CRC value
- */
-u32 fcoe_fc_crc(struct fc_frame *fp)
-{
- struct sk_buff *skb = fp_skb(fp);
- struct skb_frag_struct *frag;
- unsigned char *data;
- unsigned long off, len, clen;
- u32 crc;
- unsigned i;
-
- crc = crc32(~0, skb->data, skb_headlen(skb));
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- off = frag->page_offset;
- len = frag->size;
- while (len > 0) {
- clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
- data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
- KM_SKB_DATA_SOFTIRQ);
- crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
- kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
- off += clen;
- len -= clen;
- }
- }
- return crc;
+ return rc;
}
/**
@@ -1483,7 +1389,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int tlen; /* trailer length */
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
u8 sof, eof;
struct fcoe_hdr *hp;
@@ -1524,7 +1430,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* copy port crc and eof to the skb buff */
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag;
- if (fcoe_get_paged_crc_eof(skb, tlen)) {
+ if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
kfree_skb(skb);
return -ENOMEM;
}
@@ -1604,6 +1510,56 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
}
/**
+ * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ *
+ * Return: 0 on passing filtering checks
+ */
+static inline int fcoe_filter_frames(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fcoe_interface *fcoe;
+ struct fc_frame_header *fh;
+ struct sk_buff *skb = (struct sk_buff *)fp;
+ struct fcoe_dev_stats *stats;
+
+ /*
+	 * We only check the CRC here if no offload is available;
+	 * for solicited FCP data the FCP layer checks it during
+	 * the copy.
+ */
+ if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ else
+ fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+	fh = fc_frame_header_get(fp);
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
+ return 0;
+
+ fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
+ if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
+ return -EINVAL;
+ }
+
+ if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
+ le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ return 0;
+ }
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ return -EINVAL;
+}
+
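
A note on the CRC convention used in fcoe_filter_frames() above: per FC-FS, the
frame CRC is the bitwise complement of a CRC-32 computed with an all-ones seed,
which is why the filter compares the trailer against ~crc32(~0, skb->data,
skb->len). A minimal standalone sketch of the same check (fc_crc_ok() is an
illustrative helper, not a symbol from this patch; it assumes <linux/crc32.h>):

	static bool fc_crc_ok(const void *data, size_t len, __le32 received)
	{
		/* FC frame CRC is the complement of crc32 seeded with ~0 */
		return le32_to_cpu(received) == ~crc32(~0, data, len);
	}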
+/**
* fcoe_recv_frame() - process a single received frame
* @skb: frame to process
*/
@@ -1613,7 +1569,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fcoe_dev_stats *stats;
- struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_port *port;
@@ -1644,7 +1599,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
* was done in fcoe_rcv already.
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- fh = (struct fc_frame_header *) skb_transport_header(skb);
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
@@ -1677,35 +1631,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
if (pskb_trim(skb, fr_len))
goto drop;
- /*
- * We only check CRC if no offload is available and if it is
- * it's solicited data, in which case, the FCP layer would
- * check it during the copy.
- */
- if (lport->crc_offload &&
- skb->ip_summed == CHECKSUM_UNNECESSARY)
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
- else
- fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
-
- fh = fc_frame_header_get(fp);
- if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
- fh->fh_type != FC_TYPE_FCP) &&
- (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
- printk(KERN_WARNING "fcoe: dropping "
- "frame with CRC error\n");
- stats->InvalidCRCCount++;
- goto drop;
- }
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ if (!fcoe_filter_frames(lport, fp)) {
+ put_cpu();
+ fc_exch_recv(lport, fp);
+ return;
}
- put_cpu();
- fc_exch_recv(lport, fp);
- return;
-
drop:
stats->ErrorFrames++;
put_cpu();
@@ -1744,64 +1674,6 @@ int fcoe_percpu_receive_thread(void *arg)
}
/**
- * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
- * @lport: The local port whose backlog is to be cleared
- *
- * This empties the wait_queue, dequeues the head of the wait_queue queue
- * and calls fcoe_start_io() for each packet. If all skb have been
- * transmitted it returns the qlen. If an error occurs it restores
- * wait_queue (to try again later) and returns -1.
- *
- * The wait_queue is used when the skb transmit fails. The failed skb
- * will go in the wait_queue which will be emptied by the timer function or
- * by the next skb transmit.
- */
-static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
-{
- struct fcoe_port *port = lport_priv(lport);
- int rc;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (skb)
- __skb_queue_tail(&port->fcoe_pending_queue, skb);
-
- if (port->fcoe_pending_queue_active)
- goto out;
- port->fcoe_pending_queue_active = 1;
-
- while (port->fcoe_pending_queue.qlen) {
- /* keep qlen > 0 until fcoe_start_io succeeds */
- port->fcoe_pending_queue.qlen++;
- skb = __skb_dequeue(&port->fcoe_pending_queue);
-
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- rc = fcoe_start_io(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (rc) {
- __skb_queue_head(&port->fcoe_pending_queue, skb);
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- break;
- }
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- }
-
- if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
- lport->qfull = 0;
- if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
- mod_timer(&port->timer, jiffies + 2);
- port->fcoe_pending_queue_active = 0;
-out:
- if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- lport->qfull = 1;
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- return;
-}
-
-/**
* fcoe_dev_setup() - Setup the link change notification interface
*/
static void fcoe_dev_setup(void)
@@ -1872,7 +1744,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
case NETDEV_FEAT_CHANGE:
@@ -1898,39 +1770,16 @@ out:
}
/**
- * fcoe_if_to_netdev() - Parse a name buffer to get a net device
- * @buffer: The name of the net device
- *
- * Returns: NULL or a ptr to net_device
- */
-static struct net_device *fcoe_if_to_netdev(const char *buffer)
-{
- char *cp;
- char ifname[IFNAMSIZ + 2];
-
- if (buffer) {
- strlcpy(ifname, buffer, IFNAMSIZ);
- cp = ifname + strlen(ifname);
- while (--cp >= ifname && *cp == '\n')
- *cp = '\0';
- return dev_get_by_name(&init_net, ifname);
- }
- return NULL;
-}
-
-/**
* fcoe_disable() - Disables a FCoE interface
- * @buffer: The name of the Ethernet interface to be disabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device object of the Ethernet interface to be disabled
*
- * Called from sysfs.
+ * Called from the fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+static int fcoe_disable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -1946,16 +1795,9 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
}
#endif
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -1967,7 +1809,6 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
} else
rc = -ENODEV;
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -1975,17 +1816,15 @@ out_nodev:
/**
* fcoe_enable() - Enables a FCoE interface
- * @buffer: The name of the Ethernet interface to be enabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device object of the Ethernet interface to be enabled
*
- * Called from sysfs.
+ * Called from the fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+static int fcoe_enable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2000,17 +1839,9 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -2021,7 +1852,6 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
else if (!fcoe_link_ok(fcoe->ctlr.lp))
fcoe_ctlr_link_up(&fcoe->ctlr);
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2029,17 +1859,15 @@ out_nodev:
/**
* fcoe_destroy() - Destroy a FCoE interface
- * @buffer: The name of the Ethernet interface to be destroyed
- * @kp: The associated kernel parameter
+ * @netdev: The net_device object of the Ethernet interface to be destroyed
*
- * Called from sysfs.
+ * Called from the fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+static int fcoe_destroy(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2054,32 +1882,21 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
if (!fcoe) {
rtnl_unlock();
rc = -ENODEV;
- goto out_putdev;
+ goto out_nodev;
}
fcoe_interface_cleanup(fcoe);
list_del(&fcoe->list);
/* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(fcoe->ctlr.lp);
-
-out_putdev:
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2102,27 +1919,39 @@ static void fcoe_destroy_work(struct work_struct *work)
}
/**
+ * fcoe_match() - Check if FCoE is supported on the given netdevice
+ * @netdev: The net_device to check for FCoE support
+ *
+ * Called from the fcoe transport.
+ *
+ * Returns: always true, as this is the default FCoE transport and
+ * therefore supports all netdevs.
+ */
+static bool fcoe_match(struct net_device *netdev)
+{
+ return true;
+}
+
+/**
* fcoe_create() - Create a fcoe interface
- * @buffer: The name of the Ethernet interface to create on
- * @kp: The associated kernel param
+ * @netdev: The net_device object of the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
*
- * Called from sysfs.
+ * Called from the fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_create(const char *buffer, struct kernel_param *kp)
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
- enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
int rc;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
- struct net_device *netdev;
mutex_lock(&fcoe_config_mutex);
if (!rtnl_trylock()) {
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
#ifdef CONFIG_FCOE_MODULE
@@ -2133,31 +1962,20 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
*/
if (THIS_MODULE->state != MODULE_STATE_LIVE) {
rc = -ENODEV;
- goto out_nomod;
- }
-#endif
-
- if (!try_module_get(THIS_MODULE)) {
- rc = -EINVAL;
- goto out_nomod;
- }
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
goto out_nodev;
}
+#endif
/* look for existing lport */
if (fcoe_hostlist_lookup(netdev)) {
rc = -EEXIST;
- goto out_putdev;
+ goto out_nodev;
}
fcoe = fcoe_interface_create(netdev, fip_mode);
- if (!fcoe) {
- rc = -ENOMEM;
- goto out_putdev;
+ if (IS_ERR(fcoe)) {
+ rc = PTR_ERR(fcoe);
+ goto out_nodev;
}
lport = fcoe_if_create(fcoe, &netdev->dev, 0);
@@ -2186,18 +2004,13 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
* should be holding a reference taken in fcoe_if_create().
*/
fcoe_interface_put(fcoe);
- dev_put(netdev);
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return 0;
out_free:
fcoe_interface_put(fcoe);
-out_putdev:
- dev_put(netdev);
out_nodev:
- module_put(THIS_MODULE);
-out_nomod:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2212,8 +2025,7 @@ out_nomod:
*/
int fcoe_link_speed_update(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -2244,8 +2056,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
*/
int fcoe_link_ok(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
if (netif_oper_up(netdev))
return 0;
@@ -2309,24 +2120,6 @@ void fcoe_percpu_clean(struct fc_lport *lport)
}
/**
- * fcoe_clean_pending_queue() - Dequeue a skb and free it
- * @lport: The local port to dequeue a skb on
- */
-void fcoe_clean_pending_queue(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct sk_buff *skb;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- kfree_skb(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- }
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
-}
-
-/**
* fcoe_reset() - Reset a local port
* @shost: The SCSI host associated with the local port to be reset
*
@@ -2335,7 +2128,13 @@ void fcoe_clean_pending_queue(struct fc_lport *lport)
int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
- fc_lport_reset(lport);
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+
+ fcoe_ctlr_link_down(&fcoe->ctlr);
+ fcoe_clean_pending_queue(fcoe->ctlr.lp);
+ if (!fcoe_link_ok(fcoe->ctlr.lp))
+ fcoe_ctlr_link_up(&fcoe->ctlr);
return 0;
}
@@ -2393,12 +2192,24 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
if (!fcoe) {
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
list_add_tail(&fcoe->list, &fcoe_hostlist);
}
return 0;
}
+
+static struct fcoe_transport fcoe_sw_transport = {
+ .name = {FCOE_TRANSPORT_DEFAULT},
+ .attached = false,
+ .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
+ .match = fcoe_match,
+ .create = fcoe_create,
+ .destroy = fcoe_destroy,
+ .enable = fcoe_enable,
+ .disable = fcoe_disable,
+};
+
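
A note on ordering: fcoe_transport_lookup() (added below in fcoe_transport.c)
returns the first attached transport whose match() accepts the netdev. Since
fcoe_match() above accepts every netdev, fcoe_transport_attach() places this
default transport at the tail of the list, so that more specific transports get
the first chance to claim a device.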
/**
* fcoe_init() - Initialize fcoe.ko
*
@@ -2410,6 +2221,18 @@ static int __init fcoe_init(void)
unsigned int cpu;
int rc = 0;
+ fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ if (!fcoe_wq)
+ return -ENOMEM;
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&fcoe_sw_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ destroy_workqueue(fcoe_wq);
+ return rc;
+ }
+
mutex_lock(&fcoe_config_mutex);
for_each_possible_cpu(cpu) {
@@ -2440,6 +2263,7 @@ out_free:
fcoe_percpu_thread_destroy(cpu);
}
mutex_unlock(&fcoe_config_mutex);
+ destroy_workqueue(fcoe_wq);
return rc;
}
module_init(fcoe_init);
@@ -2465,7 +2289,7 @@ static void __exit fcoe_exit(void)
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2476,16 +2300,21 @@ static void __exit fcoe_exit(void)
mutex_unlock(&fcoe_config_mutex);
- /* flush any asyncronous interface destroys,
- * this should happen after the netdev notifier is unregistered */
- flush_scheduled_work();
- /* That will flush out all the N_Ports on the hostlist, but now we
- * may have NPIV VN_Ports scheduled for destruction */
- flush_scheduled_work();
+ /*
+ * destroy_work items may be chained, but destroy_workqueue()
+ * can take care of them. Just kill the fcoe_wq.
+ */
+ destroy_workqueue(fcoe_wq);
- /* detach from scsi transport
- * must happen after all destroys are done, therefor after the flush */
+ /*
+ * Detaching from the scsi transport must happen after all
+ * destroys are done on the fcoe_wq. destroy_workqueue will
+ * ensure the fcoe_wq is flushed.
+ */
fcoe_if_exit();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&fcoe_sw_transport);
}
module_exit(fcoe_exit);
@@ -2557,7 +2386,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -2590,7 +2419,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fcoe_port *port = lport_priv(n_port);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
@@ -2630,7 +2459,7 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
mutex_lock(&n_port->lp_mutex);
list_del(&vn_port->list);
mutex_unlock(&n_port->lp_mutex);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
return 0;
}
@@ -2734,7 +2563,7 @@ static void fcoe_set_port_id(struct fc_lport *lport,
u32 port_id, struct fc_frame *fp)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index c69b2c56c2d1..408a6fd78fb4 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -24,7 +24,7 @@
#include <linux/kthread.h>
#define FCOE_MAX_QUEUE_DEPTH 256
-#define FCOE_LOW_QUEUE_DEPTH 32
+#define FCOE_MIN_QUEUE_DEPTH 32
#define FCOE_WORD_TO_BYTE 4
@@ -40,12 +40,6 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-/*
- * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
- * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
- */
-#define FCOE_MTU 2158
-
unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
@@ -71,21 +65,6 @@ do { \
netdev->name, ##args);)
/**
- * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
- * @thread: The thread context
- * @fcoe_rx_list: The queue of pending packets to process
- * @page: The memory page for calculating frame trailer CRCs
- * @crc_eof_offset: The offset into the CRC page pointing to available
- * memory for a new trailer
- */
-struct fcoe_percpu_s {
- struct task_struct *thread;
- struct sk_buff_head fcoe_rx_list;
- struct page *crc_eof_page;
- int crc_eof_offset;
-};
-
-/**
* struct fcoe_interface - A FCoE interface
* @list: Handle for a list of FCoE interfaces
* @netdev: The associated net device
@@ -108,30 +87,6 @@ struct fcoe_interface {
struct kref kref;
};
-/**
- * struct fcoe_port - The FCoE private structure
- * @fcoe: The associated fcoe interface
- * @lport: The associated local port
- * @fcoe_pending_queue: The pending Rx queue of skbs
- * @fcoe_pending_queue_active: Indicates if the pending queue is active
- * @timer: The queue timer
- * @destroy_work: Handle for work context
- * (to prevent RTNL deadlocks)
- * @data_srt_addr: Source address for data
- *
- * An instance of this structure is to be allocated along with the
- * Scsi_Host and libfc fc_lport structures.
- */
-struct fcoe_port {
- struct fcoe_interface *fcoe;
- struct fc_lport *lport;
- struct sk_buff_head fcoe_pending_queue;
- u8 fcoe_pending_queue_active;
- struct timer_list timer;
- struct work_struct destroy_work;
- u8 data_src_addr[ETH_ALEN];
-};
-
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
/**
@@ -140,7 +95,8 @@ struct fcoe_port {
*/
static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
{
- return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
+ return ((struct fcoe_interface *)
+ ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 625c6be25396..c93f007e702f 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -44,9 +44,7 @@
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
-MODULE_AUTHOR("Open-FCoE.org");
-MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
-MODULE_LICENSE("GPL v2");
+#include "libfcoe.h"
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
@@ -66,31 +64,7 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
-unsigned int libfcoe_debug_logging;
-module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-
-#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
-do { \
- if (unlikely(libfcoe_debug_logging & LEVEL)) \
- do { \
- CMD; \
- } while (0); \
-} while (0)
-
-#define LIBFCOE_DBG(fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
-
-#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
-
-static const char *fcoe_ctlr_states[] = {
+static const char * const fcoe_ctlr_states[] = {
[FIP_ST_DISABLED] = "DISABLED",
[FIP_ST_LINK_WAIT] = "LINK_WAIT",
[FIP_ST_AUTO] = "AUTO",
@@ -308,8 +282,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_size_desc size;
- } __attribute__((packed)) desc;
- } __attribute__((packed)) *sol;
+ } __packed desc;
+ } __packed * sol;
u32 fcoe_size;
skb = dev_alloc_skb(sizeof(*sol));
@@ -456,7 +430,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac;
- } __attribute__((packed)) *kal;
+ } __packed * kal;
struct fip_vn_desc *vn;
u32 len;
struct fc_lport *lp;
@@ -527,7 +501,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
struct ethhdr eth;
struct fip_header fip;
struct fip_encaps encaps;
- } __attribute__((packed)) *cap;
+ } __packed * cap;
struct fc_frame_header *fh;
struct fip_mac_desc *mac;
struct fcoe_fcf *fcf;
@@ -1819,7 +1793,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_vn_desc vn;
- } __attribute__((packed)) *frame;
+ } __packed * frame;
struct fip_fc4_feat *ff;
struct fip_size_desc *size;
u32 fcp_feat;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
new file mode 100644
index 000000000000..258684101bfd
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -0,0 +1,770 @@
+/*
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/crc32.h>
+#include <scsi/libfcoe.h>
+
+#include "libfcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
+MODULE_LICENSE("GPL v2");
+
+static int fcoe_transport_create(const char *, struct kernel_param *);
+static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
+static int fcoe_transport_enable(const char *, struct kernel_param *);
+static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
+
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(ft_mutex);
+static LIST_HEAD(fcoe_netdevs);
+static DEFINE_MUTEX(fn_mutex);
+
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
+__MODULE_PARM_TYPE(show, "string");
+MODULE_PARM_DESC(show, " Show attached FCoE transports");
+
+module_param_call(create, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_FABRIC, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
+
+module_param_call(create_vn2vn, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_VN2VN, S_IWUSR);
+__MODULE_PARM_TYPE(create_vn2vn, "string");
+MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
+ "on an Ethernet interface");
+
+module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
+
+module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
+
+module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
+
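
These module_param_call() hooks expose the transport operations as writable
module parameters; assuming the module is named libfcoe, they surface under
/sys/module/libfcoe/parameters/. An illustrative session (the interface name
is an example):

	echo eth3 > /sys/module/libfcoe/parameters/create
	echo eth3 > /sys/module/libfcoe/parameters/disable
	echo eth3 > /sys/module/libfcoe/parameters/destroy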
+/* notification function for packets from net device */
+static struct notifier_block libfcoe_notifier = {
+ .notifier_call = libfcoe_device_notification,
+};
+
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksumed
+ *
+ * This uses crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+ struct sk_buff *skb = fp_skb(fp);
+ struct skb_frag_struct *frag;
+ unsigned char *data;
+ unsigned long off, len, clen;
+ u32 crc;
+ unsigned i;
+
+ crc = crc32(~0, skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ off = frag->page_offset;
+ len = frag->size;
+ while (len > 0) {
+ clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+ data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+ KM_SKB_DATA_SOFTIRQ);
+ crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+ kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+ off += clen;
+ len -= clen;
+ }
+ }
+ return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
+
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine is called from the net device to start transmitting
+ * FCoE packets.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_start_io(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int rc;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ rc = dev_queue_xmit(nskb);
+ if (rc != 0)
+ return rc;
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_start_io);
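
The skb_clone() gives dev_queue_xmit() its own reference: on success the
original skb is freed here, while on failure the caller still owns the original
and can requeue it, which is exactly what fcoe_check_wait_queue() below does
with its wait_queue.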
+
+
+/**
+ * fcoe_clean_pending_queue() - Dequeue and free all skbs for a local port
+ * @lport: The local port whose pending queue is to be emptied
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct sk_buff *skb;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ kfree_skb(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ }
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
+
+/**
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
+ * @skb: An optional skb to queue before draining the backlog
+ *
+ * This drains the wait_queue by dequeuing skbs from its head and
+ * calling fcoe_start_io() for each one. If a transmit fails, the
+ * skb is put back at the head of the wait_queue to be retried later.
+ *
+ * The wait_queue is used when an skb transmit fails. The failed skb
+ * is placed on the wait_queue, which is emptied by the timer function
+ * or by the next skb transmit.
+ */
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ int rc;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (skb)
+ __skb_queue_tail(&port->fcoe_pending_queue, skb);
+
+ if (port->fcoe_pending_queue_active)
+ goto out;
+ port->fcoe_pending_queue_active = 1;
+
+ while (port->fcoe_pending_queue.qlen) {
+ /* keep qlen > 0 until fcoe_start_io succeeds */
+ port->fcoe_pending_queue.qlen++;
+ skb = __skb_dequeue(&port->fcoe_pending_queue);
+
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ rc = fcoe_start_io(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (rc) {
+ __skb_queue_head(&port->fcoe_pending_queue, skb);
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ break;
+ }
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ }
+
+ if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
+ lport->qfull = 0;
+ if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+ mod_timer(&port->timer, jiffies + 2);
+ port->fcoe_pending_queue_active = 0;
+out:
+ if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
+ lport->qfull = 1;
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
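
The temporary qlen increment around the unlocked fcoe_start_io() call is
deliberate: it keeps qlen nonzero while the queue lock is dropped, so a
concurrent transmitter that takes the lock mid-drain still sees a non-empty
backlog and queues behind it, preserving frame ordering and the qfull
accounting.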
+
+/**
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ */
+void fcoe_queue_timer(ulong lport)
+{
+ fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+}
+EXPORT_SYMBOL_GPL(fcoe_queue_timer);
+
+/**
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb: The packet to be transmitted
+ * @tlen: The total length of the trailer
+ * @fps: The fcoe context
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough buffer left a new page is allocated for the trailer. Reference to
+ * the page from this function as well as the skbs using the page fragments
+ * ensure that the page is freed at the appropriate time.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+ struct fcoe_percpu_s *fps)
+{
+ struct page *page;
+
+ page = fps->crc_eof_page;
+ if (!page) {
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+
+ fps->crc_eof_page = page;
+ fps->crc_eof_offset = 0;
+ }
+
+ get_page(page);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+ fps->crc_eof_offset, tlen);
+ skb->len += tlen;
+ skb->data_len += tlen;
+ skb->truesize += tlen;
+ fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+ if (fps->crc_eof_offset >= PAGE_SIZE) {
+ fps->crc_eof_page = NULL;
+ fps->crc_eof_offset = 0;
+ put_page(page);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
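
The page sharing works through plain reference counting: each skb fragment
takes its own reference via get_page(), the per-CPU context holds one
reference in fps->crc_eof_page, and once the page fills up the put_page()
drops that per-CPU reference, so the page is finally freed only when the last
transmitted skb releases its fragment.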
+
+/**
+ * fcoe_transport_lookup() - find an fcoe transport that matches a netdev
+ * @netdev: The netdev to look for from all attached transports
+ *
+ * Returns: ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+
+ list_for_each_entry(ft, &fcoe_transports, list)
+ if (ft->match && ft->match(netdev))
+ return ft;
+ return NULL;
+}
+
+/**
+ * fcoe_transport_attach() - Attach an FCoE transport
+ * @ft: The fcoe transport to be attached
+ *
+ * Returns: 0 for success
+ */
+int fcoe_transport_attach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already attached\n",
+ ft->name);
+ rc = -EEXIST;
+ goto out_attach;
+ }
+
+ /* Add default transport to the tail */
+ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT))
+ list_add(&ft->list, &fcoe_transports);
+ else
+ list_add_tail(&ft->list, &fcoe_transports);
+
+ ft->attached = true;
+ LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_transport_attach);
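
For an out-of-tree or vendor LLD, registration would mirror the
fcoe_sw_transport definition earlier in this patch. A minimal sketch, assuming
a hypothetical transport named "foo" with stub callbacks (none of the foo_*
symbols exist in this patch):

	static struct fcoe_transport foo_transport = {
		.name     = {"foo"},
		.attached = false,
		.list     = LIST_HEAD_INIT(foo_transport.list),
		.match    = foo_match,	/* claim only foo-capable netdevs */
		.create   = foo_create,
		.destroy  = foo_destroy,
		.enable   = foo_enable,
		.disable  = foo_disable,
	};

	static int __init foo_init(void)
	{
		/* fails with -EEXIST if this transport is already attached */
		return fcoe_transport_attach(&foo_transport);
	}

	static void __exit foo_exit(void)
	{
		fcoe_transport_detach(&foo_transport);
	}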
+
+/**
+ * fcoe_transport_detach() - Detach an FCoE transport
+ * @ft: The fcoe transport to be detached
+ *
+ * Returns: 0 for success
+ */
+int fcoe_transport_detach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (!ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already detached\n",
+ ft->name);
+ rc = -ENODEV;
+ goto out_attach;
+ }
+
+ list_del(&ft->list);
+ ft->attached = false;
+ LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+
+}
+EXPORT_SYMBOL(fcoe_transport_detach);
+
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
+{
+ int i, j;
+ struct fcoe_transport *ft = NULL;
+
+ i = j = sprintf(buffer, "Attached FCoE transports:");
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list) {
+ i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
+ if (i >= PAGE_SIZE)
+ break;
+ }
+ mutex_unlock(&ft_mutex);
+ if (i == j)
+ i += snprintf(&buffer[i], IFNAMSIZ, "none");
+ return i;
+}
+
+static int __init fcoe_transport_init(void)
+{
+ register_netdevice_notifier(&libfcoe_notifier);
+ return 0;
+}
+
+static int __exit fcoe_transport_exit(void)
+{
+ struct fcoe_transport *ft;
+
+ unregister_netdevice_notifier(&libfcoe_notifier);
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list)
+ printk(KERN_ERR "FCoE transport %s is still attached!\n",
+ ft->name);
+ mutex_unlock(&ft_mutex);
+ return 0;
+}
+
+
+static int fcoe_add_netdev_mapping(struct net_device *netdev,
+ struct fcoe_transport *ft)
+{
+ struct fcoe_netdev_mapping *nm;
+
+ nm = kmalloc(sizeof(*nm), GFP_KERNEL);
+ if (!nm) {
+ printk(KERN_ERR "Unable to allocate netdev_mapping\n");
+ return -ENOMEM;
+ }
+
+ nm->netdev = netdev;
+ nm->ft = ft;
+
+ mutex_lock(&fn_mutex);
+ list_add(&nm->list, &fcoe_netdevs);
+ mutex_unlock(&fn_mutex);
+ return 0;
+}
+
+
+static void fcoe_del_netdev_mapping(struct net_device *netdev)
+{
+ struct fcoe_netdev_mapping *nm = NULL, *tmp;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
+ if (nm->netdev == netdev) {
+ list_del(&nm->list);
+ kfree(nm);
+ mutex_unlock(&fn_mutex);
+ return;
+ }
+ }
+ mutex_unlock(&fn_mutex);
+}
+
+
+/**
+ * fcoe_netdev_map_lookup() - find the fcoe transport that matches the
+ * netdev on which it was created
+ * @netdev: The net_device to look up
+ *
+ * Returns: ptr to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_netdev_mapping *nm;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry(nm, &fcoe_netdevs, list) {
+ if (netdev == nm->netdev) {
+ ft = nm->ft;
+ mutex_unlock(&fn_mutex);
+ return ft;
+ }
+ }
+
+ mutex_unlock(&fn_mutex);
+ return NULL;
+}
+
+/**
+ * fcoe_if_to_netdev() - Parse a name buffer to get a net device
+ * @buffer: The name of the net device
+ *
+ * Returns: NULL or a ptr to net_device
+ */
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+ char *cp;
+ char ifname[IFNAMSIZ + 2];
+
+ if (buffer) {
+ strlcpy(ifname, buffer, IFNAMSIZ);
+ cp = ifname + strlen(ifname);
+ while (--cp >= ifname && *cp == '\n')
+ *cp = '\0';
+ return dev_get_by_name(&init_net, ifname);
+ }
+ return NULL;
+}
+
+/**
+ * libfcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event: The type of event
+ * @ptr: The net device that the event was on
+ *
+ * This function is called by the Ethernet driver in case of link change event.
+ *
+ * Returns: 0 for success
+ */
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct net_device *netdev = ptr;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
+ netdev->name);
+ fcoe_del_netdev_mapping(netdev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+
+/**
+ * fcoe_transport_create() - Create a fcoe interface
+ * @buffer: The name of the Ethernet interface to create on
+ * @kp: The associated kernel param
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's create function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ rc = fcoe_add_netdev_mapping(netdev, ft);
+ if (rc) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV;
+ if (rc)
+ fcoe_del_netdev_mapping(netdev);
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_destroy() - Destroy a FCoE interface
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's destroy function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV;
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_disable() - Disables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->disable ? ft->disable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_enable() - Enables a FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->enable ? ft->enable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * libfcoe_init() - Initialization routine for libfcoe.ko
+ */
+static int __init libfcoe_init(void)
+{
+ fcoe_transport_init();
+
+ return 0;
+}
+module_init(libfcoe_init);
+
+/**
+ * libfcoe_exit() - Tear down libfcoe.ko
+ */
+static void __exit libfcoe_exit(void)
+{
+ fcoe_transport_exit();
+}
+module_exit(libfcoe_exit);
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
new file mode 100644
index 000000000000..6af5fc3a17d8
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -0,0 +1,31 @@
+#ifndef _FCOE_LIBFCOE_H_
+#define _FCOE_LIBFCOE_H_
+
+extern unsigned int libfcoe_debug_logging;
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(libfcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define LIBFCOE_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
+ printk(KERN_INFO "libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
+ printk(KERN_INFO "host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
+
+#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
+ printk(KERN_INFO "%s: " fmt, \
+ __func__, ##args);)
+
+#endif /* _FCOE_LIBFCOE_H_ */
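
The three LIBFCOE_*_DBG macros above are gated by independent bits in
libfcoe_debug_logging (0x01 general, 0x02 FIP, 0x04 transport), so categories
can be toggled separately at runtime; e.g., assuming the module is named
libfcoe, writing 0x04 to /sys/module/libfcoe/parameters/debug_logging enables
only the transport messages, and 0x07 enables all three.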
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 92f185081e62..671cde9d4060 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -37,7 +37,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.4.0.145"
+#define DRV_VERSION "1.5.0.1"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index db710148d156..b576be734e2e 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
vdev->linkstatus_pa);
if (vdev->stats)
pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_dev),
+ sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
pci_free_consistent(vdev->pdev,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 12deffccb8da..415ad4fb50d4 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -74,6 +74,10 @@ static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
"Allow hpsa driver to access unknown HP Smart Array hardware");
+static int hpsa_simple_mode;
+module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_simple_mode,
+ "Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
@@ -85,11 +89,13 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -109,11 +115,13 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
- {0x3250103C, "Smart Array", &SA5_access},
- {0x3250113C, "Smart Array", &SA5_access},
- {0x3250123C, "Smart Array", &SA5_access},
- {0x3250133C, "Smart Array", &SA5_access},
- {0x3250143C, "Smart Array", &SA5_access},
+ {0x3350103C, "Smart Array", &SA5_access},
+ {0x3351103C, "Smart Array", &SA5_access},
+ {0x3352103C, "Smart Array", &SA5_access},
+ {0x3353103C, "Smart Array", &SA5_access},
+ {0x3354103C, "Smart Array", &SA5_access},
+ {0x3355103C, "Smart Array", &SA5_access},
+ {0x3356103C, "Smart Array", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -147,17 +155,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
-static ssize_t raid_level_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t lunid_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t unique_id_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t host_show_firmware_revision(struct device *dev,
- struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
-static ssize_t host_store_rescan(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
@@ -173,47 +171,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
-
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(firmware_revision, S_IRUGO,
- host_show_firmware_revision, NULL);
-
-static struct device_attribute *hpsa_sdev_attrs[] = {
- &dev_attr_raid_level,
- &dev_attr_lunid,
- &dev_attr_unique_id,
- NULL,
-};
-
-static struct device_attribute *hpsa_shost_attrs[] = {
- &dev_attr_rescan,
- &dev_attr_firmware_revision,
- NULL,
-};
-
-static struct scsi_host_template hpsa_driver_template = {
- .module = THIS_MODULE,
- .name = "hpsa",
- .proc_name = "hpsa",
- .queuecommand = hpsa_scsi_queue_command,
- .scan_start = hpsa_scan_start,
- .scan_finished = hpsa_scan_finished,
- .change_queue_depth = hpsa_change_queue_depth,
- .this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = hpsa_eh_device_reset_handler,
- .ioctl = hpsa_ioctl,
- .slave_alloc = hpsa_slave_alloc,
- .slave_destroy = hpsa_slave_destroy,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hpsa_compat_ioctl,
-#endif
- .sdev_attrs = hpsa_sdev_attrs,
- .shost_attrs = hpsa_shost_attrs,
-};
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready);
+#define BOARD_NOT_READY 0
+#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -291,67 +252,63 @@ static ssize_t host_show_firmware_revision(struct device *dev,
fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
-/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct hlist_head *list, struct CommandList *c)
+static ssize_t host_show_commands_outstanding(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- hlist_add_head(&c->list, list);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ctlr_info *h = shost_to_hba(shost);
+
+ return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
-static inline u32 next_command(struct ctlr_info *h)
+static ssize_t host_show_transport_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- u32 a;
-
- if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
- return h->access.command_completed(h);
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
- (h->reply_pool_head)++;
- h->commands_outstanding--;
- } else {
- a = FIFO_EMPTY;
- }
- /* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
- }
- return a;
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%s\n",
+ h->transMethod & CFGTBL_Trans_Performant ?
+ "performant" : "simple");
}
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
-{
- if (likely(h->transMethod == CFGTBL_Trans_Performant))
- c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+ 0x324a103C, /* Smart Array P712m */
+ 0x324b103C, /* Smart Array P711m */
+ 0x3223103C, /* Smart Array P800 */
+ 0x3234103C, /* Smart Array P400 */
+ 0x3235103C, /* Smart Array P400i */
+ 0x3211103C, /* Smart Array E200i */
+ 0x3212103C, /* Smart Array E200 */
+ 0x3213103C, /* Smart Array E200i */
+ 0x3214103C, /* Smart Array E200i */
+ 0x3215103C, /* Smart Array E200i */
+ 0x3237103C, /* Smart Array E500 */
+ 0x323D103C, /* Smart Array P700m */
+ 0x409C0E11, /* Smart Array 6400 */
+ 0x409D0E11, /* Smart Array 6400 EM */
+};
-static void enqueue_cmd_and_start_io(struct ctlr_info *h,
- struct CommandList *c)
+static int ctlr_is_resettable(struct ctlr_info *h)
{
- unsigned long flags;
+ int i;
- set_performant_mode(h, c);
- spin_lock_irqsave(&h->lock, flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h);
- spin_unlock_irqrestore(&h->lock, flags);
+ for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+ if (unresettable_controller[i] == h->board_id)
+ return 0;
+ return 1;
}
-static inline void removeQ(struct CommandList *c)
+static ssize_t host_show_resettable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if (WARN_ON(hlist_unhashed(&c->list)))
- return;
- hlist_del_init(&c->list);
-}
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
-static inline int is_hba_lunid(unsigned char scsi3addr[])
-{
- return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
@@ -359,15 +316,6 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
return (scsi3addr[3] & 0xC0) == 0x40;
}
-static inline int is_scsi_rev_5(struct ctlr_info *h)
-{
- if (!h->hba_inquiry_data)
- return 0;
- if ((h->hba_inquiry_data[2] & 0x07) == 5)
- return 1;
- return 0;
-}
-
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"UNKNOWN"
};
@@ -459,6 +407,129 @@ static ssize_t unique_id_show(struct device *dev,
sn[12], sn[13], sn[14], sn[15]);
}
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(firmware_revision, S_IRUGO,
+ host_show_firmware_revision, NULL);
+static DEVICE_ATTR(commands_outstanding, S_IRUGO,
+ host_show_commands_outstanding, NULL);
+static DEVICE_ATTR(transport_mode, S_IRUGO,
+ host_show_transport_mode, NULL);
+static DEVICE_ATTR(resettable, S_IRUGO,
+ host_show_resettable, NULL);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_lunid,
+ &dev_attr_unique_id,
+ NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan,
+ &dev_attr_firmware_revision,
+ &dev_attr_commands_outstanding,
+ &dev_attr_transport_mode,
+ &dev_attr_resettable,
+ NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+ .module = THIS_MODULE,
+ .name = "hpsa",
+ .proc_name = "hpsa",
+ .queuecommand = hpsa_scsi_queue_command,
+ .scan_start = hpsa_scan_start,
+ .scan_finished = hpsa_scan_finished,
+ .change_queue_depth = hpsa_change_queue_depth,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ .ioctl = hpsa_ioctl,
+ .slave_alloc = hpsa_slave_alloc,
+ .slave_destroy = hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpsa_compat_ioctl,
+#endif
+ .sdev_attrs = hpsa_sdev_attrs,
+ .shost_attrs = hpsa_shost_attrs,
+};
+
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct list_head *list, struct CommandList *c)
+{
+ list_add_tail(&c->list, list);
+}
+
+static inline u32 next_command(struct ctlr_info *h)
+{
+ u32 a;
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return h->access.command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ a = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+ return a;
+}
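
The reply ring consumed by next_command() needs no head/tail pair: the producer stamps bit 0 of each tag with the parity of its current pass, and the consumer flips its expected parity (reply_pool_wraparound) each time it wraps, so a slot is fresh exactly when the parities match. A minimal userspace sketch of the consumer side, assuming the driver's apparent initialization (pool zeroed, expected parity starting at 1); the names are illustrative, not the driver's:

#include <stdint.h>
#include <string.h>

#define RING_SIZE  8
#define FIFO_EMPTY 0xffffffffu

struct reply_ring {
        uint32_t slots[RING_SIZE]; /* producer writes tag | pass parity in bit 0 */
        uint32_t *head;            /* consumer cursor */
        uint32_t wraparound;       /* parity expected of a fresh slot */
};

static void ring_init(struct reply_ring *r)
{
        memset(r->slots, 0, sizeof(r->slots)); /* parity 0 == stale */
        r->head = r->slots;
        r->wraparound = 1;                     /* producer's first pass uses parity 1 */
}

/* Consume one completion tag, or FIFO_EMPTY if the producer hasn't caught up. */
static uint32_t ring_next(struct reply_ring *r)
{
        uint32_t tag = FIFO_EMPTY;

        if ((*r->head & 1) == r->wraparound) {
                tag = *r->head;
                r->head++;
        }
        /* Wrapped past the end: restart and expect the opposite parity. */
        if (r->head == r->slots + RING_SIZE) {
                r->head = r->slots;
                r->wraparound ^= 1;
        }
        return tag;
}
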
+
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+ if (likely(h->transMethod & CFGTBL_Trans_Performant))
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
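
Taken together with the DIRECT_LOOKUP_* constants added in hpsa_cmd.h further down, set_performant_mode() implies this tag layout: bit 0 selects the pull model, bits 3-1 carry the block-fetch register number, bit 4 (DIRECT_LOOKUP_BIT) marks the tag as index-carrying, and the command index lives above bit 5 (DIRECT_LOOKUP_SHIFT). A hedged encode/decode sketch; the helper names are invented:

#include <stdint.h>

#define PULL_MODEL_BIT    0x01u
#define BLOCK_FETCH_SHIFT 1
#define BLOCK_FETCH_MASK  0x0eu /* bits 3-1 */
#define DIRECT_LOOKUP_BIT 0x10u /* bit 4: tag carries a command index */
#define INDEX_SHIFT       5     /* DIRECT_LOOKUP_SHIFT */

static uint32_t encode_perf_tag(uint32_t cmd_index, uint32_t fetch_reg)
{
        return (cmd_index << INDEX_SHIFT) | DIRECT_LOOKUP_BIT |
               ((fetch_reg << BLOCK_FETCH_SHIFT) & BLOCK_FETCH_MASK) |
               PULL_MODEL_BIT;
}

/* Mirrors hpsa_tag_to_index(): recover the command index from a raw tag. */
static uint32_t decode_perf_index(uint32_t tag)
{
        return tag >> INDEX_SHIFT;
}
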
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ unsigned long flags;
+
+ set_performant_mode(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ start_io(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+ if (WARN_ON(list_empty(&c->list)))
+ return;
+ list_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+ return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+ if (!h->hba_inquiry_data)
+ return 0;
+ if ((h->hba_inquiry_data[2] & 0x07) == 5)
+ return 1;
+ return 0;
+}
+
static int hpsa_find_target_lun(struct ctlr_info *h,
unsigned char scsi3addr[], int bus, int *target, int *lun)
{
@@ -1130,6 +1201,10 @@ static void complete_scsi_command(struct CommandList *cp,
cmd->result = DID_TIME_OUT << 16;
dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
break;
+ case CMD_UNABORTABLE:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "Command unabortable\n");
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1160,7 +1235,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[PERF_MODE_INT];
+ sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
@@ -1295,6 +1370,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
case CMD_TIMEOUT:
dev_warn(d, "cp %p timed out\n", cp);
break;
+ case CMD_UNABORTABLE:
+ dev_warn(d, "Command unabortable\n");
+ break;
default:
dev_warn(d, "cp %p returned unknown status %x\n", cp,
ei->CommandStatus);
@@ -1595,6 +1673,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
return 0;
+ memset(scsi3addr, 0, 8);
+ scsi3addr[3] = target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
@@ -1609,8 +1689,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
return 0;
}
- memset(scsi3addr, 0, 8);
- scsi3addr[3] = target;
if (hpsa_update_device_info(h, scsi3addr, this_device))
return 0;
(*nmsa2xxx_enclosures)++;
@@ -2199,7 +2277,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
c->cmdindex = i;
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2237,7 +2315,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
}
memset(c->err_info, 0, sizeof(*c->err_info));
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2267,7 +2345,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
pci_free_consistent(h->pdev, sizeof(*c->err_info),
c->err_info, (dma_addr_t) temp64.val);
pci_free_consistent(h->pdev, sizeof(*c),
- c, (dma_addr_t) c->busaddr);
+ c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT
@@ -2281,6 +2359,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2317,6 +2396,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2433,15 +2513,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
return -EFAULT;
- }
- if (iocommand.Request.Type.Direction == XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ if (iocommand.Request.Type.Direction == XFER_WRITE) {
+ /* Copy the data into the buffer we created */
+ if (copy_from_user(buff, iocommand.buf,
+ iocommand.buf_size)) {
+ kfree(buff);
+ return -EFAULT;
+ }
+ } else {
+ memset(buff, 0, iocommand.buf_size);
}
- } else
- memset(buff, 0, iocommand.buf_size);
+ }
c = cmd_special_alloc(h);
if (c == NULL) {
kfree(buff);
@@ -2487,8 +2569,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
cmd_special_free(h, c);
return -EFAULT;
}
-
- if (iocommand.Request.Type.Direction == XFER_READ) {
+ if (iocommand.Request.Type.Direction == XFER_READ &&
+ iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
@@ -2581,14 +2663,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
-
- if (ioc->buf_size > 0) {
- c->Header.SGList = sg_used;
- c->Header.SGTotal = sg_used;
- } else {
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
+ c->Header.SGList = c->Header.SGTotal = sg_used;
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
c->Header.Tag.lower = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
@@ -2605,7 +2680,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
}
hpsa_scsi_do_simple_cmd_core(h, c);
- hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ if (sg_used)
+ hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
@@ -2614,7 +2690,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -EFAULT;
goto cleanup1;
}
- if (ioc->Request.Type.Direction == XFER_READ) {
+ if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
@@ -2810,8 +2886,8 @@ static void start_io(struct ctlr_info *h)
{
struct CommandList *c;
- while (!hlist_empty(&h->reqQ)) {
- c = hlist_entry(h->reqQ.first, struct CommandList, list);
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
dev_warn(&h->pdev->dev, "fifo full\n");
@@ -2867,20 +2943,22 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
static inline u32 hpsa_tag_contains_index(u32 tag)
{
-#define DIRECT_LOOKUP_BIT 0x10
return tag & DIRECT_LOOKUP_BIT;
}
static inline u32 hpsa_tag_to_index(u32 tag)
{
-#define DIRECT_LOOKUP_SHIFT 5
return tag >> DIRECT_LOOKUP_SHIFT;
}
-static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+
+static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
-#define HPSA_ERROR_BITS 0x03
- return tag & ~HPSA_ERROR_BITS;
+#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define HPSA_SIMPLE_ERROR_BITS 0x03
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return tag & ~HPSA_SIMPLE_ERROR_BITS;
+ return tag & ~HPSA_PERF_ERROR_BITS;
}
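
The split above works because a performant-mode tag keeps its payload (the command index) above DIRECT_LOOKUP_SHIFT, freeing all five low bits for hardware status, whereas a simple-mode tag is a bus address with only bits 1-0 reserved. A quick standalone check of the arithmetic, with the constants copied from the #defines above:

#include <assert.h>
#include <stdint.h>

#define DIRECT_LOOKUP_SHIFT    5
#define HPSA_PERF_ERROR_BITS   ((1u << DIRECT_LOOKUP_SHIFT) - 1) /* 0x1f */
#define HPSA_SIMPLE_ERROR_BITS 0x03u

int main(void)
{
        uint32_t raw = 0x12345660u | 0x1fu; /* tag with every status bit set */

        /* Performant mode: strip bits 4-0, leaving the shifted index intact. */
        assert((raw & ~HPSA_PERF_ERROR_BITS) == 0x12345660u);
        /* Simple mode: only bits 1-0 are status bits. */
        assert((raw & ~HPSA_SIMPLE_ERROR_BITS) == 0x1234567cu);
        return 0;
}
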
/* process completion of an indexed ("direct lookup") command */
@@ -2904,10 +2982,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
{
u32 tag;
struct CommandList *c = NULL;
- struct hlist_node *tmp;
- tag = hpsa_tag_discard_error_bits(raw_tag);
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ tag = hpsa_tag_discard_error_bits(h, raw_tag);
+ list_for_each_entry(c, &h->cmpQ, list) {
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
finish_cmd(c, raw_tag);
return next_command(h);
@@ -2957,7 +3034,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Send a message CDB to the firmware. */
+/* Send a message CDB to the firmware. Careful, this only works
+ * in simple mode, not performant mode due to the tag lookup.
+ * We only ever use this immediately after a controller reset.
+ */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
@@ -3023,7 +3103,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if (hpsa_tag_discard_error_bits(tag) == paddr32)
+ if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
@@ -3055,38 +3135,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
#define hpsa_noop(p) hpsa_message(p, 3, 0)
-static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
- int pos;
- u16 control = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSI_FLAGS_ENABLE);
- }
- }
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSIX_FLAGS_ENABLE);
- }
- }
-
- return 0;
-}
-
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
void * __iomem vaddr, bool use_doorbell)
{
@@ -3142,17 +3190,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
*/
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
- u16 saved_config_space[32];
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support, active_transport;
- int rc, i;
+ int rc;
struct CfgTable __iomem *cfgtable;
bool use_doorbell;
u32 board_id;
+ u16 command_register;
/* For controllers as old as the P600, this is very nearly
* the same thing as
@@ -3162,14 +3210,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
- * but we can't use these nice canned kernel routines on
- * kexec, because they also check the MSI/MSI-X state in PCI
- * configuration space and do the wrong thing when it is
- * set/cleared. Also, the pci_save/restore_state functions
- * violate the ordering requirements for restoring the
- * configuration space from the CCISS document (see the
- * comment below). So we roll our own ....
- *
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
@@ -3182,13 +3222,21 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* likely not be happy. Just forbid resetting this conjoined mess.
* The 640x isn't really supported by hpsa anyway.
*/
- hpsa_lookup_board_id(pdev, &board_id);
+ rc = hpsa_lookup_board_id(pdev, &board_id);
+ if (rc < 0) {
+ dev_warn(&pdev->dev, "Not resetting device.\n");
+ return -ENODEV;
+ }
if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
return -ENOTSUPP;
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
-
+ /* Save the PCI command register */
+ pci_read_config_word(pdev, 4, &command_register);
+ /* Turn the board off. This is so that later pci_restore_state()
+ * won't turn the board on before the rest of config space is ready.
+ */
+ pci_disable_device(pdev);
+ pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
@@ -3214,46 +3262,47 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- /* The doorbell reset seems to cause lockups on some Smart
- * Arrays (e.g. P410, P410i, maybe others). Until this is
- * fixed or at least isolated, avoid the doorbell reset.
- */
- use_doorbell = 0;
-
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
- /* Restore the PCI configuration space. The Open CISS
- * Specification says, "Restore the PCI Configuration
- * Registers, offsets 00h through 60h. It is important to
- * restore the command register, 16-bits at offset 04h,
- * last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i.
- */
- for (i = 0; i < 32; i++) {
- if (i == 2 || i == 3)
- continue;
- pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ goto unmap_cfgtable;
}
- wmb();
- pci_write_config_word(pdev, 4, saved_config_space[2]);
+ pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(HPSA_POST_RESET_PAUSE_MSECS);
+ /* Wait for board to become not ready, then ready. */
+ dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become not ready\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become ready\n");
+ goto unmap_cfgtable;
+ }
+ dev_info(&pdev->dev, "board ready.\n");
+
/* Controller should be in simple mode at this point. If it's not,
* it means we're on one of those controllers that don't support
* the doorbell reset method and on which the PCI power management reset
* method doesn't work (P800, for example.)
- * In those cases, pretend the reset worked and hope for the best.
+ * In those cases, don't try to proceed, as it generally doesn't work.
*/
active_transport = readl(&cfgtable->TransportActive);
if (active_transport & PERFORMANT_MODE) {
dev_warn(&pdev->dev, "Unable to successfully reset controller,"
- " proceeding anyway.\n");
- rc = -ENOTSUPP;
+ " Ignoring controller.\n");
+ rc = -ENODEV;
}
unmap_cfgtable:
@@ -3386,7 +3435,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- h->intr[PERF_MODE_INT] = h->pdev->irq;
+ h->intr[h->intr_mode] = h->pdev->irq;
}
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -3438,18 +3487,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready)
{
- int i;
+ int i, iterations;
u32 scratchpad;
+ if (wait_for_ready)
+ iterations = HPSA_BOARD_READY_ITERATIONS;
+ else
+ iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
- for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
- scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == HPSA_FIRMWARE_READY)
- return 0;
+ for (i = 0; i < iterations; i++) {
+ scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (wait_for_ready) {
+ if (scratchpad == HPSA_FIRMWARE_READY)
+ return 0;
+ } else {
+ if (scratchpad != HPSA_FIRMWARE_READY)
+ return 0;
+ }
msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
}
- dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
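
hpsa_wait_for_board_state() folds the old "wait for ready" helper and the new "wait for not ready" case into one poll loop; the kdump reset path needs both, since the board must first visibly leave the ready state before it makes sense to wait for it to come back. A self-contained sketch of the same loop with the register read faked; the FIRMWARE_READY value and iteration counts are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FIRMWARE_READY   0xffff0000u /* stand-in for HPSA_FIRMWARE_READY */
#define POLL_INTERVAL_MS 100
#define READY_ITERS      1200        /* ~120 s, as in the driver */
#define NOT_READY_ITERS  100         /* ~10 s */

static unsigned poll_count;

/* Fake scratchpad register: reports ready after a few polls. */
static uint32_t read_scratchpad(void)
{
        return ++poll_count > 3 ? FIRMWARE_READY : 0;
}

static int wait_for_state(bool wait_for_ready)
{
        int i, iters = wait_for_ready ? READY_ITERS : NOT_READY_ITERS;

        for (i = 0; i < iters; i++) {
                bool ready = read_scratchpad() == FIRMWARE_READY;

                if (ready == wait_for_ready)
                        return 0;
                usleep(POLL_INTERVAL_MS * 1000);
        }
        return -1; /* timed out */
}

int main(void)
{
        printf("wait_for_state(ready) = %d\n", wait_for_state(true));
        return 0;
}
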
@@ -3497,6 +3556,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+ /* Limit commands in memory limited kdump scenario. */
+ if (reset_devices && h->max_commands > 32)
+ h->max_commands = 32;
+
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
@@ -3571,16 +3635,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
+ u32 doorbell_value;
+ unsigned long flags;
/* under certain very rare conditions, this can take a while.
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
* as we enter this code.)
*/
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & CFGTBL_ChangeReq))
break;
/* delay and try again */
- msleep(10);
+ usleep_range(10000, 20000);
}
}
@@ -3603,6 +3672,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
"unable to get board into simple mode\n");
return -ENODEV;
}
+ h->transMethod = CFGTBL_Trans_Simple;
return 0;
}
@@ -3641,7 +3711,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
err = -ENOMEM;
goto err_out_free_res;
}
- err = hpsa_wait_for_board_ready(h);
+ err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = hpsa_find_cfgtables(h);
@@ -3710,8 +3780,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
return 0; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
- if (hpsa_reset_msi(pdev))
- return -ENODEV;
/* Now try to get the controller to respond to a no-op */
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
@@ -3749,8 +3817,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->pdev = pdev;
h->busy_initializing = 1;
- INIT_HLIST_HEAD(&h->cmpQ);
- INIT_HLIST_HEAD(&h->reqQ);
+ h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
+ INIT_LIST_HEAD(&h->cmpQ);
+ INIT_LIST_HEAD(&h->reqQ);
+ spin_lock_init(&h->lock);
+ spin_lock_init(&h->scan_lock);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
@@ -3777,20 +3848,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->access.set_intr_mask(h, HPSA_INTR_OFF);
if (h->msix_vector || h->msi_vector)
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
IRQF_DISABLED, h->devname, h);
else
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
IRQF_DISABLED, h->devname, h);
if (rc) {
dev_err(&pdev->dev, "unable to get irq %d for %s\n",
- h->intr[PERF_MODE_INT], h->devname);
+ h->intr[h->intr_mode], h->devname);
goto clean2;
}
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
- h->intr[PERF_MODE_INT], dac ? "" : " not");
+ h->intr[h->intr_mode], dac ? "" : " not");
h->cmd_pool_bits =
kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3810,8 +3881,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
}
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
- spin_lock_init(&h->lock);
- spin_lock_init(&h->scan_lock);
init_waitqueue_head(&h->scan_wait_queue);
h->scan_finished = 1; /* no scan currently in progress */
@@ -3843,7 +3912,7 @@ clean4:
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
h->busy_initializing = 0;
@@ -3887,7 +3956,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
@@ -3989,7 +4058,8 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+ u32 use_short_tags)
{
int i;
unsigned long register_value;
@@ -4037,7 +4107,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
writel(0, &h->transtable->RepQCtrAddrHigh32);
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant,
+ writel(CFGTBL_Trans_Performant | use_short_tags,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
@@ -4047,12 +4117,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
" performant mode\n");
return;
}
+ /* Change the access methods to the performant access methods */
+ h->access = SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
}
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
+ if (hpsa_simple_mode)
+ return;
+
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
@@ -4072,11 +4148,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|| (h->blockFetchTable == NULL))
goto clean_up;
- hpsa_enter_performant_mode(h);
-
- /* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
+ hpsa_enter_performant_mode(h,
+ trans_support & CFGTBL_Trans_use_short_tags);
return;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 19586e189f0f..621a1530054a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -72,11 +72,12 @@ struct ctlr_info {
unsigned int intr[4];
unsigned int msix_vector;
unsigned int msi_vector;
+ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
/* queue and queue Info */
- struct hlist_head reqQ;
- struct hlist_head cmpQ;
+ struct list_head reqQ;
+ struct list_head cmpQ;
unsigned int Qdepth;
unsigned int maxQsinceinit;
unsigned int maxSG;
@@ -154,12 +155,16 @@ struct ctlr_info {
* HPSA_BOARD_READY_ITERATIONS are derived from those.
*/
#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_BOARD_NOT_READY_ITERATIONS \
+ ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index f5c4c3cc0530..18464900e761 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -104,6 +104,7 @@
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -265,6 +266,7 @@ struct ErrorInfo {
#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
#define HPSA_ERROR_BIT 0x02
struct ctlr_info; /* defined in hpsa.h */
@@ -291,7 +293,7 @@ struct CommandList {
struct ctlr_info *h;
int cmd_type;
long cmdindex;
- struct hlist_node list;
+ struct list_head list;
struct request *rq;
struct completion *waiting;
void *scsi_cmd;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d841e98a8bd5..0621238fac4a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
ipr_clear_res_target(res);
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}
- } else if (!res->sdev) {
+ } else if (!res->sdev || res->del_from_ml) {
res->add_to_ml = 1;
if (ioa_cfg->allow_ml_add_del)
schedule_work(&ioa_cfg->work_q);
@@ -3104,7 +3104,10 @@ restart:
did_work = 1;
sdev = res->sdev;
if (!scsi_device_get(sdev)) {
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
@@ -8864,7 +8867,7 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- flush_scheduled_work();
+ flush_work_sync(&ioa_cfg->work_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index fec47de72535..a860452a8f71 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
iscsi_sw_tcp_release_conn(conn);
}
-static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
- char *buf, int *port,
- int (*getname)(struct socket *,
- struct sockaddr *,
- int *addrlen))
-{
- struct sockaddr_storage *addr;
- struct sockaddr_in6 *sin6;
- struct sockaddr_in *sin;
- int rc = 0, len;
-
- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- if (getname(sock, (struct sockaddr *) addr, &len)) {
- rc = -ENODEV;
- goto free_addr;
- }
-
- switch (addr->ss_family) {
- case AF_INET:
- sin = (struct sockaddr_in *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
- *port = be16_to_cpu(sin->sin_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- case AF_INET6:
- sin6 = (struct sockaddr_in6 *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI6", &sin6->sin6_addr);
- *port = be16_to_cpu(sin6->sin6_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- }
-free_addr:
- kfree(addr);
- return rc;
-}
-
static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
"sockfd_lookup failed %d\n", err);
return -EEXIST;
}
- /*
- * copy these values now because if we drop the session
- * userspace may still want to query the values since we will
- * be using them for the reconnect
- */
- err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
- &conn->portal_port, kernel_getpeername);
- if (err)
- goto free_socket;
-
- err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
- &ihost->local_port, kernel_getsockname);
- if (err)
- goto free_socket;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
goto free_socket;
+ spin_lock_bh(&session->lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
+ spin_unlock_bh(&session->lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- int len;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sockaddr_in6 addr;
+ int rc, len;
switch(param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%hu\n", conn->portal_port);
- spin_unlock_bh(&conn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%s\n", conn->portal_address);
+ if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ spin_unlock_bh(&conn->session->lock);
+ return -ENOTCONN;
+ }
+ rc = kernel_getpeername(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
spin_unlock_bh(&conn->session->lock);
- break;
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
- return len;
+ return 0;
+}
+
+static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
+ struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct sockaddr_in6 addr;
+ int rc, len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ spin_lock_bh(&session->lock);
+ conn = session->leadconn;
+ if (!conn) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+ tcp_conn = conn->dd_data;
+
+ tcp_sw_conn = tcp_conn->dd_data;
+ if (!tcp_sw_conn->sock) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+
+ rc = kernel_getsockname(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ spin_unlock_bh(&session->lock);
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return 0;
}
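
Both get_param paths above now read the address live from the socket (kernel_getpeername for the peer, kernel_getsockname for the local side) and hand the filled sockaddr to a shared libiscsi formatter. A userspace analogue of that final formatting step, using only standard socket APIs; iscsi_conn_get_addr_param itself lives in libiscsi and is not shown here:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* Format either the address or the port held in a sockaddr_storage. */
static int format_addr_param(const struct sockaddr_storage *ss,
                             int want_port, char *buf, size_t len)
{
        char addr[INET6_ADDRSTRLEN];

        switch (ss->ss_family) {
        case AF_INET: {
                const struct sockaddr_in *sin = (const void *)ss;

                if (want_port)
                        return snprintf(buf, len, "%u\n", ntohs(sin->sin_port));
                inet_ntop(AF_INET, &sin->sin_addr, addr, sizeof(addr));
                break;
        }
        case AF_INET6: {
                const struct sockaddr_in6 *sin6 = (const void *)ss;

                if (want_port)
                        return snprintf(buf, len, "%u\n", ntohs(sin6->sin6_port));
                inet_ntop(AF_INET6, &sin6->sin6_addr, addr, sizeof(addr));
                break;
        }
        default:
                return -1;
        }
        return snprintf(buf, len, "%s\n", addr);
}
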
static void
@@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
+ struct iscsi_sw_tcp_host *tcp_sw_host;
struct Scsi_Host *shost;
if (ep) {
@@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
return NULL;
}
- shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
+ shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
+ sizeof(struct iscsi_sw_tcp_host), 1);
if (!shost)
return NULL;
shost->transportt = iscsi_sw_tcp_scsi_transport;
@@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
shost->can_queue = session->scsi_cmds_max;
if (iscsi_tcp_r2tpool_alloc(session))
@@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_sw_tcp_conn_stop,
/* iscsi host params */
- .get_host_param = iscsi_host_get_param,
+ .get_host_param = iscsi_sw_tcp_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 94644bad0ed7..666fe09378fa 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn {
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
};
+struct iscsi_sw_tcp_host {
+ struct iscsi_session *session;
+};
+
struct iscsi_sw_tcp_hdrbuf {
struct iscsi_hdr hdrbuf;
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d21367d3305f..28231badd9e6 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -38,7 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
-struct workqueue_struct *fc_exch_workqueue;
+static struct workqueue_struct *fc_exch_workqueue;
/*
* Structure and function definitions for managing Fibre Channel Exchanges
@@ -558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+/*
+ * Set the response handler for the exchange associated with a sequence.
+ */
+static void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *,
+ void *),
+ void *arg)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->resp = resp;
+ ep->arg = arg;
+ spin_unlock_bh(&ep->ex_lock);
+}
+
/**
* fc_seq_exch_abort() - Abort an exchange and sequence
* @req_sp: The sequence to be aborted
@@ -650,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work)
if (e_stat & ESB_ST_ABNORMAL)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
if (resp)
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
- if (!rc) {
- /* delete the exchange if it's already being aborted */
- fc_exch_delete(ep);
- return;
- }
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
goto done;
}
@@ -1266,6 +1279,8 @@ free:
* @fp: The request frame
*
* On success, the sequence pointer will be returned and also set in fr_seq(@fp).
+ * A reference will be held on the exchange/sequence for the caller, which
+ * must call fc_seq_release().
*/
static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
@@ -1283,6 +1298,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
}
/**
+ * fc_seq_release() - Release the hold
+ * @sp: The sequence.
+ */
+static void fc_seq_release(struct fc_seq *sp)
+{
+ fc_exch_release(fc_seq_exch(sp));
+}
+
+/**
* fc_exch_recv_req() - Handler for an incoming request
* @lport: The local port that received the request
* @mp: The EM that the exchange is on
@@ -2151,6 +2175,7 @@ err:
fc_exch_mgr_del(ema);
return -ENOMEM;
}
+EXPORT_SYMBOL(fc_exch_mgr_list_clone);
/**
* fc_exch_mgr_alloc() - Allocate an exchange manager
@@ -2254,16 +2279,45 @@ void fc_exch_mgr_free(struct fc_lport *lport)
EXPORT_SYMBOL(fc_exch_mgr_free);
/**
+ * fc_find_ema() - Look up the Exchange Manager Anchor that owns a frame's xid
+ * @f_ctl: f_ctl
+ * @lport: The local port the frame was received on
+ * @fh: The received frame header
+ */
+static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
+ struct fc_lport *lport,
+ struct fc_frame_header *fh)
+{
+ struct fc_exch_mgr_anchor *ema;
+ u16 xid;
+
+ if (f_ctl & FC_FC_EX_CTX)
+ xid = ntohs(fh->fh_ox_id);
+ else {
+ xid = ntohs(fh->fh_rx_id);
+ if (xid == FC_XID_UNKNOWN)
+ return list_entry(lport->ema_list.prev,
+ typeof(*ema), ema_list);
+ }
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if ((xid >= ema->mp->min_xid) &&
+ (xid <= ema->mp->max_xid))
+ return ema;
+ }
+ return NULL;
+}
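
fc_find_ema() picks the exchange ID by direction: responses (FC_FC_EX_CTX set) are matched on OX_ID, requests on RX_ID, and an unknown RX_ID falls back to the last anchor on the lport's list. The core of the search is a range test per manager; a reduced sketch with the list walk flattened to an array (types simplified, not the libfc API):

#include <stddef.h>
#include <stdint.h>

struct ema_range {
        uint16_t min_xid;
        uint16_t max_xid;
};

/* Return the index of the manager owning xid, or -1 if none matches. */
static int find_ema_index(const struct ema_range *ema, size_t n, uint16_t xid)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (xid >= ema[i].min_xid && xid <= ema[i].max_xid)
                        return (int)i;
        return -1;
}
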
+/**
* fc_exch_recv() - Handler for received frames
* @lport: The local port the frame was received on
- * @fp: The received frame
+ * @fp: The received frame
*/
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_exch_mgr_anchor *ema;
- u32 f_ctl, found = 0;
- u16 oxid;
+ u32 f_ctl;
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
@@ -2274,24 +2328,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
}
f_ctl = ntoh24(fh->fh_f_ctl);
- oxid = ntohs(fh->fh_ox_id);
- if (f_ctl & FC_FC_EX_CTX) {
- list_for_each_entry(ema, &lport->ema_list, ema_list) {
- if ((oxid >= ema->mp->min_xid) &&
- (oxid <= ema->mp->max_xid)) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- FC_LPORT_DBG(lport, "Received response for out "
- "of range oxid:%hx\n", oxid);
- fc_frame_free(fp);
- return;
- }
- } else
- ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
+ ema = fc_find_ema(f_ctl, lport, fh);
+ if (!ema) {
+ FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
+ "fc_ctl <0x%x>, xid <0x%x>\n",
+ f_ctl,
+ (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) :
+ ntohs(fh->fh_rx_id));
+ fc_frame_free(fp);
+ return;
+ }
/*
* If frame is marked invalid, just drop it.
@@ -2329,6 +2376,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_start_next)
lport->tt.seq_start_next = fc_seq_start_next;
+ if (!lport->tt.seq_set_resp)
+ lport->tt.seq_set_resp = fc_seq_set_resp;
+
if (!lport->tt.exch_seq_send)
lport->tt.exch_seq_send = fc_exch_seq_send;
@@ -2350,6 +2400,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_assign)
lport->tt.seq_assign = fc_seq_assign;
+ if (!lport->tt.seq_release)
+ lport->tt.seq_release = fc_seq_release;
+
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
@@ -2357,7 +2410,7 @@ EXPORT_SYMBOL(fc_exch_init);
/**
* fc_setup_exch_mgr() - Setup an exchange manager
*/
-int fc_setup_exch_mgr()
+int fc_setup_exch_mgr(void)
{
fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
0, SLAB_HWCACHE_ALIGN, NULL);
@@ -2395,7 +2448,7 @@ int fc_setup_exch_mgr()
/**
* fc_destroy_exch_mgr() - Destroy an exchange manager
*/
-void fc_destroy_exch_mgr()
+void fc_destroy_exch_mgr(void)
{
destroy_workqueue(fc_exch_workqueue);
kmem_cache_destroy(fc_em_cachep);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5962d1a5a674..b1b03af158bf 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -42,7 +42,7 @@
#include "fc_libfc.h"
-struct kmem_cache *scsi_pkt_cachep;
+static struct kmem_cache *scsi_pkt_cachep;
/* SRB state definitions */
#define FC_SRB_FREE 0 /* cmd is free */
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
if (fsp) {
memset(fsp, 0, sizeof(*fsp));
fsp->lp = lport;
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
INIT_LIST_HEAD(&fsp->list);
@@ -1201,6 +1202,7 @@ unlock:
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
{
int rc = FAILED;
+ unsigned long ticks_left;
if (fc_fcp_send_abort(fsp))
return FAILED;
@@ -1209,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
fsp->wait_for_comp = 1;
spin_unlock_bh(&fsp->scsi_pkt_lock);
- rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+ ticks_left = wait_for_completion_timeout(&fsp->tm_done,
+ FC_SCSI_TM_TOV);
spin_lock_bh(&fsp->scsi_pkt_lock);
fsp->wait_for_comp = 0;
- if (!rc) {
+ if (!ticks_left) {
FC_FCP_DBG(fsp, "target abort cmd failed\n");
- rc = FAILED;
} else if (fsp->state & FC_SRB_ABORTED) {
FC_FCP_DBG(fsp, "target abort cmd passed\n");
rc = SUCCESS;
@@ -1321,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
*
* scsi-eh will escalate for when either happens.
*/
- goto out;
+ return;
}
if (fc_fcp_lock_pkt(fsp))
@@ -1787,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
/**
* fc_queuecommand() - The queuecommand function of the SCSI template
+ * @shost: The Scsi_Host that the command was issued to
* @cmd: The scsi_cmnd to be executed
- * @done: The callback function to be called when the scsi_cmnd is complete
*
- * This is the i/o strategy routine, called by the SCSI layer. This routine
- * is called with the host_lock held.
+ * This is the i/o strategy routine, called by the SCSI layer.
*/
-static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
{
- struct fc_lport *lport;
+ struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
struct fc_rport_libfc_priv *rpriv;
@@ -1803,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
int rc = 0;
struct fcoe_dev_stats *stats;
- lport = shost_priv(sc_cmd->device->host);
-
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
return 0;
}
- spin_unlock_irq(lport->host->host_lock);
if (!*(struct fc_remote_port **)rport->dd_data) {
/*
@@ -1819,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
goto out;
}
@@ -1842,10 +1840,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* build the libfc request pkt
*/
fsp->cmd = sc_cmd; /* save the cmd */
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
- fsp->xfer_ddp = FC_XID_UNKNOWN;
- sc_cmd->scsi_done = done;
/*
* set up the transfer length
@@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
rc = SCSI_MLQUEUE_HOST_BUSY;
}
out:
- spin_lock_irq(lport->host->host_lock);
return rc;
}
-
-DEF_SCSI_QCMD(fc_queuecommand)
EXPORT_SYMBOL(fc_queuecommand);
/**
@@ -2112,7 +2104,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
* the sc passed in is not setup for execution like when sent
* through the queuecommand callout.
*/
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
/*
@@ -2245,7 +2236,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
}
EXPORT_SYMBOL(fc_fcp_destroy);
-int fc_setup_fcp()
+int fc_setup_fcp(void)
{
int rc = 0;
@@ -2261,7 +2252,7 @@ int fc_setup_fcp()
return rc;
}
-void fc_destroy_fcp()
+void fc_destroy_fcp(void)
{
if (scsi_pkt_cachep)
kmem_cache_destroy(scsi_pkt_cachep);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 6a48c28e4420..b7735129f1f3 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -35,6 +35,27 @@ unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+DEFINE_MUTEX(fc_prov_mutex);
+static LIST_HEAD(fc_local_ports);
+struct blocking_notifier_head fc_lport_notifier_head =
+ BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
+EXPORT_SYMBOL(fc_lport_notifier_head);
+
+/*
+ * Providers which primarily send requests and PRLIs.
+ */
+struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
+ [0] = &fc_rport_t0_prov,
+ [FC_TYPE_FCP] = &fc_rport_fcp_init,
+};
+
+/*
+ * Providers which receive requests.
+ */
+struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
+ [FC_TYPE_ELS] = &fc_lport_els_prov,
+};
+
/**
* libfc_init() - Initialize libfc.ko
*/
@@ -210,3 +231,102 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
+
+/**
+ * fc_fc4_conf_lport_params() - Modify the service_params of the specified
+ * lport if a service provider (target provider) is registered with libfc
+ * for the specified FC-4 type
+ * @lport: The local port whose service_params are to be modified
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ */
+void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
+{
+ struct fc4_prov *prov_entry;
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ BUG_ON(!lport);
+ prov_entry = fc_passive_prov[type];
+ if (type == FC_TYPE_FCP) {
+ if (prov_entry && prov_entry->recv)
+ lport->service_params |= FCP_SPPF_TARG_FCN;
+ }
+}
+
+void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
+{
+ struct fc_lport *lport;
+
+ mutex_lock(&fc_prov_mutex);
+ list_for_each_entry(lport, &fc_local_ports, lport_list)
+ notify(lport, arg);
+ mutex_unlock(&fc_prov_mutex);
+}
+EXPORT_SYMBOL(fc_lport_iterate);
+
+/**
+ * fc_fc4_register_provider() - register FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ struct fc4_prov **prov_entry;
+ int ret = 0;
+
+ if (type >= FC_FC4_PROV_SIZE)
+ return -EINVAL;
+ mutex_lock(&fc_prov_mutex);
+ prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
+ if (*prov_entry)
+ ret = -EBUSY;
+ else
+ *prov_entry = prov;
+ mutex_unlock(&fc_prov_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(fc_fc4_register_provider);
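
The slot is chosen by whether the provider has a recv handler: receivers go into the passive table, request-senders into the active one, with -EBUSY if the type is already taken. A hedged kernel-side sketch of registering a hypothetical FCP target provider, assuming struct fc4_prov and these calls are exposed through the libfc headers as this patch suggests; the callback signatures follow fc_lport_els_prli() and the recv handlers above:

#include <linux/module.h>
#include <scsi/libfc.h>

/* Stub handlers for a hypothetical target provider. */
static void my_targ_recv(struct fc_lport *lport, struct fc_frame *fp)
{
        fc_frame_free(fp); /* stub: drop everything */
}

static int my_targ_prli(struct fc_rport_priv *rdata, u32 spp_len,
                        const struct fc_els_spp *spp_in,
                        struct fc_els_spp *spp_out)
{
        return FC_SPP_RESP_ACK; /* stub: accept the image pair */
}

/* recv != NULL routes this into fc_passive_prov[FC_TYPE_FCP]. */
static struct fc4_prov my_targ_prov = {
        .module = THIS_MODULE,
        .recv   = my_targ_recv,
        .prli   = my_targ_prli,
};

static int __init my_targ_init(void)
{
        return fc_fc4_register_provider(FC_TYPE_FCP, &my_targ_prov);
}

static void __exit my_targ_exit(void)
{
        fc_fc4_deregister_provider(FC_TYPE_FCP, &my_targ_prov);
}

module_init(my_targ_init);
module_exit(my_targ_exit);
MODULE_LICENSE("GPL");
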
+
+/**
+ * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ */
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ mutex_lock(&fc_prov_mutex);
+ if (prov->recv)
+ rcu_assign_pointer(fc_passive_prov[type], NULL);
+ else
+ rcu_assign_pointer(fc_active_prov[type], NULL);
+ mutex_unlock(&fc_prov_mutex);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(fc_fc4_deregister_provider);
+
+/**
+ * fc_fc4_add_lport() - add new local port to list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_add_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_add_tail(&lport->lport_list, &fc_local_ports);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_ADD, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
+
+/**
+ * fc_fc4_del_lport() - remove local port from list and run notifiers.
+ * @lport: The local port to remove.
+ */
+void fc_fc4_del_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_del(&lport->lport_list);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_DEL, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index eea0c3541b71..fedc819d70c0 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -94,6 +94,17 @@ extern unsigned int fc_debug_logging;
(lport)->host->host_no, ##args))
/*
+ * FC-4 Providers.
+ */
+extern struct fc4_prov *fc_active_prov[]; /* providers without recv */
+extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */
+extern struct mutex fc_prov_mutex; /* lock over table changes */
+
+extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */
+extern struct fc4_prov fc_lport_els_prov; /* ELS provider */
+extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
+
+/*
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
@@ -112,6 +123,9 @@ void fc_destroy_fcp(void);
* Internal libfc functions
*/
const char *fc_els_resp_type(struct fc_frame *);
+extern void fc_fc4_add_lport(struct fc_lport *);
+extern void fc_fc4_del_lport(struct fc_lport *);
+extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
/*
* Copies a buffer into an sg list
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c5a10f94f845..8c08b210001d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport)
lport->tt.fcp_abort_io(lport);
lport->tt.disc_stop_final(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
+ fc_fc4_del_lport(lport);
return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);
@@ -849,7 +850,7 @@ out:
}
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv_els_req() - The generic lport ELS request handler
* @lport: The local port that received the request
* @fp: The request frame
*
@@ -859,9 +860,9 @@ out:
* Locking Note: This function should not be called with the lport
* lock held because it will grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_lport_recv_els_req(struct fc_lport *lport,
+ struct fc_frame *fp)
{
- struct fc_frame_header *fh = fc_frame_header_get(fp);
void (*recv)(struct fc_lport *, struct fc_frame *);
mutex_lock(&lport->lp_mutex);
@@ -873,8 +874,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
*/
if (!lport->link_up)
fc_frame_free(fp);
- else if (fh->fh_type == FC_TYPE_ELS &&
- fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+ else {
/*
* Check opcode.
*/
@@ -903,14 +903,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
}
recv(lport, fp);
- } else {
- FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
- fr_eof(fp));
- fc_frame_free(fp);
}
mutex_unlock(&lport->lp_mutex);
}
+static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out)
+{
+ return FC_SPP_RESP_INVL;
+}
+
+struct fc4_prov fc_lport_els_prov = {
+ .prli = fc_lport_els_prli,
+ .recv = fc_lport_recv_els_req,
+};
+
+/**
+ * fc_lport_recv_req() - The generic lport request handler
+ * @lport: The lport that received the request
+ * @fp: The frame the request is in
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it may grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = fr_seq(fp);
+ struct fc4_prov *prov;
+
+ /*
+ * Use the RCU read lock and a module reference to be sure the provider
+ * doesn't deregister and get unloaded while we're calling it.
+ * try_module_get() is inlined and accepts a NULL parameter.
+ * Only ELSes and FCP target ops should come through here.
+ * The locking is unfortunate, and a better scheme is being sought.
+ */
+
+ rcu_read_lock();
+ if (fh->fh_type >= FC_FC4_PROV_SIZE)
+ goto drop;
+ prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
+ if (!prov || !try_module_get(prov->module))
+ goto drop;
+ rcu_read_unlock();
+ prov->recv(lport, fp);
+ module_put(prov->module);
+ return;
+drop:
+ rcu_read_unlock();
+ FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
+ fc_frame_free(fp);
+ lport->tt.exch_done(sp);
+}
+
/**
* fc_lport_reset() - Reset a local port
* @lport: The local port which should be reset
@@ -1542,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
*/
int fc_lport_config(struct fc_lport *lport)
{
+ INIT_LIST_HEAD(&lport->ema_list);
INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
mutex_init(&lport->lp_mutex);
@@ -1549,6 +1598,7 @@ int fc_lport_config(struct fc_lport *lport)
fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+ fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
return 0;
}
@@ -1586,6 +1636,7 @@ int fc_lport_init(struct fc_lport *lport)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ fc_fc4_add_lport(lport);
return 0;
}
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index dd2b43bb1c70..f33b897e4784 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
vn_port = libfc_host_alloc(shost->hostt, privsize);
if (!vn_port)
- goto err_out;
- if (fc_exch_mgr_list_clone(n_port, vn_port))
- goto err_put;
+ return vn_port;
vn_port->vport = vport;
vport->dd_data = vn_port;
@@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
mutex_unlock(&n_port->lp_mutex);
return vn_port;
-
-err_put:
- scsi_host_put(vn_port->host);
-err_out:
- return NULL;
}
EXPORT_SYMBOL(libfc_vport_create);
@@ -86,6 +79,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
return lport;
}
+EXPORT_SYMBOL(fc_vport_id_lookup);
/*
* When setting the link state of vports during an lport state change, it's
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index a7175adab32d..49e1ccca09d5 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -58,7 +58,7 @@
#include "fc_libfc.h"
-struct workqueue_struct *rport_event_queue;
+static struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_flogi(struct fc_rport_priv *);
static void fc_rport_enter_plogi(struct fc_rport_priv *);
@@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
INIT_WORK(&rdata->event_work, fc_rport_work);
- if (port_id != FC_FID_DIR_SERV)
+ if (port_id != FC_FID_DIR_SERV) {
+ rdata->lld_event_callback = lport->tt.rport_event_callback;
list_add_rcu(&rdata->peers, &lport->disc.rports);
+ }
return rdata;
}
@@ -257,6 +259,8 @@ static void fc_rport_work(struct work_struct *work)
struct fc_rport_operations *rport_ops;
struct fc_rport_identifiers ids;
struct fc_rport *rport;
+ struct fc4_prov *prov;
+ u8 type;
mutex_lock(&rdata->rp_mutex);
event = rdata->event;
@@ -300,12 +304,25 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
kref_put(&rdata->kref, lport->tt.rport_destroy);
break;
case RPORT_EV_FAILED:
case RPORT_EV_LOGO:
case RPORT_EV_STOP:
+ if (rdata->prli_count) {
+ mutex_lock(&fc_prov_mutex);
+ for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
+ prov = fc_passive_prov[type];
+ if (prov && prov->prlo)
+ prov->prlo(rdata);
+ }
+ mutex_unlock(&fc_prov_mutex);
+ }
port_id = rdata->ids.port_id;
mutex_unlock(&rdata->rp_mutex);
@@ -313,6 +330,10 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
cancel_delayed_work_sync(&rdata->retry_work);
/*
@@ -336,6 +357,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -575,7 +597,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
- return fc_rport_error(rdata, fp);
+ goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
@@ -588,7 +610,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
return;
}
- return fc_rport_error(rdata, fp);
+out:
+ fc_rport_error(rdata, fp);
}
/**
@@ -878,6 +901,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
+ /* save plogi response sp_features for further reference */
+ rdata->sp_features = ntohs(plp->fl_csp.sp_features);
+
if (lport->point_to_multipoint)
fc_rport_login_complete(rdata, fp);
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
@@ -949,6 +975,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_prli prli;
struct fc_els_spp spp;
} *pp;
+ struct fc_els_spp temp_spp;
+ struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
@@ -983,6 +1011,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
pp->spp.spp_flags);
+ rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
fc_rport_error(rdata, fp);
@@ -996,6 +1025,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
rdata->flags |= FC_RP_FLAGS_RETRY;
+ if (fcp_parm & FCP_SPPF_CONF_COMPL)
+ rdata->flags |= FC_RP_FLAGS_CONF_REQ;
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -1033,6 +1071,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_els_spp spp;
} *pp;
struct fc_frame *fp;
+ struct fc4_prov *prov;
/*
* If the rport is one of the well known addresses
@@ -1054,9 +1093,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
- if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
- fc_rport_prli_resp, rdata,
- 2 * lport->r_a_tov))
+ fc_prli_fill(lport, fp);
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
+ }
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
+ fc_host_port_id(lport->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov))
fc_rport_error_retry(rdata, NULL);
else
kref_get(&rdata->kref);
@@ -1642,9 +1692,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
+ enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
- u32 fcp_parm;
- u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ struct fc4_prov *prov;
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
@@ -1678,46 +1728,42 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
pp->prli.prli_len = htons(len);
len -= sizeof(struct fc_els_prli);
- /* reinitialize remote port roles */
- rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
/*
* Go through all the service parameter pages and build
* response. If plen indicates longer SPP than standard,
* use that. The entire response has been pre-cleared above.
*/
spp = &pp->spp;
+ mutex_lock(&fc_prov_mutex);
while (len >= plen) {
+ rdata->spp_type = rspp->spp_type;
spp->spp_type = rspp->spp_type;
spp->spp_type_ext = rspp->spp_type_ext;
- spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
- resp = FC_SPP_RESP_ACK;
-
- switch (rspp->spp_type) {
- case 0: /* common to all FC-4 types */
- break;
- case FC_TYPE_FCP:
- fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
- rdata->flags |= FC_RP_FLAGS_RETRY;
- rdata->supported_classes = FC_COS_CLASS3;
- if (fcp_parm & FCP_SPPF_INIT_FCN)
- roles |= FC_RPORT_ROLE_FCP_INITIATOR;
- if (fcp_parm & FCP_SPPF_TARG_FCN)
- roles |= FC_RPORT_ROLE_FCP_TARGET;
- rdata->ids.roles = roles;
-
- spp->spp_params = htonl(lport->service_params);
- break;
- default:
- resp = FC_SPP_RESP_INVL;
- break;
+ resp = 0;
+
+ if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_active_prov[rspp->spp_type];
+ if (prov)
+ resp = prov->prli(rdata, plen, rspp, spp);
+ prov = fc_passive_prov[rspp->spp_type];
+ if (prov) {
+ passive = prov->prli(rdata, plen, rspp, spp);
+ if (!resp || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ }
+ }
+ if (!resp) {
+ if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ resp |= FC_SPP_RESP_CONF;
+ else
+ resp |= FC_SPP_RESP_INVL;
}
spp->spp_flags |= resp;
len -= plen;
rspp = (struct fc_els_spp *)((char *)rspp + plen);
spp = (struct fc_els_spp *)((char *)spp + plen);
}
+ mutex_unlock(&fc_prov_mutex);
/*
* Send LS_ACC. If this fails, the originator should retry.
@@ -1887,9 +1933,82 @@ int fc_rport_init(struct fc_lport *lport)
EXPORT_SYMBOL(fc_rport_init);
/**
+ * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns the response code value to be placed in spp_flags,
+ * or 0 if the local port is not an initiator.
+ */
+static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ u32 fcp_parm;
+
+ fcp_parm = ntohl(rspp->spp_params);
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+
+ if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
+ return 0;
+
+ spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+
+ /*
+ * OR in our service parameters with other providers (target), if any.
+ */
+ fcp_parm = ntohl(spp->spp_params);
+ spp->spp_params = htonl(fcp_parm | lport->service_params);
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for FCP initiator.
+ */
+struct fc4_prov fc_rport_fcp_init = {
+ .prli = fc_rport_fcp_prli,
+};
+
+/**
+ * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ */
+static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ return FC_SPP_RESP_INVL;
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for type 0 service parameters.
+ *
+ * This handles the special case of type 0 which is always successful
+ * but doesn't do anything otherwise.
+ */
+struct fc4_prov fc_rport_t0_prov = {
+ .prli = fc_rport_t0_prli,
+};
+
+/**
* fc_setup_rport() - Initialize the rport_event_queue
*/
-int fc_setup_rport()
+int fc_setup_rport(void)
{
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
if (!rport_event_queue)
@@ -1900,7 +2019,7 @@ int fc_setup_rport()
/**
* fc_destroy_rport() - Destroy the rport_event_queue
*/
-void fc_destroy_rport()
+void fc_destroy_rport(void)
{
destroy_workqueue(rport_event_queue);
}
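For context on the provider hooks introduced above: each FC-4 type can have an active and a passive fc4_prov entry, and the PRLI handler consults both, letting the passive answer win when nothing has answered yet or when the passive provider ACKs. Below is a minimal standalone C sketch of that dispatch shape; the struct, table, and response codes are simplified stand-ins for illustration, not the libfc API.

/* Minimal userspace sketch of the per-FC-4-type provider dispatch used
 * in fc_rport_recv_prli_req() above; the types and names here are
 * simplified stand-ins, not the libfc API. */
#include <stdio.h>

#define PROV_SIZE	8
#define RESP_ACK	1	/* request executed */
#define RESP_CONF	2	/* cannot establish image pair */
#define RESP_INVL	4	/* invalid page */

struct prov {
	int (*prli)(void *rdata);	/* returns a RESP_* code or 0 */
};

static struct prov *active_prov[PROV_SIZE];
static struct prov *passive_prov[PROV_SIZE];

/* Combine active and passive answers the way the loop above does: the
 * passive provider's answer wins when nothing has answered yet or when
 * the passive provider ACKs. */
static int dispatch_prli(unsigned int type, void *rdata)
{
	int resp = 0, passive;

	if (type >= PROV_SIZE)
		return 0;
	if (active_prov[type])
		resp = active_prov[type]->prli(rdata);
	if (passive_prov[type]) {
		passive = passive_prov[type]->prli(rdata);
		if (!resp || passive == RESP_ACK)
			resp = passive;
	}
	return resp;
}

static int fcp_prli(void *rdata) { return RESP_ACK; }
static struct prov fcp_prov = { .prli = fcp_prli };

int main(void)
{
	passive_prov[2] = &fcp_prov;	/* pretend type 2 is FCP */
	printf("resp=%d\n", dispatch_prli(2, NULL));
	return 0;
}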
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index da8b61543ee4..0c550d5b9133 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_session_get_param);
+int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ enum iscsi_param param, char *buf)
+{
+ struct sockaddr_in6 *sin6 = NULL;
+ struct sockaddr_in *sin = NULL;
+ int len;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)addr;
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (sin)
+ len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ else
+ len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+ break;
+ case ISCSI_PARAM_CONN_PORT:
+ if (sin)
+ len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ else
+ len = sprintf(buf, "%hu\n",
+ be16_to_cpu(sin6->sin6_port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
+
int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
@@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
case ISCSI_HOST_PARAM_INITIATOR_NAME:
len = sprintf(buf, "%s\n", ihost->initiatorname);
break;
- case ISCSI_HOST_PARAM_IPADDRESS:
- len = sprintf(buf, "%s\n", ihost->local_address);
- break;
default:
return -ENOSYS;
}
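The new iscsi_conn_get_addr_param() helper replaces the host-wide local_address string with per-connection sockaddr formatting. As a rough userspace analogue (not kernel code), the same dual-family formatting can be sketched with inet_ntop(), which plays the role of the kernel's %pI4/%pI6 printk extensions:

/* Userspace analogue of the address formatting above; inet_ntop()
 * stands in for the kernel's %pI4/%pI6 printk extensions. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>
#include <string.h>

static int format_addr(const struct sockaddr_storage *addr, char *buf,
		       size_t buflen)
{
	const struct sockaddr_in *sin;
	const struct sockaddr_in6 *sin6;

	switch (addr->ss_family) {
	case AF_INET:
		sin = (const struct sockaddr_in *)addr;
		if (!inet_ntop(AF_INET, &sin->sin_addr, buf, buflen))
			return -1;
		return 0;
	case AF_INET6:
		sin6 = (const struct sockaddr_in6 *)addr;
		if (!inet_ntop(AF_INET6, &sin6->sin6_addr, buf, buflen))
			return -1;
		return 0;
	default:
		return -1;	/* mirrors the -EINVAL above */
	}
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
	char buf[INET6_ADDRSTRLEN];

	memset(&ss, 0, sizeof(ss));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, "192.168.0.1", &sin->sin_addr);
	if (!format_addr(&ss, buf, sizeof(buf)))
		printf("%s\n", buf);
	return 0;
}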
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 18f33cd54411..9dafe64e7c7a 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP
Allows sas hosts to receive SMP frames. Selecting this
option builds an SMP interpreter into libsas. Say
N here if you want to save the few kb this consumes.
-
-config SCSI_SAS_LIBSAS_DEBUG
- bool "Compile the SAS Domain Transport Attributes in debug mode"
- default y
- depends on SCSI_SAS_LIBSAS
- help
- Compiles the SAS Layer in debug mode. In debug mode, the
- SAS Layer prints diagnostic and debug messages.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 1ad1323c60fa..566a10024598 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -21,10 +21,6 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
-ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
- EXTRA_CFLAGS += -DSAS_DEBUG
-endif
-
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
libsas-y += sas_init.o \
sas_phy.o \
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4d3b704ede1c..31fc21f4d831 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_SG_ERR:
return AC_ERR_INVALID;
- case SAM_STAT_CHECK_CONDITION:
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
__func__, ts->stat);
return AC_ERR_OTHER;
+ case SAM_STAT_CHECK_CONDITION:
case SAS_ABORTED_TASK:
return AC_ERR_DEV;
@@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task)
sas_ha = dev->port->ha;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
- if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) {
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
- } else if (stat->stat != SAM_STAT_GOOD) {
+ } else {
ac = sas_to_ata_err(stat);
if (ac) {
SAS_DPRINTK("%s: SAS error %x\n", __func__,
@@ -305,55 +307,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
}
}
-static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
- u32 val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- dev->sata_dev.sstatus = val;
- break;
- case SCR_CONTROL:
- dev->sata_dev.scontrol = val;
- break;
- case SCR_ERROR:
- dev->sata_dev.serror = val;
- break;
- case SCR_ACTIVE:
- dev->sata_dev.ap->link.sactive = val;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
- u32 *val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- *val = dev->sata_dev.sstatus;
- return 0;
- case SCR_CONTROL:
- *val = dev->sata_dev.scontrol;
- return 0;
- case SCR_ERROR:
- *val = dev->sata_dev.serror;
- return 0;
- case SCR_ACTIVE:
- *val = dev->sata_dev.ap->link.sactive;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
.softreset = NULL,
@@ -367,8 +320,6 @@ static struct ata_port_operations sas_sata_ops = {
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop,
- .scr_read = sas_ata_scr_read,
- .scr_write = sas_ata_scr_write
};
static struct ata_port_info sata_port_info = {
@@ -801,7 +752,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
if (!dev_is_sata(ddev))
continue;
-
+
ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
ata_scsi_port_error_handler(shost, ap);
}
@@ -834,13 +785,13 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
LIST_HEAD(sata_q);
ap = NULL;
-
+
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *ddev = cmd_to_domain_dev(cmd);
if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
continue;
- if(ap && ap != ddev->sata_dev.ap)
+ if (ap && ap != ddev->sata_dev.ap)
continue;
ap = ddev->sata_dev.ap;
rtn = 1;
@@ -848,8 +799,21 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
}
if (!list_empty(&sata_q)) {
- ata_port_printk(ap, KERN_DEBUG,"sas eh calling libata cmd error handler\n");
+ ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
ata_scsi_cmd_error_handler(shost, ap, &sata_q);
+ /*
+ * ata's error handler may leave commands on the list,
+ * so make sure they don't remain on a stack list
+ * about to go out of scope.
+ *
+ * This looks strange, since the commands are
+ * now part of no list, but the next error
+ * action will be ata_port_error_handler()
+ * which takes no list and sweeps them up
+ * anyway from the ata tag array.
+ */
+ while (!list_empty(&sata_q))
+ list_del_init(sata_q.next);
}
} while (ap);
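The drain loop added above addresses a lifetime hazard: sata_q is a stack-local list head, so any command left threaded on it would dangle once the function returns. A self-contained sketch of the same drain pattern follows; the list helpers are hand-rolled stand-ins for the kernel's <linux/list.h> macros.

/* Stand-alone sketch of draining a stack-local list head so no entry
 * keeps pointing at storage that is about to go out of scope. The
 * list_* helpers are hand-rolled stand-ins for the kernel macros. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;		/* entry now points at itself */
}

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

int main(void)
{
	struct list_head q = LIST_HEAD_INIT(q);
	struct list_head a, b;

	list_add(&a, &q);
	list_add(&b, &q);

	/* same shape as the drain loop in sas_ata_eh() above */
	while (!list_empty(&q))
		list_del_init(q.next);

	printf("drained: %d\n", list_empty(&q));
	return 0;
}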
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index c17c25030f1c..fc460933575c 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -24,8 +24,6 @@
#include "sas_dump.h"
-#ifdef SAS_DEBUG
-
static const char *sas_hae_str[] = {
[0] = "HAE_RESET",
};
@@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port)
SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
}
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
index 47b45d4f5258..800e4c69093f 100644
--- a/drivers/scsi/libsas/sas_dump.h
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -24,19 +24,7 @@
#include "sas_internal.h"
-#ifdef SAS_DEBUG
-
void sas_dprint_porte(int phyid, enum port_event pe);
void sas_dprint_phye(int phyid, enum phy_event pe);
void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
void sas_dump_port(struct asd_sas_port *port);
-
-#else /* SAS_DEBUG */
-
-static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
-static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
-static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
- enum ha_event he) { }
-static inline void sas_dump_port(struct asd_sas_port *port) { }
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 505ffe358293..f3f693b772ac 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
* dev to host FIS as described in section G.5 of
* sas-2 r 04b */
dr = &((struct smp_resp *)disc_resp)->disc;
+ if (memcmp(dev->sas_addr, dr->attached_sas_addr,
+ SAS_ADDR_SIZE) == 0) {
+ sas_printk("Found loopback topology, just ignore it!\n");
+ return 0;
+ }
if (!(dr->attached_dev_type == 0 &&
dr->attached_sata_dev))
break;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 0001374bd6b2..8b538bd1ff2b 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -33,11 +33,7 @@
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#ifdef SAS_DEBUG
-#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#else
-#define SAS_DPRINTK(fmt, ...)
-#endif
+#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
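With the hunk above, SAS_DPRINTK is no longer compiled out when SAS_DEBUG is unset; it is now an unconditional printk at KERN_DEBUG. For reference, a minimal userspace sketch of the same variadic prefixing macro, with printf standing in for printk and using the same GNU ##__VA_ARGS__ extension:

/* Sketch of a prefixing variadic debug macro like SAS_DPRINTK above;
 * printf stands in for printk and there is no log level here. */
#include <stdio.h>

#define DPRINTK(fmt, ...) printf("sas: " fmt, ##__VA_ARGS__)

int main(void)
{
	DPRINTK("port%d: num_phys:%d\n", 0, 4);
	return 0;
}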
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 67758ea8eb7f..f6e189f40917 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -681,11 +681,10 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
- enum blk_eh_timer_return rtn;
+ enum blk_eh_timer_return rtn;
if (sas_ata_timed_out(cmd, task, &rtn))
return rtn;
-
if (!task) {
cmd->request->timeout /= 2;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 746dd3d7a092..b64c6da870d3 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -325,6 +325,7 @@ struct lpfc_vport {
#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -348,6 +349,8 @@ struct lpfc_vport {
uint32_t fc_myDID; /* fibre channel S_ID */
uint32_t fc_prevDID; /* previous fibre channel S_ID */
+ struct lpfc_name fabric_portname;
+ struct lpfc_name fabric_nodename;
int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
@@ -372,6 +375,7 @@ struct lpfc_vport {
#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
+#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */
#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
@@ -382,6 +386,7 @@ struct lpfc_vport {
struct timer_list fc_fdmitmo;
struct timer_list els_tmofunc;
+ struct timer_list delayed_disc_tmo;
int unreg_vpi_cmpl;
@@ -548,6 +553,8 @@ struct lpfc_hba {
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_BG_ENABLED 0x20
#define LPFC_SLI3_DSS_ENABLED 0x40
+#define LPFC_SLI4_PERFH_ENABLED 0x80
+#define LPFC_SLI4_PHWQ_ENABLED 0x100
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -655,7 +662,7 @@ struct lpfc_hba {
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
-
+ uint32_t cfg_enable_dss;
lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev;
@@ -792,6 +799,10 @@ struct lpfc_hba {
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+ struct dentry *idiag_que_info;
#endif
/* Used for deferred freeing of ELS data buffers */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3512abb8a587..e7c020df12fa 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
int status = 0;
int cnt = 0;
int i;
+ int rc;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
}
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl, type);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -671,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
*
* Notes:
* Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
*
* Returns:
* lpfc_do_offline() return code if not zero
@@ -682,6 +690,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
int status = 0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EIO;
@@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba)
return status;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int status=0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -1279,6 +1294,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+ (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+ (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+ "" : "Not ");
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1597,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1611,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1622,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1630,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1641,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1655,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1666,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1674,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1718,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
-
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -1813,6 +1850,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
int stat1=0, stat2=0;
unsigned int i, j, cnt=count;
u8 wwpn[8];
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
@@ -1863,7 +1901,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"0463 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat1);
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
+ rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (stat2)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1954,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
static int lpfc_poll = 0;
-module_param(lpfc_poll, int, 0);
+module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 0 - none,"
" 1 - poll with interrupts enabled"
@@ -1964,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
int lpfc_sli_mode = 0;
-module_param(lpfc_sli_mode, int, 0);
+module_param(lpfc_sli_mode, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
int lpfc_enable_npiv = 1;
-module_param(lpfc_enable_npiv, int, 0);
+module_param(lpfc_enable_npiv, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
int lpfc_enable_rrq;
-module_param(lpfc_enable_rrq, int, 0);
+module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
lpfc_param_show(enable_rrq);
lpfc_param_init(enable_rrq, 0, 0, 1);
@@ -2040,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, 1);
+module_param(lpfc_iocb_cnt, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_iocb_cnt,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
lpfc_param_show(iocb_cnt);
@@ -2192,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
# disappear until the timer expires. Value range is [0,255]. Default
# value is 30.
*/
-module_param(lpfc_devloss_tmo, int, 0);
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_devloss_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
@@ -2302,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
# Default value of this parameter is 1.
*/
static int lpfc_restrict_login = 1;
-module_param(lpfc_restrict_login, int, 0);
+module_param(lpfc_restrict_login, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
@@ -2473,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
static int lpfc_topology = 0;
-module_param(lpfc_topology, int, 0);
+module_param(lpfc_topology, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
lpfc_param_show(topology)
lpfc_param_init(topology, 0, 0, 6)
@@ -2915,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_link_speed = 0;
-module_param(lpfc_link_speed, int, 0);
+module_param(lpfc_link_speed, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
@@ -3043,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, 1);
+module_param(lpfc_aer_support, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
lpfc_param_show(aer_support)
@@ -3155,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
# The value is set in milliseconds.
*/
static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, 0);
+module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
"Use command completion time to control queue depth");
lpfc_vport_param_show(max_scsicmpl_time);
@@ -3331,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
*/
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
-module_param(lpfc_prot_mask, uint, 0);
+module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
/*
@@ -3343,9 +3385,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
#
*/
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, 0);
+module_param(lpfc_prot_guard, byte, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to non-zero value, initial Nport discovery is
+ * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC
+ * accept and FCID/Fabric name/Fabric portname is changed.
+ * Driver always delay Nport discovery for subsequent FLOGI/FDISC completion
+ * when Clean Address bit is cleared in FLOGI/FDISC
+ * accept and FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+int lpfc_delay_discovery;
+module_param(lpfc_delay_discovery, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_delay_discovery,
+ "Delay NPort discovery when Clean Address bit is cleared. "
+ "Allowed values: 0,1.");
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
@@ -3437,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
+ &dev_attr_lpfc_dss,
NULL,
};
@@ -4639,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+ phba->cfg_enable_dss = 1;
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 17fde522c84a..3d40023f4804 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_sli4_params(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
extern int lpfc_sli_mode;
extern int lpfc_enable_npiv;
+extern int lpfc_delay_discovery;
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
@@ -423,6 +426,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
uint16_t, uint16_t, uint16_t);
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
-void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c004fa9a681e..d9edfd90d7ff 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1738,6 +1738,55 @@ fdmi_cmd_exit:
return 1;
}
+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr: Context object of the timer.
+ *
+ * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+ if (!tmo_posted)
+ vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ * handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function starts NPort discovery for the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
void
lpfc_fdmi_tmo(unsigned long ptr)
{
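lpfc_delayed_disc_tmo() above follows the usual lpfc deferral pattern: the timer callback only posts a WORKER_* flag under the work-port lock and wakes the worker thread, while the real work (here lpfc_do_scr_ns_plogi()) runs later in the worker's process context. A compact userspace sketch of that post-flag-and-wake handshake, with pthreads standing in for the kernel spinlock and worker wake-up:

/* Userspace sketch of the post-a-flag-and-wake-the-worker handshake
 * used by lpfc_delayed_disc_tmo() above; pthreads stand in for the
 * kernel spinlock and lpfc_worker_wake_up(). */
#include <pthread.h>
#include <stdio.h>

#define WORKER_DELAYED_DISC_TMO 0x8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static unsigned int work_events;

/* "timer" side: post the event only if it is not already posted */
static void post_delayed_disc_tmo(void)
{
	int tmo_posted;

	pthread_mutex_lock(&lock);
	tmo_posted = work_events & WORKER_DELAYED_DISC_TMO;
	if (!tmo_posted)
		work_events |= WORKER_DELAYED_DISC_TMO;
	pthread_mutex_unlock(&lock);

	if (!tmo_posted)
		pthread_cond_signal(&wake);	/* wake the worker */
}

/* "worker" side: consume the event and do the deferred work */
static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!(work_events & WORKER_DELAYED_DISC_TMO))
		pthread_cond_wait(&wake, &lock);
	work_events &= ~WORKER_DELAYED_DISC_TMO;
	pthread_mutex_unlock(&lock);

	printf("deferred discovery runs here\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	post_delayed_disc_tmo();
	pthread_join(t, NULL);
	return 0;
}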
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a80d938fafc9..a753581509d6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -57,8 +57,8 @@
* # mount -t debugfs none /sys/kernel/debug
*
* The lpfc debugfs directory hierarchy is:
- * lpfc/lpfcX/vportY
- * where X is the lpfc hba unique_id
+ * /sys/kernel/debug/lpfc/fnX/vportY
+ * where X is the lpfc hba function unique_id
* where Y is the vport VPI on that hba
*
* Debugging services available per vport:
@@ -82,52 +82,34 @@
* the HBA. X MUST also be a power of 2.
*/
static int lpfc_debugfs_enable = 1;
-module_param(lpfc_debugfs_enable, int, 0);
+module_param(lpfc_debugfs_enable, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_disc_trc;
-module_param(lpfc_debugfs_max_disc_trc, int, 0);
+module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
"Set debugfs discovery trace depth");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_slow_ring_trc;
-module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
+module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
static int lpfc_debugfs_mask_disc_trc;
-module_param(lpfc_debugfs_mask_disc_trc, int, 0);
+module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
"Set debugfs discovery trace mask");
#include <linux/debugfs.h>
-/* size of output line, for discovery_trace and slow_ring_trace */
-#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
-
-/* nodelist output buffer size */
-#define LPFC_NODELIST_SIZE 8192
-#define LPFC_NODELIST_ENTRY_SIZE 120
-
-/* dumpHBASlim output buffer size */
-#define LPFC_DUMPHBASLIM_SIZE 4096
-
-/* dumpHostSlim output buffer size */
-#define LPFC_DUMPHOSTSLIM_SIZE 4096
-
-/* hbqinfo output buffer size */
-#define LPFC_HBQINFO_SIZE 8192
-
-struct lpfc_debug {
- char *buffer;
- int len;
-};
-
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
+/* iDiag */
+static struct lpfc_idiag idiag;
+
/**
* lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
* @vport: The vport to gather the log info from.
@@ -996,8 +978,6 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
return nbytes;
}
-
-
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
* @inode: The inode pointer that contains a vport pointer.
@@ -1099,6 +1079,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
+
return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
debug->len);
}
@@ -1137,6 +1118,776 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
return 0;
}
+/*
+ * iDiag debugfs file access methods
+ */
+
+/*
+ * iDiag PCI config space register access methods:
+ *
+ * Read, write, read-modify-write (set bits), and read-modify-write (clear
+ * bits) accesses to the PCI config space registers of SLI4 PCI functions
+ * are provided. In the proper SLI4 PCI function's debugfs iDiag directory,
+ *
+ * /sys/kernel/debug/lpfc/fn<#>/iDiag
+ *
+ * the access is through the debugfs entry pciCfg:
+ *
+ * 1. For PCI config space register read access, there are two read methods:
+ * A) read a single PCI config space register in the size of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through
+ * the 4K extended PCI config space.
+ *
+ * A) Read a single PCI config space register consists of two steps:
+ *
+ * Step-1: Set up PCI config space register read command, the command
+ * syntax is,
+ *
+ * echo 1 <where> <count> > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, <where> is the
+ * offset from the beginning of the device's PCI config space to read from,
+ * and <count> is the size of PCI config space register data to read back,
+ * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits
+ * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes).
+ *
+ * Step-2: Perform the debugfs read operation to execute the idiag command
+ * set up in Step-1,
+ *
+ * cat pciCfg
+ *
+ * Examples:
+ * To read PCI device's vendor-id and device-id from PCI config space,
+ *
+ * echo 1 0 4 > pciCfg
+ * cat pciCfg
+ *
+ * To read PCI device's current command from config space,
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * B) Browse through the entire 4K extended PCI config space also consists
+ * of two steps:
+ *
+ * Step-1: Set up PCI config space register browsing command, the command
+ * syntax is,
+ *
+ * echo 1 0 4096 > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, 0 must be used
+ * as the offset for PCI config space register browse, and 4096 must be
+ * used as the count for PCI config space register browse.
+ *
+ * Step-2: Repeatedly issue the debugfs read operation to browse through
+ * the entire PCI config space registers:
+ *
+ * cat pciCfg
+ * cat pciCfg
+ * cat pciCfg
+ * ...
+ *
+ * When browsing to the end of the 4K PCI config space, the browse method
+ * wraps around and starts reading from the beginning again.
+ *
+ * 2. For PCI config space register write access, it supports a single PCI
+ * config space register write in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 2 <where> <count> <value> > pciCfg
+ *
+ * where, 2 is the iDiag command for PCI config space write, <where> is
+ * the offset from the beginning of the device's PCI config space to write
+ * into, <count> is the size of data to write into the PCI config space,
+ * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits
+ * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value>
+ * is the data to be written into the PCI config space register at the
+ * offset.
+ *
+ * Examples:
+ * To disable PCI device's interrupt assertion,
+ *
+ * 1) Read in device's PCI config space register command field <cmd>:
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>:
+ *
+ * <cmd> = <cmd> | (1 < 10)
+ *
+ * 3) Write the modified command back:
+ *
+ * echo 2 4 2 <cmd> > pciCfg
+ *
+ * 3. For PCI config space register set bits access, it supports a single PCI
+ * config space register set bits in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 3 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 3 is the iDiag command for PCI config space set bits, <where> is
+ * the offset from the beginning of the device's PCI config space to set
+ * bits into, <count> is the size of the bitmask to set into the PCI config
+ * space, it will be 1 for setting a byte (8 bits), 2 for setting a word
+ * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and
+ * <bitmask> is the bitmask, indicating the bits to be set into the PCI
+ * config space register at the offset. The logic performed to the content
+ * of the PCI config space register, regval, is,
+ *
+ * regval |= <bitmask>
+ *
+ * 4. For PCI config space register clear bits access, it supports a single
+ * PCI config space register clear bits in the size of a byte (8 bits),
+ * a word (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 4 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 4 is the iDiag command for PCI config space clear bits, <where>
+ * is the offset from the beginning of the device's PCI config space to
+ * clear bits from, <count> is the size of the bitmask to set into the PCI
+ * config space, it will be 1 for setting a byte (8 bits), 2 for setting
+ * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4
+ * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared
+ * from the PCI config space register at the offset. the logic performed
+ * to the content of the PCI config space register, regval, is,
+ *
+ * regval &= ~<bitmask>
+ *
+ * Note, for all single register read, write, set bits, or clear bits access,
+ * the offset (<where>) must be aligned with the size of the data:
+ *
+ * For data size of byte (8 bits), the offset must be aligned to the byte
+ * boundary; for data size of word (16 bits), the offset must be aligned
+ * to the word boundary; while for data size of dword (32 bits), the offset
+ * must be aligned to the dword boundary. Otherwise, the interface will
+ * return the error:
+ *
+ * "-bash: echo: write error: Invalid argument".
+ *
+ * For example:
+ *
+ * echo 1 2 4 > pciCfg
+ * -bash: echo: write error: Invalid argument
+ *
+ * Note also that all of the numbers in the command fields for the read,
+ * write, set bits, and clear bits PCI config space register commands can be
+ * either decimal or hex.
+ *
+ * For example,
+ * echo 1 0 4096 > pciCfg
+ *
+ * will be the same as
+ * echo 1 0 0x1000 > pciCfg
+ *
+ * And,
+ * echo 2 155 1 10 > pciCfg
+ *
+ * will be
+ * echo 2 0x9b 1 0xa > pciCfg
+ */
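To make the set-bits and clear-bits semantics above concrete, here is a tiny standalone sketch of the two read-modify-write operations; a plain variable stands in for the pci_read_config_*/pci_write_config_* register accesses.

/* Sketch of the iDiag set-bits / clear-bits read-modify-write semantics
 * documented above; a plain variable stands in for the PCI register. */
#include <stdint.h>
#include <stdio.h>

static uint32_t regval = 0x0006;	/* pretend PCI command register */

static void set_bits(uint32_t bitmask)   { regval |= bitmask; }
static void clear_bits(uint32_t bitmask) { regval &= ~bitmask; }

int main(void)
{
	set_bits(1 << 10);		/* e.g. Interrupt Disable bit */
	printf("after set:   %#06x\n", regval);
	clear_bits(1 << 10);
	printf("after clear: %#06x\n", regval);
	return 0;
}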
+
+/**
+ * lpfc_idiag_cmd_get - Get and parse idiag debugfs commands from user space
+ * @buf: The pointer to the user space buffer.
+ * @nbytes: The number of bytes in the user space buffer.
+ * @idiag_cmd: pointer to the idiag command struct.
+ *
+ * This routine reads data from the debugfs user space buffer and parses it
+ * for the idiag command and its arguments. The white space between the data
+ * fields is used as the parsing separator.
+ *
+ * This routine returns 0 when successful; in error conditions it returns the
+ * proper error code back to user space.
+ */
+static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
+ struct lpfc_idiag_cmd *idiag_cmd)
+{
+ char mybuf[64];
+ char *pbuf, *step_str;
+ int bsize, i;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+ memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+ bsize = min(nbytes, (sizeof(mybuf)-1));
+
+ if (copy_from_user(mybuf, buf, bsize))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+ step_str = strsep(&pbuf, "\t ");
+
+ /* The opcode must be present */
+ if (!step_str)
+ return -EINVAL;
+
+ idiag_cmd->opcode = simple_strtol(step_str, NULL, 0);
+ if (idiag_cmd->opcode == 0)
+ return -EINVAL;
+
+ for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
+ step_str = strsep(&pbuf, "\t ");
+ if (!step_str)
+ return 0;
+ idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
+ }
+ return 0;
+}
+
+/**
+ * lpfc_idiag_open - idiag open debugfs
+ * @inode: The inode pointer that contains a pointer to phba.
+ * @file: The file pointer to attach the file operation.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It
+ * gets the reference to phba from the i_private field in @inode, then
+ * allocates a buffer for the file operation, performs the necessary PCI
+ * config space read into the allocated buffer according to the idiag user
+ * command setup, and then returns a pointer to the buffer in the
+ * private_data field of @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a
+ * negative error value.
+ **/
+static int
+lpfc_idiag_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+ debug->buffer = NULL;
+ file->private_data = debug;
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_release - Release idiag access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine is the generic release routine for the idiag access file
+ * operation; it frees the buffer that was allocated when the debugfs file
+ * was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_cmd_release - Release idiag cmd access file operation
+ * @inode: The inode pointer that contains a vport pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file
+ * was opened. It also resets the fields in the idiag command struct when
+ * the command is not a continuous browse of the data structure.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Read PCI config register, if not read all, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_RD) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD))
+ if ((idiag.cmd.data[1] == sizeof(uint8_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint16_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint32_t)))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Write PCI config register, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_WR) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci config space according to the
+ * idiag command, and copies it to user @buf. Depending on the PCI config space
+ * read command setup, it does either a single register read of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all
+ * registers from the 4K extended PCI config space.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE;
+ int where, count;
+ char *pbuffer;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ } else
+ return 0;
+
+ /* Read single PCI config space register */
+ switch (count) {
+ case SIZE_U8: /* byte (8 bits) */
+ pci_read_config_byte(pdev, where, &u8val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %02x\n", where, u8val);
+ break;
+ case SIZE_U16: /* word (16 bits) */
+ pci_read_config_word(pdev, where, &u16val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %04x\n", where, u16val);
+ break;
+ case SIZE_U32: /* double word (32 bits) */
+ pci_read_config_dword(pdev, where, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %08x\n", where, u32val);
+ break;
+ case LPFC_PCI_CFG_SIZE: /* browse all */
+ goto pcicfg_browse;
+ break;
+ default:
+ /* illegal count */
+ len = 0;
+ break;
+ }
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+pcicfg_browse:
+
+ /* Browse all PCI config space registers */
+ offset_label = idiag.offset.last_rd;
+ offset = offset_label;
+
+ /* Read PCI config space */
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: ", offset_label);
+ while (index > 0) {
+ pci_read_config_dword(pdev, offset, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%08x ", u32val);
+ offset += sizeof(uint32_t);
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n%03x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci cfg read */
+ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+ if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ idiag.offset.last_rd = 0;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for a PCI config space read or write command
+ * accordingly. In the case of a PCI config space read command, it sets up
+ * the command in the idiag command struct for the debugfs read operation.
+ * In the case of a PCI config space write operation, it executes the write
+ * into the PCI config space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns the proper error code back to user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t where, value, count;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+ struct pci_dev *pdev;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc)
+ return rc;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ /* Read command from PCI config space, set up command fields */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ if (count == LPFC_PCI_CFG_SIZE) {
+ if (where != 0)
+ goto error_out;
+ } else if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ }
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ /* Write command to PCI config space, read-modify-write */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ value = idiag.cmd.data[2];
+ /* Sanity checks */
+ if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_byte(pdev, where,
+ (uint8_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val |= (uint8_t)value;
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val &= (uint8_t)(~value);
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_word(pdev, where,
+ (uint16_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val |= (uint16_t)value;
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val &= (uint16_t)(~value);
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_dword(pdev, where, value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val |= value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val &= ~value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ }
+ } else
+ /* All other opcodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
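A minimal userspace sketch of driving this interface follows. The debugfs path assumes the fn<N>/iDiag/pciCfg layout created later in this patch, and the command grammar (whitespace-separated numbers: opcode first, then data words) assumes the behavior of lpfc_idiag_cmd_get(), which is not part of this hunk; opcode 1 is PCICFG_RD (here with where = 0 and count = 4096 to select browse mode), while opcodes 2/3/4 write, set, and clear bits at a given offset.

	/* Hypothetical sketch only -- path and command format are
	 * assumptions, not guaranteed by this hunk. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char cmd[] = "1 0 4096";	/* PCICFG_RD, browse all */
		char buf[4096];
		ssize_t n;
		int i, fd;

		fd = open("/sys/kernel/debug/lpfc/fn0/iDiag/pciCfg", O_RDWR);
		if (fd < 0)
			return 1;
		if (write(fd, cmd, strlen(cmd)) < 0)
			return 1;
		for (i = 0; i < 4; i++) {	/* 4 x 1 KB windows = 4 KB */
			lseek(fd, 0, SEEK_SET);	/* read path keys off *ppos */
			n = read(fd, buf, sizeof(buf));
			if (n <= 0)
				break;
			fwrite(buf, 1, n, stdout);
		}
		close(fd);
		return 0;
	}
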
+/**
+ * lpfc_idiag_queinfo_read - idiag debugfs read queue information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the @phba SLI4 PCI function's queue information and
+ * copies it to the user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int len = 0, fcp_qidx;
+ char *pbuffer;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ /* Get slow-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path EQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.sp_eq->queue_id,
+ phba->sli4_hba.sp_eq->entry_count,
+ phba->sli4_hba.sp_eq->host_index,
+ phba->sli4_hba.sp_eq->hba_index);
+
+ /* Get fast-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path EQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
+ phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_cq->queue_id,
+ phba->sli4_hba.mbx_cq->entry_count,
+ phba->sli4_hba.mbx_cq->host_index,
+ phba->sli4_hba.mbx_cq->hba_index);
+
+ /* Get slow-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.els_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_cq->queue_id,
+ phba->sli4_hba.els_cq->entry_count,
+ phba->sli4_hba.els_cq->host_index,
+ phba->sli4_hba.els_cq->hba_index);
+
+ /* Get fast-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path CQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox MQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], MQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_wq->entry_count,
+ phba->sli4_hba.mbx_wq->host_index,
+ phba->sli4_hba.mbx_wq->hba_index);
+
+ /* Get slow-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path WQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.els_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_wq->entry_count,
+ phba->sli4_hba.els_wq->host_index,
+ phba->sli4_hba.els_wq->hba_index);
+
+ /* Get fast-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path WQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get receive queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path RQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.hdr_rq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RHQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.hdr_rq->queue_id,
+ phba->sli4_hba.hdr_rq->entry_count,
+ phba->sli4_hba.hdr_rq->host_index,
+ phba->sli4_hba.hdr_rq->hba_index);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RDQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.dat_rq->queue_id,
+ phba->sli4_hba.dat_rq->entry_count,
+ phba->sli4_hba.dat_rq->host_index,
+ phba->sli4_hba.dat_rq->hba_index);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
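One caveat on the len += snprintf(...) accumulation used throughout the read routines above: snprintf() returns the length that would have been written, so once the buffer fills, len can exceed the buffer size and the size argument (a size_t) wraps to a huge value on the next call. A safer shape for this append idiom, sketched here as a general illustration rather than as part of this patch, is the kernel's scnprintf(), which returns only the bytes actually stored:

	/* Sketch only: scnprintf() caps its return value at the space
	 * remaining, so len can never run past the buffer. */
	len += scnprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
			 "Slow-path EQ information:\n");
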
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -1213,6 +1964,28 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
+
+/*
+ * File operations for the iDiag debugfs
+ */
+#undef lpfc_idiag_op_pciCfg
+static const struct file_operations lpfc_idiag_op_pciCfg = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_pcicfg_read,
+ .write = lpfc_idiag_pcicfg_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_queInfo
+static const struct file_operations lpfc_idiag_op_queInfo = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .read = lpfc_idiag_queinfo_read,
+ .release = lpfc_idiag_release,
+};
+
#endif
/**
@@ -1249,8 +2022,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
- /* Setup lpfcX directory for specific HBA */
- snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
+ /* Setup funcX directory for specific HBA PCI function */
+ snprintf(name, sizeof(name), "fn%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
@@ -1275,28 +2048,38 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/* Setup dumpHBASlim */
- snprintf(name, sizeof(name), "dumpHBASlim");
- phba->debug_dumpHBASlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHBASlim);
- if (!phba->debug_dumpHBASlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0413 Cannot create debugfs dumpHBASlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHBASlim");
+ phba->debug_dumpHBASlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHBASlim);
+ if (!phba->debug_dumpHBASlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0413 Cannot create debugfs "
+ "dumpHBASlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHBASlim = NULL;
/* Setup dumpHostSlim */
- snprintf(name, sizeof(name), "dumpHostSlim");
- phba->debug_dumpHostSlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHostSlim);
- if (!phba->debug_dumpHostSlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0414 Cannot create debugfs dumpHostSlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHostSlim");
+ phba->debug_dumpHostSlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHostSlim);
+ if (!phba->debug_dumpHostSlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0414 Cannot create debugfs "
+ "dumpHostSlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHostSlim = NULL;
/* Setup dumpData */
snprintf(name, sizeof(name), "dumpData");
@@ -1322,8 +2105,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
-
-
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -1342,7 +2123,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
-
snprintf(name, sizeof(name), "slow_ring_trace");
phba->debug_slow_ring_trc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -1434,6 +2214,53 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
"0409 Cant create debugfs nodelist\n");
goto debug_failed;
}
+
+ /*
+ * iDiag debugfs root entry points for SLI4 device only
+ */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ goto debug_failed;
+
+ snprintf(name, sizeof(name), "iDiag");
+ if (!phba->idiag_root) {
+ phba->idiag_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!phba->idiag_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2922 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ /* Initialize iDiag data structure */
+ memset(&idiag, 0, sizeof(idiag));
+ }
+
+ /* iDiag read PCI config space */
+ snprintf(name, sizeof(name), "pciCfg");
+ if (!phba->idiag_pci_cfg) {
+ phba->idiag_pci_cfg =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
+ if (!phba->idiag_pci_cfg) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2923 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
+ /* iDiag get PCI function queue information */
+ snprintf(name, sizeof(name), "queInfo");
+ if (!phba->idiag_que_info) {
+ phba->idiag_que_info =
+ debugfs_create_file(name, S_IFREG|S_IRUGO,
+ phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
+ if (!phba->idiag_que_info) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2924 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
debug_failed:
return;
#endif
@@ -1508,8 +2335,31 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
phba->debug_slow_ring_trc = NULL;
}
+ /*
+ * iDiag release
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->idiag_que_info) {
+ /* iDiag queInfo */
+ debugfs_remove(phba->idiag_que_info);
+ phba->idiag_que_info = NULL;
+ }
+ if (phba->idiag_pci_cfg) {
+ /* iDiag pciCfg */
+ debugfs_remove(phba->idiag_pci_cfg);
+ phba->idiag_pci_cfg = NULL;
+ }
+
+ /* Finally remove the iDiag debugfs root */
+ if (phba->idiag_root) {
+ /* iDiag root */
+ debugfs_remove(phba->idiag_root);
+ phba->idiag_root = NULL;
+ }
+ }
+
if (phba->hba_debugfs_root) {
- debugfs_remove(phba->hba_debugfs_root); /* lpfcX */
+ debugfs_remove(phba->hba_debugfs_root); /* fnX */
phba->hba_debugfs_root = NULL;
atomic_dec(&lpfc_debugfs_hba_count);
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 03c7313a1012..91b9a9427cda 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -22,6 +22,44 @@
#define _H_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/* rdPciConf output buffer size */
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+/* queue info output buffer size */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 2048
+
+#define SIZE_U8 sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+struct lpfc_debug {
+ char *i_private;
+ char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+ char *buffer;
+ int len;
+};
+
struct lpfc_debugfs_trc {
char *fmt;
uint32_t data1;
@@ -30,6 +68,26 @@ struct lpfc_debugfs_trc {
uint32_t seq_cnt;
unsigned long jif;
};
+
+struct lpfc_idiag_offset {
+ uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 4
+struct lpfc_idiag_cmd {
+ uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+ uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
+
+struct lpfc_idiag {
+ uint32_t active;
+ struct lpfc_idiag_cmd cmd;
+ struct lpfc_idiag_offset offset;
+};
#endif
/* Mask for discovery_trace */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c62d567cc845..8e28edf9801e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -485,6 +485,59 @@ fail:
}
/**
+ * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from the FLOGI/FDISC completion handlers. It
+ * returns 1 when the FCID, fabric portname, or fabric nodename has changed
+ * in the completion service parameters, and 0 otherwise. It also sets a
+ * flag in the vport data structure to delay NPort discovery after the
+ * FLOGI/FDISC completion when the clean address bit in the FLOGI/FDISC
+ * response is cleared and one of those fabric parameters has changed.
+ *
+ * Return code
+ * 0 - FCID, fabric nodename, and fabric portname are unchanged.
+ * 1 - FCID, fabric nodename, or fabric portname has changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+ struct serv_parm *sp)
+{
+ uint8_t fabric_param_changed = 0;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if ((vport->fc_prevDID != vport->fc_myDID) ||
+ memcmp(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name)) ||
+ memcmp(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name)))
+ fabric_param_changed = 1;
+
+ /*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ *
+ * If fabric parameter is changed and clean address bit is
+ * cleared delay nport discovery if
+ * - vport->fc_prevDID != 0 (not initial discovery) OR
+ * - lpfc_delay_discovery module parameter is set.
+ */
+ if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+ (vport->fc_prevDID || lpfc_delay_discovery)) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ return fabric_param_changed;
+}
+
+
+/**
* lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
+ uint8_t fabric_param_changed;
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_FABRIC;
@@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
sp->cmn.bbRcvSizeLsb;
+
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
@@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
@@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
+ struct lpfcMboxq *mbox;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_CMPL_LOGO);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ phba->pport->fc_myDID = 0;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+ MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+ }
return;
}
@@ -2745,7 +2821,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_FDISC:
- lpfc_issue_els_fdisc(vport, ndlp, retry);
+ if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+ lpfc_issue_els_fdisc(vport, ndlp, retry);
break;
}
return;
@@ -2815,9 +2892,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
switch (irsp->ulpStatus) {
case IOSTAT_FCP_RSP_ERROR:
+ break;
case IOSTAT_REMOTE_STOP:
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* This IO was aborted by the target; we don't
+ * know the rxid, and because we did not send
+ * the ABTS we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ cmdiocb->sli4_xritag, 0, 0);
+ }
break;
-
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
@@ -4013,28 +4098,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
uint8_t *pcmd;
struct RRQ *rrq;
uint16_t rxid;
+ uint16_t xri;
struct lpfc_node_rrq *prrq;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
pcmd += sizeof(uint32_t);
rrq = (struct RRQ *)pcmd;
- rxid = bf_get(rrq_oxid, rrq);
+ rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+ rxid = be16_to_cpu(bf_get(rrq_rxid, rrq));
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
" x%x x%x\n",
- bf_get(rrq_did, rrq),
- bf_get(rrq_oxid, rrq),
+ be32_to_cpu(bf_get(rrq_did, rrq)),
+ be16_to_cpu(bf_get(rrq_oxid, rrq)),
rxid,
iocb->iotag, iocb->iocb.ulpContext);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
- prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+ if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+ xri = be16_to_cpu(bf_get(rrq_oxid, rrq));
+ else
+ xri = rxid;
+ prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
if (prrq)
- lpfc_clr_rrq_active(phba, rxid, prrq);
+ lpfc_clr_rrq_active(phba, xri, prrq);
return;
}
@@ -6166,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (vport->load_flag & FC_UNLOADING)
goto dropit;
+ /* If NPort discovery is delayed drop incoming ELS */
+ if ((vport->fc_flag & FC_DISC_DELAYED) &&
+ (cmd != ELS_CMD_PLOGI))
+ goto dropit;
+
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
@@ -6218,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
lpfc_send_els_event(vport, ndlp, payload);
+
+ /* If Nport discovery is delayed, reject PLOGIs */
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ break;
+ }
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6596,6 +6698,21 @@ void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /*
+ * If the lpfc_delay_discovery module parameter is set, the clean
+ * address bit is cleared, and the FC fabric parameters have changed,
+ * delay FC NPort discovery.
+ */
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ spin_unlock_irq(shost->host_lock);
+ mod_timer(&vport->delayed_disc_tmo,
+ jiffies + HZ * phba->fc_ratov);
+ return;
+ }
+ spin_unlock_irq(shost->host_lock);
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
@@ -6938,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *next_np;
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
+ uint8_t fabric_param_changed;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
@@ -6981,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ sp = prsp->virt + sizeof(uint32_t);
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed so we can
@@ -7582,6 +7709,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked during vport cleanup on deletion and during
+ * ndlp cleanup on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ sglq_entry->ndlp = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
+
+/**
* lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the els xri abort wcqe structure.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bb015960dbc9..154c715fb3af 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba);
+ if (work_port_events & WORKER_DELAYED_DISC_TMO)
+ lpfc_delayed_disc_timeout_handler(vport);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
+ /* Stop delayed Nport discovery */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ del_timer_sync(&vport->delayed_disc_tmo);
}
int
@@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_cleanup_vports_rrqs(vport);
+ lpfc_cleanup_vports_rrqs(vport, NULL);
/*
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
@@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_nlp_put(ndlp);
return;
}
@@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
-
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
return 0;
@@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp = __lpfc_findnode_did(vport, did);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
return ndlp;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 96ed3ba6ba95..94ae37c5111a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -341,6 +341,12 @@ struct csp {
uint8_t bbCreditMsb;
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+/*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
@@ -3198,7 +3204,10 @@ typedef struct {
#define IOERR_SLER_RRQ_RJT_ERR 0x4C
#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
#define IOERR_SLER_ABTS_ERR 0x4E
-
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3
#define IOERR_DRVR_MASK 0x100
#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
#define IOERR_SLI_BRESET 0x102
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 94c1aa1136de..c7178d60c7bf 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -778,6 +778,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
};
struct lpfc_mbx_supp_pages {
@@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages {
#define LPFC_SLI4_PARAMETERS 2
};
-struct lpfc_mbx_sli4_params {
+struct lpfc_mbx_pc_sli4_params {
uint32_t word1;
#define qs_SHIFT 0
#define qs_MASK 0x00000001
@@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params {
uint32_t rsvd_13_63[51];
};
+struct lpfc_sli4_parameters {
+ uint32_t word0;
+#define cfg_prot_type_SHIFT 0
+#define cfg_prot_type_MASK 0x000000FF
+#define cfg_prot_type_WORD word0
+ uint32_t word1;
+#define cfg_ft_SHIFT 0
+#define cfg_ft_MASK 0x00000001
+#define cfg_ft_WORD word1
+#define cfg_sli_rev_SHIFT 4
+#define cfg_sli_rev_MASK 0x0000000f
+#define cfg_sli_rev_WORD word1
+#define cfg_sli_family_SHIFT 8
+#define cfg_sli_family_MASK 0x0000000f
+#define cfg_sli_family_WORD word1
+#define cfg_if_type_SHIFT 12
+#define cfg_if_type_MASK 0x0000000f
+#define cfg_if_type_WORD word1
+#define cfg_sli_hint_1_SHIFT 16
+#define cfg_sli_hint_1_MASK 0x000000ff
+#define cfg_sli_hint_1_WORD word1
+#define cfg_sli_hint_2_SHIFT 24
+#define cfg_sli_hint_2_MASK 0x0000001f
+#define cfg_sli_hint_2_WORD word1
+ uint32_t word2;
+ uint32_t word3;
+ uint32_t word4;
+#define cfg_cqv_SHIFT 14
+#define cfg_cqv_MASK 0x00000003
+#define cfg_cqv_WORD word4
+ uint32_t word5;
+ uint32_t word6;
+#define cfg_mqv_SHIFT 14
+#define cfg_mqv_MASK 0x00000003
+#define cfg_mqv_WORD word6
+ uint32_t word7;
+ uint32_t word8;
+#define cfg_wqv_SHIFT 14
+#define cfg_wqv_MASK 0x00000003
+#define cfg_wqv_WORD word8
+ uint32_t word9;
+ uint32_t word10;
+#define cfg_rqv_SHIFT 14
+#define cfg_rqv_MASK 0x00000003
+#define cfg_rqv_WORD word10
+ uint32_t word11;
+#define cfg_rq_db_window_SHIFT 28
+#define cfg_rq_db_window_MASK 0x0000000f
+#define cfg_rq_db_window_WORD word11
+ uint32_t word12;
+#define cfg_fcoe_SHIFT 0
+#define cfg_fcoe_MASK 0x00000001
+#define cfg_fcoe_WORD word12
+#define cfg_phwq_SHIFT 15
+#define cfg_phwq_MASK 0x00000001
+#define cfg_phwq_WORD word12
+#define cfg_loopbk_scope_SHIFT 28
+#define cfg_loopbk_scope_MASK 0x0000000f
+#define cfg_loopbk_scope_WORD word12
+ uint32_t sge_supp_len;
+ uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT 0
+#define cfg_sgl_page_cnt_MASK 0x0000000f
+#define cfg_sgl_page_cnt_WORD word14
+#define cfg_sgl_page_size_SHIFT 8
+#define cfg_sgl_page_size_MASK 0x000000ff
+#define cfg_sgl_page_size_WORD word14
+#define cfg_sgl_pp_align_SHIFT 16
+#define cfg_sgl_pp_align_MASK 0x000000ff
+#define cfg_sgl_pp_align_WORD word14
+ uint32_t word15;
+ uint32_t word16;
+ uint32_t word17;
+ uint32_t word18;
+ uint32_t word19;
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+ struct mbox_header header;
+ struct lpfc_sli4_parameters sli4_parameters;
+};
+
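The _SHIFT/_MASK/_WORD triplets above are consumed by lpfc's bf_get()/bf_set() helpers, which are defined elsewhere in the driver and not in this hunk. A sketch of the expansion, shown only to make the field encoding concrete and not as the driver's literal macro text:

	#define bf_get_sketch(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* e.g. bf_get_sketch(cfg_sli_rev, params) evaluates to
	 * (params->word1 >> 4) & 0xf -- the SLI revision field. */
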
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2103,7 +2192,8 @@ struct lpfc_mqe {
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
- struct lpfc_mbx_sli4_params sli4_params;
+ struct lpfc_mbx_pc_sli4_params sli4_params;
+ struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
struct lpfc_mbx_nop nop;
} un;
};
@@ -2381,6 +2471,10 @@ struct wqe_common {
#define wqe_wqes_SHIFT 15
#define wqe_wqes_MASK 0x00000001
#define wqe_wqes_WORD word10
+/* Note that this field overlaps the fields above */
+#define wqe_wqid_SHIFT 1
+#define wqe_wqid_MASK 0x0000007f
+#define wqe_wqid_WORD word10
#define wqe_pri_SHIFT 16
#define wqe_pri_MASK 0x00000007
#define wqe_pri_WORD word10
@@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe {
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_iread64_wqe {
@@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe {
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_icmnd64_wqe {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6d0b36aa3389..35665cfb5689 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
|| ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
&& !(phba->lmt & LMT_16Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1302 Invalid speed for this board: "
"Reset link speed to auto: x%x\n",
phba->cfg_link_speed);
@@ -945,17 +945,13 @@ static void
lpfc_rrq_timeout(unsigned long ptr)
{
struct lpfc_hba *phba;
- uint32_t tmo_posted;
unsigned long iflag;
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
- if (!tmo_posted)
- phba->hba_flag |= HBA_RRQ_ACTIVE;
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- if (!tmo_posted)
- lpfc_worker_wake_up(phba);
+ lpfc_worker_wake_up(phba);
}
/**
@@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
/* Wait for any activity on ndlps to settle */
msleep(10);
}
+ lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
@@ -2295,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
del_timer_sync(&vport->els_tmofunc);
del_timer_sync(&vport->fc_fdmitmo);
+ del_timer_sync(&vport->delayed_disc_tmo);
lpfc_can_disctmo(vport);
return;
}
@@ -2355,6 +2353,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->fabric_block_timer);
del_timer_sync(&phba->eratt_poll);
del_timer_sync(&phba->hb_tmofunc);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ del_timer_sync(&phba->rrq_tmr);
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ }
phba->hb_outstanding = 0;
switch (phba->pci_dev_grp) {
@@ -2732,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
+
+ init_timer(&vport->delayed_disc_tmo);
+ vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+ vport->delayed_disc_tmo.data = (unsigned long)vport;
+
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -4283,36 +4290,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_bsmbx;
}
- /* Get the Supported Pages. It is always available. */
+ /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- rc = -EIO;
- mempool_free(mboxq, phba->mbox_mem_pool);
- goto out_free_bsmbx;
- }
-
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
+ if (!rc) {
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ if (rc) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto out_free_bsmbx;
}
}
-
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ /*
+ * Get sli4 parameters that override parameters from Port capabilities.
+ * If this call fails, it is not a critical error, so loading continues.
+ */
+ lpfc_get_sli4_parameters(phba, mboxq);
mempool_free(mboxq, phba->mbox_mem_pool);
- if (rc) {
- rc = -EIO;
- goto out_free_bsmbx;
- }
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc)
@@ -7810,7 +7818,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mqe = &mboxq->u.mqe;
/* Read the port's SLI4 Parameters port capabilities */
- lpfc_sli4_params(mboxq);
+ lpfc_pc_sli4_params(mboxq);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else {
@@ -7854,6 +7862,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion. The expectation is that this routine is
+ * typically called from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc;
+ struct lpfc_mqe *mqe = &mboxq->u.mqe;
+ struct lpfc_pc_sli4_params *sli4_params;
+ int length;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+ if (unlikely(rc))
+ return rc;
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+ sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+ sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+ sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+ sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+ mbx_sli4_parameters);
+ sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+ mbx_sli4_parameters);
+ if (bf_get(cfg_phwq, mbx_sli4_parameters))
+ phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+ sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+ sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
+ sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+ sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+ sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+ sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+ sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+ mbx_sli4_parameters);
+ sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+ mbx_sli4_parameters);
+ return 0;
+}
+
+/**
* lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 23403c650207..dba32dfdb59b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
- mb->un.varCfgPort.cdss = 1; /* Configure Security */
+ if (phba->cfg_enable_dss)
+ mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
@@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* @mbox: pointer to lpfc mbox command.
* @subsystem: The sli4 config sub mailbox subsystem.
* @opcode: The sli4 config sub mailbox command opcode.
- * @length: Length of the sli4 config mailbox command.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
*
* This routine sets up the header fields of SLI4 specific mailbox command
* for sending IOCTL command.
@@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
if (emb) {
/* Set up main header fields */
bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
- sli4_config->header.cfg_mhdr.payload_length =
- LPFC_MBX_CMD_HDR_LENGTH + length;
+ sli4_config->header.cfg_mhdr.payload_length = length;
/* Set up sub-header fields following main header */
bf_set(lpfc_mbox_hdr_opcode,
&sli4_config->header.cfg_shdr.request, opcode);
bf_set(lpfc_mbox_hdr_subsystem,
&sli4_config->header.cfg_shdr.request, subsystem);
- sli4_config->header.cfg_shdr.request.request_length = length;
+ sli4_config->header.cfg_shdr.request.request_length =
+ length - LPFC_MBX_CMD_HDR_LENGTH;
return length;
}
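A worked example of the new length accounting, using the GET_SLI4_PARAMETERS caller this patch adds in lpfc_init.c:

	/*
	 * length = sizeof(struct lpfc_mbx_get_sli4_parameters)
	 *          - sizeof(struct lpfc_sli4_cfg_mhdr);
	 *
	 * payload_length = length                      (sub-header + data)
	 * request_length = length - LPFC_MBX_CMD_HDR_LENGTH    (data only)
	 *
	 * Before this change the sub-header size was added on top of
	 * @length for payload_length, so callers passed the data size
	 * alone; they now pass data plus sub-header, matching the updated
	 * kernel-doc for @length.
	 */
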
@@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)
@@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox)
}
/**
- * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
- * mailbox command.
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
* retrieve the particular SLI4 features supported by the port.
**/
void
-lpfc_sli4_params(struct lpfcMboxq *mbox)
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
{
- struct lpfc_mbx_sli4_params *sli4_params;
+ struct lpfc_mbx_pc_sli4_params *sli4_params;
memset(mbox, 0, sizeof(*mbox));
sli4_params = &mbox->u.mqe.un.sli4_params;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d85a7423a694..52b35159fc35 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /* no need to reg_login if we are already in one of these states */
+ /*
+ * Need to unreg_login if we are already in one of these states and
+ * change to NPR state. This will block the port until after the ACC
+ * completes and the reg_login is issued and completed.
+ */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
- return 1;
+ lpfc_unreg_rpi(vport, ndlp);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
if ((vport->fc_flag & FC_PT2PT) &&
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c97751c95d77..bf34178b80bf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -609,6 +609,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
}
/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked during vport cleanup on deletion and during
+ * ndlp cleanup on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb, *next_psb;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_for_each_entry_safe(psb, next_psb,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ if (psb->rdata && psb->rdata->pnode
+ && psb->rdata->pnode->vport == vport)
+ psb->rdata = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
* lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
@@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
psb->status = IOSTAT_SUCCESS;
spin_unlock(
&phba->sli4_hba.abts_scsi_buf_list_lock);
- ndlp = psb->rdata->pnode;
+ if (psb->rdata && psb->rdata->pnode)
+ ndlp = psb->rdata->pnode;
+ else
+ ndlp = NULL;
+
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (ndlp)
@@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
- struct lpfc_scsi_buf *lpfc_cmd = NULL;
- struct lpfc_scsi_buf *start_lpfc_cmd = NULL;
- struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+ struct lpfc_scsi_buf *lpfc_cmd;
unsigned long iflag = 0;
int found = 0;
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
- while (!found && lpfc_cmd) {
+ list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, list) {
if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_cmd->cur_iocbq.sli4_xritag)) {
- lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
- spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd,
- struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
- iflag);
- if (lpfc_cmd == start_lpfc_cmd) {
- lpfc_cmd = NULL;
- break;
- } else
- continue;
- }
+ lpfc_cmd->cur_iocbq.sli4_xritag))
+ continue;
+ list_del(&lpfc_cmd->list);
found = 1;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->nonsg_phys = 0;
lpfc_cmd->prot_seg_cnt = 0;
+ break;
}
- return lpfc_cmd;
+ spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+ if (!found)
+ return NULL;
+ return lpfc_cmd;
}
/**
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1981,12 +2004,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ struct sli4_sge *first_data_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
int nseg;
+ struct ulp_bde64 *bde;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -2011,7 +2036,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl += 1;
-
+ first_data_sgl = sgl;
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
@@ -2047,6 +2072,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_offset += dma_len;
sgl++;
}
+ /* setup the performance hint (first data BDE) if enabled */
+ if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ bde->addrLow = first_data_sgl->addr_lo;
+ bde->addrHigh = first_data_sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(first_data_sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ }
} else {
sgl += 1;
/* clear the last flag in the fcp_rsp map entry */
@@ -2471,6 +2507,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
+ case IOSTAT_REMOTE_STOP:
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ break;
+ }
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
@@ -2478,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
}
-
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
@@ -2497,7 +2542,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
"on unprotected cmd\n");
}
}
-
+ if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+ && (phba->sli_rev == LPFC_SLI_REV4)
+ && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+ /* This IO was aborted by the target; we don't
+ * know the rxid, and because we did not send
+ * the ABTS we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, pnode,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ 0, 0);
+ }
/* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
@@ -2508,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
SAM_STAT_BUSY);
- } else {
+ } else
cmd->result = ScsiResult(DID_OK, 0);
- }
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -3004,11 +3058,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
goto out_fail_command;
}
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
- goto out_host_busy;
+ goto out_tgt_busy;
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -3125,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
+ out_tgt_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
out_fail_command:
done(cmnd);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a359d2b873ce..2ee0374a9908 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
-
+ if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
/* Update the host index before invoking device */
@@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
+ uint32_t did = 0;
+
+ if (!ndlp)
+ return -EINVAL;
+
+ if (!phba->cfg_enable_rrq)
+ return -EINVAL;
+
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ goto out;
+ }
+ did = ndlp->nlp_DID;
/*
* set the active bit even if there is no mem available.
*/
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (!ndlp)
- return -EINVAL;
+
+ if (NLP_CHK_FREE_REQ(ndlp))
+ goto out;
+
+ if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ goto out;
+
if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
- return -EINVAL;
+ goto out;
+
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
@@ -553,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
empty = list_empty(&phba->active_rrq_list);
- if (phba->cfg_enable_rrq && send_rrq)
- /*
- * We need the xri before we can add this to the
- * phba active rrq list.
- */
- rrq->send_rrq = send_rrq;
- else
- rrq->send_rrq = 0;
+ rrq->send_rrq = send_rrq;
list_add_tail(&rrq->list, &phba->active_rrq_list);
if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
phba->hba_flag |= HBA_RRQ_ACTIVE;
@@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
return 0;
}
- return -ENOMEM;
+out:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2921 Can't set rrq active xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, did, send_rrq);
+ return -EINVAL;
}
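
The rework above funnels every refusal through a single logging exit and leaves the core marking step as an atomic test_and_set_bit on the node's per-exchange bitmap, so a concurrent completion clearing the same xri cannot race the set. A minimal sketch of that idiom, with hypothetical names (MAX_XRI, node_ctx, mark_xri_active are illustrative, not driver symbols):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define MAX_XRI 1024

    /* Hypothetical per-node tracker; active_xris stands in for
     * ndlp->active_rrqs.xri_bitmap. */
    struct node_ctx {
    	DECLARE_BITMAP(active_xris, MAX_XRI);
    };

    /* 0 if we newly marked the exchange, -EINVAL if it was already
     * active (an RRQ window is still open for this xri). */
    static int mark_xri_active(struct node_ctx *node, unsigned int adj_xri)
    {
    	if (adj_xri >= MAX_XRI)
    		return -EINVAL;
    	/* atomic: safe against a concurrent clear of the same bit */
    	if (test_and_set_bit(adj_xri, node->active_xris))
    		return -EINVAL;
    	return 0;
    }
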
/**
- * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @xritag: xri used in this exchange.
* @rrq: The RRQ to be cleared.
*
- * This function is called with hbalock held. This function
**/
-static void
-__lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+ uint16_t xritag,
+ struct lpfc_node_rrq *rrq)
{
uint16_t adj_xri;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
- ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+ if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
/* The target DID could have been swapped (cable swap);
* we should use the ndlp from the findnode if it is
* available.
*/
- if (!ndlp)
+ if ((!ndlp) && rrq->ndlp)
ndlp = rrq->ndlp;
+ if (!ndlp)
+ goto out;
+
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
}
+out:
mempool_free(rrq, phba->rrq_pool);
}
@@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(send_rrq);
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov + 1);
list_for_each_entry_safe(rrq, nextrrq,
- &phba->active_rrq_list, list) {
- if (time_after(jiffies, rrq->rrq_stop_time)) {
- list_del(&rrq->list);
- if (!rrq->send_rrq)
- /* this call will free the rrq */
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- else {
- /* if we send the rrq then the completion handler
- * will clear the bit in the xribitmap.
- */
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (lpfc_send_rrq(phba, rrq)) {
- lpfc_clr_rrq_active(phba, rrq->xritag,
- rrq);
- }
- spin_lock_irqsave(&phba->hbalock, iflags);
- }
- } else if (time_before(rrq->rrq_stop_time, next_time))
+ &phba->active_rrq_list, list) {
+ if (time_after(jiffies, rrq->rrq_stop_time))
+ list_move(&rrq->list, &send_rrq);
+ else if (time_before(rrq->rrq_stop_time, next_time))
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
+ list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+ list_del(&rrq->list);
+ if (!rrq->send_rrq)
+ /* this call will free the rrq */
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ else if (lpfc_send_rrq(phba, rrq)) {
+ /* the send failed, so the completion handler will never
+ * run to clear the bit in the xribitmap; clear it here.
+ */
+ lpfc_clr_rrq_active(phba, rrq->xritag,
+ rrq);
+ }
+ }
}
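
Instead of dropping and retaking hbalock around every lpfc_send_rrq() call, the handler now moves expired entries onto an on-stack list under the lock and processes them afterwards; lpfc_cleanup_wt_rrqs() further down does the bulk form of the same trick with list_splice_init(). A generic sketch of the pattern, assuming hypothetical item/process names:

    #include <linux/jiffies.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
    	struct list_head list;
    	unsigned long deadline;
    };

    static void drain_expired(spinlock_t *lock, struct list_head *active,
    			  void (*process)(struct item *))
    {
    	struct item *it, *next;
    	unsigned long flags;
    	LIST_HEAD(expired);	/* private list: no locking needed */

    	spin_lock_irqsave(lock, flags);
    	list_for_each_entry_safe(it, next, active, list)
    		if (time_after(jiffies, it->deadline))
    			list_move(&it->list, &expired);
    	spin_unlock_irqrestore(lock, flags);

    	/* process() may sleep or issue mailbox traffic; the
    	 * spinlock is no longer held. */
    	list_for_each_entry_safe(it, next, &expired, list) {
    		list_del(&it->list);
    		process(it);
    	}
    }
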
/**
@@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
/**
* lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
* @vport: Pointer to vport context object.
- *
- * Remove all active RRQs for this vport from the phba->active_rrq_list and
- * clear the rrq.
+ * @ndlp: Pointer to the lpfc_nodelist structure.
+ * If ndlp is NULL, remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear the rrq.
+ * If ndlp is not NULL, remove only the RRQs for this vport and this ndlp.
**/
void
-lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_node_rrq *rrq;
struct lpfc_node_rrq *nextrrq;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
- if (rrq->vport == vport) {
- list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- }
+ if (!ndlp) {
+ lpfc_sli4_vport_delete_els_xri_aborted(vport);
+ lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
}
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+ if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+ list_move(&rrq->list, &rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+ list_del(&rrq->list);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ }
}
/**
@@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov * 2);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+ list_splice_init(&phba->active_rrq_list, &rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
}
/**
- * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @ndlp: Targets nodelist pointer for this exchange.
* @xritag: the xri in the bitmap to test.
@@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
* returns 0 = rrq not active for this xri
* 1 = rrq is valid for this xri.
**/
-static int
-__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
uint16_t adj_xri;
@@ -802,52 +836,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
/**
- * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @xritag: xri used in this exchange.
- * @rrq: The RRQ to be cleared.
- *
- * This function is takes the hbalock.
- **/
-void
-lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
-{
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- __lpfc_clr_rrq_active(phba, xritag, rrq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return;
-}
-
-
-
-/**
- * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: Targets nodelist pointer for this exchange.
- * @xritag the xri in the bitmap to test.
- *
- * This function takes the hbalock.
- * returns 0 = rrq not active for this xri
- * 1 = rrq is valid for this xri.
- **/
-int
-lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- uint16_t xritag)
-{
- int ret;
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return ret;
-}
-
-/**
* __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
* @piocb: Pointer to the iocbq.
@@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
return NULL;
adj_xri = sglq->sli4_xritag -
phba->sli4_hba.max_cfg_param.xri_base;
- if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+ if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
*/
@@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
} else {
sglq->state = SGL_FREED;
sglq->ndlp = NULL;
- list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ list_add_tail(&sglq->list,
+ &phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
if (pring->txq_cnt)
@@ -4817,7 +4806,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"0378 No support for fcpi mode.\n");
ftr_rsp++;
}
-
+ if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+ phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
/*
* If the port cannot support the host's requested features
* then turn off the global config parameters to disable the
@@ -5004,7 +4996,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
if (rc)
@@ -10478,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->type = type;
cq->subtype = subtype;
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ cq->assoc_qid = eq->queue_id;
cq->host_index = 0;
cq->hba_index = 0;
@@ -10672,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
goto out;
}
mq->type = LPFC_MQ;
+ mq->assoc_qid = cq->queue_id;
mq->subtype = subtype;
mq->host_index = 0;
mq->hba_index = 0;
@@ -10759,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
goto out;
}
wq->type = LPFC_WQ;
+ wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
@@ -10876,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
hrq->type = LPFC_HRQ;
+ hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
@@ -10936,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
drq->type = LPFC_DRQ;
+ drq->assoc_qid = cq->queue_id;
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
@@ -11189,7 +11187,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_rq_destroy) -
- sizeof(struct mbox_header));
+ sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
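
Each of the sizeof corrections in this file computes the embedded mailbox payload as the full command structure minus the shared SLI4 config header, so subtracting the wrong header type (struct mbox_header instead of struct lpfc_sli4_cfg_mhdr) misstates the length. A stand-alone illustration of the arithmetic with stand-in types (the sizes are illustrative only):

    #include <stdio.h>
    #include <stddef.h>

    struct cfg_mhdr { unsigned int w[4]; };	/* shared config header */
    struct rq_destroy {				/* full mailbox command */
    	struct cfg_mhdr hdr;
    	unsigned int payload[6];
    };

    int main(void)
    {
    	/* Only the bytes after the shared header count, exactly as
    	 * in the lpfc_sli4_config() call above. */
    	size_t length = sizeof(struct rq_destroy) - sizeof(struct cfg_mhdr);
    	printf("payload length = %zu bytes\n", length);	/* 24 here */
    	return 0;
    }
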
@@ -11279,7 +11277,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
sizeof(struct lpfc_mbx_post_sgl_pages) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
&mbox->u.mqe.un.post_sgl_pages;
@@ -12402,7 +12400,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr),
+ LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
hdr_tmpl, rpi_page->page_count);
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c7217d579e0f..595056b89608 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -125,9 +125,9 @@ struct lpfc_queue {
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
uint32_t queue_id; /* Queue ID assigned by the hardware */
+ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
struct list_head page_list;
uint32_t page_count; /* Number of pages allocated for this queue */
-
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
union sli4_qe qe[1]; /* array to index entries (must be last) */
@@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params {
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
+ uint8_t cqv;
+ uint8_t mqv;
+ uint8_t wqv;
+ uint8_t rqv;
};
/* SLI4 HBA data structure entries */
@@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 386cf92de492..0a4d376dbca5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.20"
+#define LPFC_DRIVER_VERSION "8.3.21"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6b8d2952e32f..30ba5440c67a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -464,6 +464,7 @@ disable_vport(struct fc_vport *fc_vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
long timeout;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)
@@ -498,6 +499,9 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 1b5e375732c0..635b228c3ead 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.29-rc1"
-#define MEGASAS_RELDATE "Dec. 7, 2010"
-#define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010"
+#define MEGASAS_VERSION "00.00.05.34-rc1"
+#define MEGASAS_RELDATE "Feb. 24, 2011"
+#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
/*
* Device IDs
@@ -723,6 +723,7 @@ struct megasas_ctrl_info {
MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_MAX_SECTORS (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
#define MEGASAS_FW_BUSY 1
@@ -1477,4 +1478,7 @@ struct megasas_mgmt_info {
int max_index;
};
+#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5d6d07bd1cd0..f875e818905f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,12 +18,13 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.29-rc1
+ * Version : v00.00.05.34-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
* Sumant Patro
* Bo Yang
+ * Adam Radford <linuxraid@lsi.com>
*
* Send feedback to: <megaraidlinux@lsi.com>
*
@@ -134,7 +135,11 @@ spinlock_t poll_aen_lock;
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status);
-
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
@@ -554,6 +559,8 @@ static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
u32 status;
+ u32 mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
@@ -564,6 +571,15 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
}
/*
+ * Classify the interrupt: firmware state change or reply message
+ */
+ if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) ==
+ MFI_STATE_FAULT) {
+ mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ } else
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ /*
* Clear the interrupt by writing back the same value
*/
writel(status, &regs->outbound_intr_status);
@@ -573,7 +589,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
*/
readl(&regs->outbound_intr_status);
- return 1;
+ return mfiStatus;
}
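
The skinny clear_intr now classifies the cause before acknowledging, but the acknowledge itself keeps the usual MMIO shape: write the observed status back to clear exactly those cause bits, then read the register once more so the posted write reaches the device before the ISR returns. A minimal sketch of that sequence, assuming an ioremapped status register:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Acknowledge a write-to-clear interrupt status register. */
    static void ack_intr(void __iomem *intr_status)
    {
    	u32 status = readl(intr_status);

    	writel(status, intr_status);
    	/* Dummy read-back flushes the posted write. */
    	readl(intr_status);
    }
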
/**
@@ -597,17 +613,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_skinny - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_skinny(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_skinny - For controller reset check
* @regs: MFI register set
*/
@@ -625,7 +630,7 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
.disable_intr = megasas_disable_intr_skinny,
.clear_intr = megasas_clear_intr_skinny,
.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
- .adp_reset = megasas_adp_reset_skinny,
+ .adp_reset = megasas_adp_reset_gen2,
.check_reset = megasas_check_reset_skinny,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -740,20 +745,28 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
{
u32 retry = 0 ;
u32 HostDiag;
+ u32 *seq_offset = &reg_set->seq_offset;
+ u32 *hostdiag_offset = &reg_set->host_diag;
+
+ if (instance->instancet == &megasas_instance_template_skinny) {
+ seq_offset = &reg_set->fusion_seq_offset;
+ hostdiag_offset = &reg_set->fusion_host_diag;
+ }
+
+ writel(0, seq_offset);
+ writel(4, seq_offset);
+ writel(0xb, seq_offset);
+ writel(2, seq_offset);
+ writel(7, seq_offset);
+ writel(0xd, seq_offset);
- writel(0, &reg_set->seq_offset);
- writel(4, &reg_set->seq_offset);
- writel(0xb, &reg_set->seq_offset);
- writel(2, &reg_set->seq_offset);
- writel(7, &reg_set->seq_offset);
- writel(0xd, &reg_set->seq_offset);
msleep(1000);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
@@ -764,14 +777,14 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
- writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
+ writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
ssleep(10);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
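
After the magic unlock sequence is written through seq_offset, the reset code polls the (possibly fusion-relocated) host diag register until the expected bit appears, sleeping between reads. A condensed sketch of the poll loop; the retry bound here is an assumption for illustration, since the driver's actual limit falls outside the hunk shown:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    /* Poll @reg until @bit is set, sleeping 100 ms between reads. */
    static int wait_for_diag_bit(void __iomem *reg, u32 bit)
    {
    	unsigned int retry = 0;
    	u32 val = readl(reg);

    	while (!(val & bit)) {
    		if (retry++ == 100)	/* illustrative bound */
    			return -ETIMEDOUT;
    		msleep(100);
    		val = readl(reg);
    	}
    	return 0;
    }
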
@@ -2503,7 +2516,9 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
if ((mfiStatus = instance->instancet->clear_intr(
instance->reg_set)
) == 0) {
- return IRQ_NONE;
+ /* Hardware may not set outbound_intr_status in MSI-X mode */
+ if (!instance->msi_flag)
+ return IRQ_NONE;
}
instance->mfiStatus = mfiStatus;
@@ -2611,7 +2626,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
case MFI_STATE_FAULT:
printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
- return -ENODEV;
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
case MFI_STATE_WAIT_HANDSHAKE:
/*
@@ -3424,7 +3441,6 @@ fail_reply_queue:
megasas_free_cmds(instance);
fail_alloc_cmds:
- iounmap(instance->reg_set);
return 1;
}
@@ -3494,7 +3510,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
- return -ENODEV;
+ goto fail_init_adapter;
printk(KERN_ERR "megasas: INIT adapter done\n");
@@ -3543,7 +3559,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* Setup tasklet for cmd completion
*/
- tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
/* Initialize the cmd completion timer */
@@ -3553,6 +3569,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
+fail_init_adapter:
fail_ready_state:
iounmap(instance->reg_set);
@@ -3820,6 +3837,10 @@ static int megasas_io_attach(struct megasas_instance *instance)
instance->max_fw_cmds - MEGASAS_INT_CMDS;
host->this_id = instance->init_id;
host->sg_tablesize = instance->max_num_sge;
+
+ if (instance->fw_support_ieee)
+ instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
/*
* Check if the module parameter value for max_sectors can be used
*/
@@ -3899,9 +3920,26 @@ fail_set_dma_mask:
static int __devinit
megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int rval;
+ int rval, pos;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos),
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ msi_control_reg(pos),
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
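
In a kdump boot (reset_devices set), the crashed kernel may have left the adapter with MSI-X enabled, so probe clears the enable bit in the MSI-X capability's message control word before the firmware handshake; msi_control_reg() and PCI_MSIX_FLAGS_ENABLE come from the header hunk earlier in this diff. The same sequence, condensed into a helper (the function name is hypothetical):

    #include <linux/pci.h>

    static void kdump_quiesce_msix(struct pci_dev *pdev)
    {
    	u16 control;
    	int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

    	if (!pos)
    		return;
    	/* The message control word sits at PCI_MSI_FLAGS for both
    	 * the MSI and MSI-X capabilities. */
    	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
    	if (control & (1 << 15))	/* MSI-X enable bit */
    		pci_write_config_word(pdev, pos + PCI_MSI_FLAGS,
    				      control & ~(1 << 15));
    }
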
/*
* Announce PCI information
@@ -4039,12 +4077,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /*
- * Initialize MFI Firmware
- */
- if (megasas_init_fw(instance))
- goto fail_init_mfi;
-
/* Try to enable MSI-X */
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
@@ -4054,6 +4086,12 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->msi_flag = 1;
/*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_fw(instance))
+ goto fail_init_mfi;
+
+ /*
* Register IRQ
*/
if (request_irq(instance->msi_flag ? instance->msixentry.vector :
@@ -4105,24 +4143,23 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->instancet->disable_intr(instance->reg_set);
free_irq(instance->msi_flag ? instance->msixentry.vector :
instance->pdev->irq, instance);
+fail_irq:
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+ megasas_release_fusion(instance);
+ else
+ megasas_release_mfi(instance);
+ fail_init_mfi:
if (instance->msi_flag)
pci_disable_msix(instance->pdev);
-
- fail_irq:
- fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
- if (instance->producer) {
+ if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
- megasas_release_mfi(instance);
- } else {
- megasas_release_fusion(instance);
- }
if (instance->consumer)
pci_free_consistent(pdev, sizeof(u32), instance->consumer,
instance->consumer_h);
@@ -4242,9 +4279,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
/* cancel the delayed work if it is still queued */
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
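
cancel_delayed_work_sync() cancels the one work item and waits for any in-flight execution, whereas the old cancel_delayed_work() + flush_scheduled_work() pair waited on every item in the global workqueue, a needless serialization and a deadlock risk if the caller holds a lock that some other queued work wants. A sketch of the teardown idiom:

    #include <linux/workqueue.h>

    struct dev_ctx {
    	struct delayed_work hotplug_work;
    };

    static void dev_teardown(struct dev_ctx *ctx)
    {
    	/* Returns once this work is cancelled or has finished
    	 * running; nothing else on the queue is waited for. */
    	cancel_delayed_work_sync(&ctx->hotplug_work);
    }
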
@@ -4297,6 +4333,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
/*
* Initialize MFI Firmware
*/
@@ -4333,10 +4373,6 @@ megasas_resume(struct pci_dev *pdev)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Register IRQ
*/
@@ -4417,9 +4453,8 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
/* cancel the delayed work if it is still queued */
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
@@ -4611,6 +4646,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* For each user buffer, create a mirror buffer and copy in
*/
for (i = 0; i < ioc->sge_count; i++) {
+ if (!ioc->sgl[i].iov_len)
+ continue;
+
kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
ioc->sgl[i].iov_len,
&buf_handle, GFP_KERNEL);
@@ -5177,6 +5215,7 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index d6e2a663b165..145a8cffb1fa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -81,6 +81,10 @@ u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
struct LD_LOAD_BALANCE_INFO *lbInfo);
u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
@@ -983,13 +987,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
return 0;
-fail_alloc_cmds:
-fail_alloc_mfi_cmds:
fail_map_info:
if (i == 1)
dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
+ megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+ megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
return 1;
}
@@ -1431,8 +1437,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
/* Check if this is a system PD I/O */
- if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) &&
- (instance->pd_list[pd_index].driveType == TYPE_DISK)) {
+ if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
@@ -1455,7 +1460,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
io_request->RaidContext.VirtualDiskTgtId = device_id;
- io_request->LUN[0] = scmd->device->lun;
+ io_request->LUN[1] = scmd->device->lun;
io_request->DataLength = scsi_bufflen(scmd);
}
@@ -1479,7 +1484,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(instance, scp);
/* Zero out some fields so they don't get reused */
- io_request->LUN[0] = 0;
+ io_request->LUN[1] = 0;
io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
io_request->EEDPFlags = 0;
@@ -1743,7 +1748,7 @@ complete_cmd_fusion(struct megasas_instance *instance)
wmb();
writel(fusion->last_reply_idx,
&instance->reg_set->reply_post_host_index);
-
+ megasas_check_and_restore_queue_depth(instance);
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8be75e65f763..a3e60385787f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.16
+ * mpi2.h Version: 02.00.17
*
* Version History
* ---------------
@@ -63,6 +63,7 @@
* function codes, 0xF0 to 0xFF.
* 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
* Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -88,7 +89,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_UNIT (0x11)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index d76a65847603..f5b9c766e28f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.15
+ * mpi2_cnfg.h Version: 02.00.16
*
* Version History
* ---------------
@@ -125,6 +125,8 @@
* define.
* Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
* Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
* --------------------------------------------------------------------------
*/
@@ -745,8 +747,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
-#define MPI2_IOUNITPAGE1_MULTI_PATHING (0x00000002)
-#define MPI2_IOUNITPAGE1_SINGLE_PATHING (0x00000000)
/* IO Unit Page 3 */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
deleted file mode 100644
index b1e88f26b748..000000000000
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ /dev/null
@@ -1,384 +0,0 @@
- ==============================
- Fusion-MPT MPI 2.0 Header File Change History
- ==============================
-
- Copyright (c) 2000-2010 LSI Corporation.
-
- ---------------------------------------
- Header Set Release Version: 02.00.14
- Header Set Release Date: 10-28-09
- ---------------------------------------
-
- Filename Current version Prior version
- ---------- --------------- -------------
- mpi2.h 02.00.14 02.00.13
- mpi2_cnfg.h 02.00.13 02.00.12
- mpi2_init.h 02.00.08 02.00.07
- mpi2_ioc.h 02.00.13 02.00.12
- mpi2_raid.h 02.00.04 02.00.04
- mpi2_sas.h 02.00.03 02.00.02
- mpi2_targ.h 02.00.03 02.00.03
- mpi2_tool.h 02.00.04 02.00.04
- mpi2_type.h 02.00.00 02.00.00
- mpi2_ra.h 02.00.00 02.00.00
- mpi2_hbd.h 02.00.00
- mpi2_history.txt 02.00.14 02.00.13
-
-
- * Date Version Description
- * -------- -------- ------------------------------------------------------
-
-mpi2.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
- * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
- * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved ReplyPostHostIndex register to offset 0x6C of the
- * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
- * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
- * Added union of request descriptors.
- * Added union of reply descriptors.
- * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added define for MPI2_VERSION_02_00.
- * Fixed the size of the FunctionDependent5 field in the
- * MPI2_DEFAULT_REPLY structure.
- * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
- * Removed the MPI-defined Fault Codes and extended the
- * product specific codes up to 0xEFFF.
- * Added a sixth key value for the WriteSequence register
- * and changed the flush value to 0x0.
- * Added message function codes for Diagnostic Buffer Post
- * and Diagnsotic Release.
- * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
- * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
- * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
- * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added #defines for marking a reply descriptor as unused.
- * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved LUN field defines from mpi2_init.h.
- * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
- * In all request and reply descriptors, replaced VF_ID
- * field with MSIxIndex field.
- * Removed DevHandle field from
- * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
- * bytes reserved.
- * Added RAID Accelerator functionality.
- * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added MSI-x index mask and shift for Reply Post Host
- * Index register.
- * Added function code for Host Based Discovery Action.
- * --------------------------------------------------------------------------
-
-mpi2_cnfg.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
- * Added Manufacturing Page 11.
- * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
- * define.
- * 06-26-07 02.00.02 Adding generic structure for product-specific
- * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
- * Rework of BIOS Page 2 configuration page.
- * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
- * forms.
- * Added configuration pages IOC Page 8 and Driver
- * Persistent Mapping Page 0.
- * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
- * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
- * RAID Physical Disk Pages 0 and 1, RAID Configuration
- * Page 0).
- * Added new value for AccessStatus field of SAS Device
- * Page 0 (_SATA_NEEDS_INITIALIZATION).
- * 10-31-07 02.00.04 Added missing SEPDevHandle field to
- * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
- * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
- * NVDATA.
- * Modified IOC Page 7 to use masks and added field for
- * SASBroadcastPrimitiveMasks.
- * Added MPI2_CONFIG_PAGE_BIOS_4.
- * Added MPI2_CONFIG_PAGE_LOG_0.
- * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
- * Added SAS Device IDs.
- * Updated Integrated RAID configuration pages including
- * Manufacturing Page 4, IOC Page 6, and RAID Configuration
- * Page 0.
- * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
- * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
- * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
- * Added missing MaxNumRoutedSasAddresses field to
- * MPI2_CONFIG_PAGE_EXPANDER_0.
- * Added SAS Port Page 0.
- * Modified structure layout for
- * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
- * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
- * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
- * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
- * to 0x000000FF.
- * Added two new values for the Physical Disk Coercion Size
- * bits in the Flags field of Manufacturing Page 4.
- * Added product-specific Manufacturing pages 16 to 31.
- * Modified Flags bits for controlling write cache on SATA
- * drives in IO Unit Page 1.
- * Added new bit to AdditionalControlFlags of SAS IO Unit
- * Page 1 to control Invalid Topology Correction.
- * Added SupportedPhysDisks field to RAID Volume Page 1 and
- * added related defines.
- * Added additional defines for RAID Volume Page 0
- * VolumeStatusFlags field.
- * Modified meaning of RAID Volume Page 0 VolumeSettings
- * define for auto-configure of hot-swap drives.
- * Added PhysDiskAttributes field (and related defines) to
- * RAID Physical Disk Page 0.
- * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
- * Added three new DiscoveryStatus bits for SAS IO Unit
- * Page 0 and SAS Expander Page 0.
- * Removed multiplexing information from SAS IO Unit pages.
- * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
- * Removed Zone Address Resolved bit from PhyInfo and from
- * Expander Page 0 Flags field.
- * Added two new AccessStatus values to SAS Device Page 0
- * for indicating routing problems. Added 3 reserved words
- * to this page.
- * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
- * Inserted missing reserved field into structure for IOC
- * Page 6.
- * Added more pending task bits to RAID Volume Page 0
- * VolumeStatusFlags defines.
- * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
- * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
- * and SAS Expander Page 0 to flag a downstream initiator
- * when in simplified routing mode.
- * Removed SATA Init Failure defines for DiscoveryStatus
- * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
- * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
- * Added PortGroups, DmaGroup, and ControlGroup fields to
- * SAS Device Page 0.
- * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
- * Unit Page 6.
- * Added expander reduced functionality data to SAS
- * Expander Page 0.
- * Added SAS PHY Page 2 and SAS PHY Page 3.
- * 07-30-09 02.00.12 Added IO Unit Page 7.
- * Added new device ids.
- * Added SAS IO Unit Page 5.
- * Added partial and slumber power management capable flags
- * to SAS Device Page 0 Flags field.
- * Added PhyInfo defines for power condition.
- * Added Ethernet configuration pages.
- * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
- * Added SAS PHY Page 4 structure and defines.
- * --------------------------------------------------------------------------
-
-mpi2_init.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
- * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
- * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
- * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
- * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
- * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
- * Control field Task Attribute flags.
- * Moved LUN field defines to mpi2.h becasue they are
- * common to many structures.
- * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
- * Query Asynchronous Event.
- * Defined two new bits in the SlotStatus field of the SCSI
- * Enclosure Processor Request and Reply.
- * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
- * both SCSI IO Error Reply and SCSI Task Management Reply.
- * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
- * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
- * --------------------------------------------------------------------------
-
-mpi2_ioc.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
- * MaxTargets.
- * Added TotalImageSize field to FWDownload Request.
- * Added reserved words to FWUpload Request.
- * 06-26-07 02.00.02 Added IR Configuration Change List Event.
- * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
- * request and replaced it with
- * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
- * Replaced the MinReplyQueueDepth field of the IOCFacts
- * reply with MaxReplyDescriptorPostQueueDepth.
- * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
- * depth for the Reply Descriptor Post Queue.
- * Added SASAddress field to Initiator Device Table
- * Overflow Event data.
- * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
- * for SAS Initiator Device Status Change Event data.
- * Modified Reason Code defines for SAS Topology Change
- * List Event data, including adding a bit for PHY Vacant
- * status, and adding a mask for the Reason Code.
- * Added define for
- * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
- * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
- * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
- * the IOCFacts Reply.
- * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Moved MPI2_VERSION_UNION to mpi2.h.
- * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
- * instead of enables, and added SASBroadcastPrimitiveMasks
- * field.
- * Added Log Entry Added Event and related structure.
- * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
- * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
- * Added MaxVolumes and MaxPersistentEntries fields to
- * IOCFacts reply.
- * Added ProtocalFlags and IOCCapabilities fields to
- * MPI2_FW_IMAGE_HEADER.
- * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
- * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
- * a U16 (from a U32).
- * Removed extra 's' from EventMasks name.
- * 06-27-08 02.00.08 Fixed an offset in a comment.
- * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
- * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
- * renamed MinReplyFrameSize to ReplyFrameSize.
- * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
- * Added two new RAIDOperation values for Integrated RAID
- * Operations Status Event data.
- * Added four new IR Configuration Change List Event data
- * ReasonCode values.
- * Added two new ReasonCode defines for SAS Device Status
- * Change Event data.
- * Added three new DiscoveryStatus bits for the SAS
- * Discovery event data.
- * Added Multiplexing Status Change bit to the PhyStatus
- * field of the SAS Topology Change List event data.
- * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
- * BootFlags are now product-specific.
- * Added defines for the indivdual signature bytes
- * for MPI2_INIT_IMAGE_FOOTER.
- * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
- * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
- * define.
- * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
- * define.
- * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
- * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
- * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
- * Added two new reason codes for SAS Device Status Change
- * Event.
- * Added new event: SAS PHY Counter.
- * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
- * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Added new product id family for 2208.
- * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
- * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
- * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
- * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
- * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
- * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
- * Added Host Based Discovery Phy Event data.
- * Added defines for ProductID Product field
- * (MPI2_FW_HEADER_PID_).
- * Modified values for SAS ProductID Family
- * (MPI2_FW_HEADER_PID_FAMILY_).
- * --------------------------------------------------------------------------
-
-mpi2_raid.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
- * including the Actions and ActionData.
- * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
- * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
- * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
- * can be sized by the build environment.
- * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
- * VolumeCreationFlags and marked the old one as obsolete.
- * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
- * --------------------------------------------------------------------------
-
-mpi2_sas.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
- * Control Request.
- * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
- * Request.
- * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
- * to MPI2_SGE_IO_UNION since it supports chained SGLs.
- * 05-12-10 02.00.04 Modified some comments.
- * --------------------------------------------------------------------------
-
-mpi2_targ.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to
- * BufferPostFlags field of CommandBufferPostBase Request.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 10-02-08 02.00.03 Removed NextCmdBufferOffset from
- * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
- * Target Status Send Request only takes a single SGE for
- * response data.
- * --------------------------------------------------------------------------
-
-mpi2_tool.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
- * structures and defines.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
- * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
- * and reply messages.
- * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
- * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
- * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
- * --------------------------------------------------------------------------
-
-mpi2_type.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * --------------------------------------------------------------------------
-
-mpi2_ra.h
- * 05-06-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-mpi2_hbd.h
- * 10-28-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-
-mpi2_history.txt Parts list history
-
-Filename 02.00.14 02.00.13 02.00.12
----------- -------- -------- --------
-mpi2.h 02.00.14 02.00.13 02.00.12
-mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
-mpi2_init.h 02.00.08 02.00.07 02.00.07
-mpi2_ioc.h 02.00.13 02.00.12 02.00.11
-mpi2_raid.h 02.00.04 02.00.04 02.00.03
-mpi2_sas.h 02.00.03 02.00.02 02.00.02
-mpi2_targ.h 02.00.03 02.00.03 02.00.03
-mpi2_tool.h 02.00.04 02.00.04 02.00.03
-mpi2_type.h 02.00.00 02.00.00 02.00.00
-mpi2_ra.h 02.00.00 02.00.00 02.00.00
-mpi2_hbd.h 02.00.00
-
-Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
-mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06
-mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03
-mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06
-mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02
-mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01
-mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
-Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00
-mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 608f6d6e6fca..fdffde1ebc0f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2_sas.h Version: 02.00.04
+ * mpi2_sas.h Version: 02.00.05
*
* Version History
* ---------------
@@ -21,6 +21,7 @@
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
* 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
* --------------------------------------------------------------------------
*/
@@ -163,7 +164,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
U32 Reserved4; /* 0x14 */
U32 DataLength; /* 0x18 */
U8 CommandFIS[20]; /* 0x1C */
- MPI2_SGE_IO_UNION SGL; /* 0x20 */
+ MPI2_SGE_IO_UNION SGL; /* 0x30 */
} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
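
The comment fix is pure arithmetic: CommandFIS[20] starts at 0x1C, so the SGL that follows begins at 0x1C + 0x14 = 0x30, not 0x20. A stand-alone check with a trimmed stand-in layout:

    #include <stdio.h>
    #include <stddef.h>

    struct sata_pt {			/* trimmed stand-in layout */
    	unsigned char head[0x1C];	/* fields through DataLength */
    	unsigned char CommandFIS[20];
    	unsigned int  SGL;		/* stands in for MPI2_SGE_IO_UNION */
    };

    int main(void)
    {
    	printf("SGL at 0x%zx\n", offsetof(struct sata_pt, SGL)); /* 0x30 */
    	return 0;
    }
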
@@ -246,6 +247,8 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 5c6e3a67bb94..2a4bceda364b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.05
+ * mpi2_tool.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +23,8 @@
* Added MPI2_DIAG_BUF_TYPE_EXTENDED.
* Incremented MPI2_DIAG_BUF_TYPE_COUNT.
* 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
* --------------------------------------------------------------------------
*/
@@ -354,6 +356,10 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
/* count of the number of buffer types */
#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+/* values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
/****************************************************************************
* Diagnostic Buffer Post reply
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 9ead0399808a..e8a6f1cf1e4b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -752,20 +752,19 @@ static u8
_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx = 0xFF;
-
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- i = smid - ioc->hi_priority_smid;
- cb_idx = ioc->hpr_lookup[i].cb_idx;
- } else if (smid <= ioc->hba_queue_depth) {
- i = smid - ioc->internal_smid;
- cb_idx = ioc->internal_lookup[i].cb_idx;
- }
- } else {
+ u8 cb_idx;
+
+ if (smid < ioc->hi_priority_smid) {
i = smid - 1;
cb_idx = ioc->scsi_lookup[i].cb_idx;
- }
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
return cb_idx;
}
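
The lookup now walks the smid space in ascending order: scsiio trackers first, then hi-priority, then internal, with anything past the queue depth rejected. A toy classifier with made-up boundaries (the real ones are derived from IOC Facts at init time):

    #include <stdio.h>

    enum {	/* illustrative boundaries only */
    	HI_PRIORITY_SMID = 901,
    	INTERNAL_SMID    = 909,
    	HBA_QUEUE_DEPTH  = 911,
    };

    static const char *classify_smid(unsigned int smid)
    {
    	if (smid < HI_PRIORITY_SMID)
    		return "scsiio";	/* scsi_lookup[smid - 1] */
    	else if (smid < INTERNAL_SMID)
    		return "hi-priority";	/* hpr_lookup[...] */
    	else if (smid <= HBA_QUEUE_DEPTH)
    		return "internal";	/* internal_lookup[...] */
    	return "invalid";		/* cb_idx = 0xFF */
    }

    int main(void)
    {
    	printf("%s %s %s %s\n", classify_smid(1), classify_smid(901),
    	       classify_smid(909), classify_smid(912));
    	return 0;
    }
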
@@ -1430,7 +1429,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
unsigned long flags;
- struct request_tracker *request;
+ struct scsiio_tracker *request;
u16 smid;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1442,7 +1441,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
}
request = list_entry(ioc->free_list.next,
- struct request_tracker, tracker_list);
+ struct scsiio_tracker, tracker_list);
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
@@ -1496,48 +1495,47 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
struct chain_tracker *chain_req, *next;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- /* hi-priority */
- i = smid - ioc->hi_priority_smid;
- ioc->hpr_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->hpr_lookup[i].tracker_list,
- &ioc->hpr_free_list);
- } else {
- /* internal queue */
- i = smid - ioc->internal_smid;
- ioc->internal_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->internal_lookup[i].tracker_list,
- &ioc->internal_free_list);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
}
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return;
- }
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->free_chain_list);
+ /*
+ * See the _wait_for_commands_to_complete() call with regard
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
}
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
}
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- /*
- * See _wait_for_commands_to_complete() call with regards to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
- }
}
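
The recovery snippet that moved here pairs with _wait_for_commands_to_complete(), which sleeps on reset_wq until the outstanding scsiio count drains; the free path wakes the waiter exactly when the last pending IO is being returned. A sketch of the count-and-wake half of that handshake (the names mirror the fields used above):

    #include <linux/wait.h>

    struct ioc_ctx {
    	wait_queue_head_t reset_wq;
    	int pending_io_count;
    	int shost_recovery;
    };

    /* Call with the lookup lock held, as in the free path above. */
    static void note_io_done(struct ioc_ctx *ioc)
    {
    	if (ioc->shost_recovery && ioc->pending_io_count) {
    		if (ioc->pending_io_count == 1)
    			wake_up(&ioc->reset_wq);
    		ioc->pending_io_count--;
    	}
    }
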
/**
@@ -1725,6 +1723,31 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
}
/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL &&
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) {
+
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_INTEL_RMS2LL080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS2LL040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ break;
+ }
+ }
+}
+
+/**
* _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*
@@ -1754,6 +1777,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
ioc->bios_pg3.BiosVersion & 0x000000FF);
_base_display_dell_branding(ioc);
+ _base_display_intel_branding(ioc);
printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
@@ -2252,9 +2276,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
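The reordered free path above reads as a pure range classification: a smid below hi_priority_smid belongs to the scsiio queue, one below internal_smid to the hi-priority queue, and one up to hba_queue_depth to the internal queue. A minimal standalone sketch of that ladder (the boundary values are invented for illustration; in the driver they come from the MPT2SAS_ADAPTER fields of the same names):

#include <stdio.h>

enum smid_queue { SMID_SCSIIO, SMID_HI_PRIORITY, SMID_INTERNAL, SMID_INVALID };

/* Mirror of the new if/else-if ladder in mpt2sas_base_free_smid(). */
static enum smid_queue classify_smid(unsigned int smid,
		unsigned int hi_priority_smid, unsigned int internal_smid,
		unsigned int hba_queue_depth)
{
	if (smid < hi_priority_smid)
		return SMID_SCSIIO;		/* lookup index: smid - 1 */
	else if (smid < internal_smid)
		return SMID_HI_PRIORITY;	/* index: smid - hi_priority_smid */
	else if (smid <= hba_queue_depth)
		return SMID_INTERNAL;		/* index: smid - internal_smid */
	return SMID_INVALID;			/* out of range: nothing freed */
}

int main(void)
{
	/* invented layout: smids 1-100 scsiio, 101-108 hi-pri, 109-116 internal */
	printf("%d %d %d %d\n",
	       classify_smid(1, 101, 109, 116),
	       classify_smid(101, 101, 109, 116),
	       classify_smid(109, 101, 109, 116),
	       classify_smid(200, 101, 109, 116));	/* prints: 0 1 2 3 */
	return 0;
}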
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 283568c6fb04..a3f8aa9baea4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "07.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 07
+#define MPT2SAS_DRIVER_VERSION "08.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -101,7 +101,8 @@
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
-#define MPT_MAX_CALLBACKS 16
+#define MPT_MAX_CALLBACKS 16
+
#define CAN_SLEEP 1
#define NO_SLEEP 0
@@ -154,6 +155,20 @@
#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22
/*
+ * Intel HBA branding
+ */
+#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
+ "Intel Integrated RAID Module RMS2LL080"
+#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
+ "Intel Integrated RAID Module RMS2LL040"
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
+#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
+
+/*
* per target private data
*/
#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
@@ -431,14 +446,14 @@ struct chain_tracker {
};
/**
- * struct request_tracker - firmware request tracker
+ * struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
* @scmd: scsi request pointer
* @cb_idx: callback index
* @chain_list: list of chains associated to this IO
* @tracker_list: list of free request (ioc->free_list)
*/
-struct request_tracker {
+struct scsiio_tracker {
u16 smid;
struct scsi_cmnd *scmd;
u8 cb_idx;
@@ -447,6 +462,18 @@ struct request_tracker {
};
/**
+ * struct request_tracker - misc mf request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free requests (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
* struct _tr_list - target reset list
* @handle: device handle
* @state: state machine
@@ -709,7 +737,7 @@ struct MPT2SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct request_tracker *scsi_lookup;
+ struct scsiio_tracker *scsi_lookup;
ulong scsi_lookup_pages;
spinlock_t scsi_lookup_lock;
struct list_head free_list;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 5ded3db6e316..6ceb7759bfe5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -6975,7 +6975,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
u32 device_state;
mpt2sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index b37c8a3c1bb0..86afb13f1e79 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1005,11 +1005,23 @@ int osd_req_read_sg(struct osd_request *or,
const struct osd_sg_entry *sglist, unsigned numentries)
{
u64 len;
- int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+ u64 off;
+ int ret;
- if (ret)
- return ret;
- osd_req_read(or, obj, 0, bio, len);
+ if (numentries > 1) {
+ off = 0;
+ ret = _add_sg_continuation_descriptor(or, sglist, numentries,
+ &len);
+ if (ret)
+ return ret;
+ } else {
+ /* Optimize the case of single segment, read_sg is a
+ * bidi operation.
+ */
+ len = sglist->len;
+ off = sglist->offset;
+ }
+ osd_req_read(or, obj, off, bio, len);
return 0;
}
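The osd_req_read_sg() change above short-circuits the one-entry case: a single segment becomes a plain bidi read at the entry's own offset, and only multi-segment lists pay for a scatter-gather continuation descriptor. A compilable sketch of that decision (the types and the length summation are stand-ins, not the libosd API; in the driver the total comes from _add_sg_continuation_descriptor()):

#include <stdint.h>
#include <stdio.h>

struct sg_entry { uint64_t offset; uint64_t len; };

/* Returns 1 when a continuation descriptor is needed. */
static int plan_read(const struct sg_entry *sglist, unsigned int numentries,
		     uint64_t *off, uint64_t *len)
{
	unsigned int i;

	if (numentries > 1) {
		*off = 0;	/* offsets travel in the descriptor */
		*len = 0;
		for (i = 0; i < numentries; i++)
			*len += sglist[i].len;
		return 1;
	}
	/* single segment: plain bidi read at the given range */
	*off = sglist->offset;
	*len = sglist->len;
	return 0;
}

int main(void)
{
	struct sg_entry one = { 4096, 512 };
	uint64_t off, len;
	int sg = plan_read(&one, 1, &off, &len);

	printf("sg=%d off=%llu len=%llu\n", sg,
	       (unsigned long long)off, (unsigned long long)len);
	return 0;	/* sg=0 off=4096 len=512 */
}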
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index d8db0137c0c7..18b6c55cd08c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1382,53 +1382,50 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
return MPI_IO_STATUS_BUSY;
}
-static void pm8001_work_queue(struct work_struct *work)
+static void pm8001_work_fn(struct work_struct *work)
{
- struct delayed_work *dw = container_of(work, struct delayed_work, work);
- struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q);
+ struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
struct pm8001_device *pm8001_dev;
- struct domain_device *dev;
+ struct domain_device *dev;
- switch (wq->handler) {
+ switch (pw->handler) {
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_IN_ERROR:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_NON_OPERATIONAL:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
}
- list_del(&wq->entry);
- kfree(wq);
+ kfree(pw);
}
static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
int handler)
{
- struct pm8001_wq *wq;
+ struct pm8001_work *pw;
int ret = 0;
- wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC);
- if (wq) {
- wq->pm8001_ha = pm8001_ha;
- wq->data = data;
- wq->handler = handler;
- INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue);
- list_add_tail(&wq->entry, &pm8001_ha->wq_list);
- schedule_delayed_work(&wq->work_q, 0);
+ pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC);
+ if (pw) {
+ pw->pm8001_ha = pm8001_ha;
+ pw->data = data;
+ pw->handler = handler;
+ INIT_WORK(&pw->work, pm8001_work_fn);
+ queue_work(pm8001_wq, &pw->work);
} else
ret = -ENOMEM;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index b95285f3383f..002360da01e3 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -51,6 +51,8 @@ static int pm8001_id;
LIST_HEAD(hba_list);
+struct workqueue_struct *pm8001_wq;
+
/**
* The main structure which LLDD must register for scsi core.
*/
@@ -134,7 +136,6 @@ static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
int i;
- struct pm8001_wq *wq;
if (!pm8001_ha)
return;
@@ -150,8 +151,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
if (pm8001_ha->shost)
scsi_host_put(pm8001_ha->shost);
- list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
- cancel_delayed_work(&wq->work_q);
+ flush_workqueue(pm8001_wq);
kfree(pm8001_ha->tags);
kfree(pm8001_ha);
}
@@ -381,7 +381,6 @@ pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- INIT_LIST_HEAD(&pm8001_ha->wq_list);
pm8001_ha->logging_level = 0x01;
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
@@ -758,7 +757,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
int i , pos;
u32 device_state;
pm8001_ha = sha->lldd_ha;
- flush_scheduled_work();
+ flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pos == 0) {
@@ -870,17 +869,26 @@ static struct pci_driver pm8001_pci_driver = {
*/
static int __init pm8001_init(void)
{
- int rc;
+ int rc = -ENOMEM;
+
+ pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+ if (!pm8001_wq)
+ goto err;
+
pm8001_id = 0;
pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
if (!pm8001_stt)
- return -ENOMEM;
+ goto err_wq;
rc = pci_register_driver(&pm8001_pci_driver);
if (rc)
- goto err_out;
+ goto err_tp;
return 0;
-err_out:
+
+err_tp:
sas_release_transport(pm8001_stt);
+err_wq:
+ destroy_workqueue(pm8001_wq);
+err:
return rc;
}
@@ -888,6 +896,7 @@ static void __exit pm8001_exit(void)
{
pci_unregister_driver(&pm8001_pci_driver);
sas_release_transport(pm8001_stt);
+ destroy_workqueue(pm8001_wq);
}
module_init(pm8001_init);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 7f064f9ca828..bdb6b27dedd6 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -50,6 +50,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
@@ -379,18 +380,16 @@ struct pm8001_hba_info {
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet;
#endif
- struct list_head wq_list;
u32 logging_level;
u32 fw_status;
const struct firmware *fw_image;
};
-struct pm8001_wq {
- struct delayed_work work_q;
+struct pm8001_work {
+ struct work_struct work;
struct pm8001_hba_info *pm8001_ha;
void *data;
int handler;
- struct list_head entry;
};
struct pm8001_fw_image_header {
@@ -460,6 +459,9 @@ struct fw_control_ex {
void *param3;
};
+/* pm8001 workqueue */
+extern struct workqueue_struct *pm8001_wq;
+
/******************** function prototype *********************/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
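With events now carried by plain work_struct items on a driver-owned workqueue, teardown reduces to flush_workqueue()/destroy_workqueue(), and module init gains the usual reverse-order goto unwind. A userspace sketch of that unwind pattern (names and return codes are illustrative, not the driver's symbols):

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what)
{
	printf("acquire %s\n", what);
	return malloc(1);
}

static void release(const char *what, void *p)
{
	printf("release %s\n", what);
	free(p);
}

/* pci_ok stands in for pci_register_driver() succeeding. */
static int init_like_pm8001(int pci_ok)
{
	int rc = -12;	/* -ENOMEM, preloaded as in the driver */
	void *wq, *stt;

	wq = acquire("workqueue");
	if (!wq)
		goto err;
	stt = acquire("sas transport template");
	if (!stt)
		goto err_wq;
	if (!pci_ok)
		goto err_tp;
	return 0;

err_tp:
	release("sas transport template", stt);
err_wq:
	release("workqueue", wq);
err:
	return rc;
}

int main(void)
{
	return init_like_pm8001(0) ? 1 : 0;
}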
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 321cf3ae8630..bcf858e88c64 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5454,7 +5454,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
- flush_scheduled_work();
+ flush_work_sync(&pinstance->worker_q);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ccfc8e78be21..6c51c0a35b9e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2402,13 +2402,13 @@ struct qla_hw_data {
volatile struct {
uint32_t mbox_int :1;
uint32_t mbox_busy :1;
-
uint32_t disable_risc_code_load :1;
uint32_t enable_64bit_addressing :1;
uint32_t enable_lip_reset :1;
uint32_t enable_target_reset :1;
uint32_t enable_lip_full_login :1;
uint32_t enable_led_scheme :1;
+
uint32_t msi_enabled :1;
uint32_t msix_enabled :1;
uint32_t disable_serdes :1;
@@ -2417,6 +2417,7 @@ struct qla_hw_data {
uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
+
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
@@ -2424,9 +2425,11 @@ struct qla_hw_data {
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
- uint32_t fw_hung :1;
- uint32_t quiesce_owner:1;
+ uint32_t isp82xx_fw_hung:1;
+
+ uint32_t quiesce_owner:1;
uint32_t thermal_supported:1;
+ uint32_t isp82xx_reset_hdlr_active:1;
/* 26 bits */
} flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 89e900adb679..d48326ee3f61 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -565,6 +565,7 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(srb_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4c083928c2fb..74a91b6dfc68 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,8 +121,11 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
- DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
- vha->host_no, routine, ms_pkt->entry_status));
+ DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
+ "(%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, ms_pkt->entry_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@@ -136,8 +139,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
DEBUG2_3(printk("scsi(%ld): %s failed, "
- "rejected request:\n", vha->host_no,
- routine));
+ "rejected request on port_id: %02x%02x%02x\n",
+ vha->host_no, routine,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
DEBUG2_3(qla2x00_dump_buffer(
(uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr)));
@@ -147,8 +152,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
break;
default:
DEBUG2_3(printk("scsi(%ld): %s failed, completion "
- "status (%x).\n", vha->host_no, routine,
- comp_status));
+ "status (%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, comp_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
break;
}
}
@@ -1965,7 +1972,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
"scsi(%ld): GFF_ID issue IOCB failed "
"(%d).\n", vha->host_no, rval));
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
- "GPN_ID") != QLA_SUCCESS) {
+ "GFF_ID") != QLA_SUCCESS) {
DEBUG2_3(printk(KERN_INFO
"scsi(%ld): GFF_ID IOCB status had a "
"failure status code\n", vha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d9479c3fe5f8..8575808dbae0 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1967,7 +1967,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
} else {
/* Mailbox cmd failed. Timeout on min_wait. */
if (time_after_eq(jiffies, mtime) ||
- (IS_QLA82XX(ha) && ha->flags.fw_hung))
+ ha->flags.isp82xx_fw_hung)
break;
}
@@ -3945,8 +3945,13 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
unsigned long flags;
+ fc_port_t *fcport;
- vha->flags.online = 0;
+ /* For ISP82XX, the driver waits for the commands
+ * to complete, so the online flag should remain set.
+ */
+ if (!IS_QLA82XX(ha))
+ vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
@@ -3954,7 +3959,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
- /* Chip reset does not apply to 82XX */
+ /* For ISP82XX, reset_chip only disables interrupts.
+ * The driver waits for the commands to complete,
+ * so the interrupts need to stay enabled.
+ */
if (!IS_QLA82XX(ha))
ha->isp_ops->reset_chip(vha);
@@ -3980,14 +3988,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
LOOP_DOWN_TIME);
}
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vp->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (!ha->flags.eeh_busy) {
/* Make sure for ISP 82XX IO DMA is complete */
if (IS_QLA82XX(ha)) {
- if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST) == QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Done wait for pending commands\n"));
- }
+ qla82xx_chip_reset_cleanup(vha);
+
+ /* Done waiting for pending commands.
+ * Reset the online flag.
+ */
+ vha->flags.online = 0;
}
/* Requeue all commands in outstanding command list. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 4c1ba6263eb3..d78d5896fc33 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -328,6 +328,7 @@ qla2x00_start_scsi(srb_t *sp)
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -406,7 +407,22 @@ qla2x00_start_scsi(srb_t *sp)
cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
/* Update tagged queuing modifier */
- cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_HEAD_TAG);
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_ORDERED_TAG);
+ break;
+ default:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ break;
+ }
+ }
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -971,6 +987,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t fcp_cmnd_len;
struct fcp_cmnd *fcp_cmnd;
dma_addr_t crc_ctx_dma;
+ char tag[2];
cmd = sp->cmd;
@@ -1068,9 +1085,27 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
- fcp_cmnd->task_attribute = 0;
fcp_cmnd->task_management = 0;
+ /*
+ * Update tagged queuing modifier if using command tag queuing
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_ORDERED;
+ break;
+ default:
+ fcp_cmnd->task_attribute = 0;
+ break;
+ }
+ } else {
+ fcp_cmnd->task_attribute = 0;
+ }
+
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
@@ -1177,6 +1212,7 @@ qla24xx_start_scsi(srb_t *sp)
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -1260,6 +1296,18 @@ qla24xx_start_scsi(srb_t *sp)
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
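All three submission paths above translate the midlayer's tag message into an FCP task attribute the same way. The mapping in isolation, with the SCSI-2 tag message codes and FCP task values redefined so the sketch compiles on its own (values believed to match scsi_tcq.h and the firmware interface, but assumed here):

#include <stdio.h>

#define HEAD_OF_QUEUE_TAG	0x21
#define ORDERED_QUEUE_TAG	0x22
#define TSK_SIMPLE		0
#define TSK_HEAD_OF_QUEUE	1
#define TSK_ORDERED		2

static int tag_to_task_attribute(char tag0)
{
	switch (tag0) {
	case HEAD_OF_QUEUE_TAG:
		return TSK_HEAD_OF_QUEUE;
	case ORDERED_QUEUE_TAG:
		return TSK_ORDERED;
	default:
		return TSK_SIMPLE;	/* simple tag is the default */
	}
}

int main(void)
{
	printf("%d\n", tag_to_task_attribute(ORDERED_QUEUE_TAG)); /* 2 */
	return 0;
}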
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e473e9fb363c..7a7c0ecfe7dd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -71,6 +71,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ if (ha->flags.isp82xx_fw_hung) {
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ rval = QLA_FUNCTION_FAILED;
+ goto premature_exit;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -83,13 +90,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
- /* Setting Link-Down error */
- mcp->mb[0] = MBS_LINK_DOWN_ERROR;
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
- }
-
ha->flags.mbox_busy = 1;
/* Save mailbox command for debug */
ha->mcp = mcp;
@@ -223,7 +223,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
+ if (ha->flags.isp82xx_fw_hung) {
ha->flags.mbox_busy = 0;
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -2462,22 +2462,19 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
"-- completion status (%x).\n", __func__,
vha->host_no, le16_to_cpu(sts->comp_status)));
rval = QLA_FUNCTION_FAILED;
- } else if (!(le16_to_cpu(sts->scsi_status) &
- SS_RESPONSE_INFO_LEN_VALID)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- no response info (%x).\n", __func__, vha->host_no,
- le16_to_cpu(sts->scsi_status)));
- rval = QLA_FUNCTION_FAILED;
- } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- not enough response info (%d).\n", __func__,
- vha->host_no, le32_to_cpu(sts->rsp_data_len)));
- rval = QLA_FUNCTION_FAILED;
- } else if (sts->data[3]) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- response (%x).\n", __func__,
- vha->host_no, sts->data[3]));
- rval = QLA_FUNCTION_FAILED;
+ } else if (le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID) {
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent "
+ "data length -- not enough response info (%d).\n",
+ __func__, vha->host_no,
+ le32_to_cpu(sts->rsp_data_len)));
+ } else if (sts->data[3]) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- response (%x).\n", __func__,
+ vha->host_no, sts->data[3]));
+ rval = QLA_FUNCTION_FAILED;
+ }
}
/* Issue marker IOCB. */
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index fdb96a3584a5..76ec876e6b21 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,7 @@
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
+#include <scsi/scsi_tcq.h>
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
@@ -2547,7 +2548,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = dsd_list_len;
+ cmd_pkt->fcp_data_dseg_len = dsd_list_len;
} else {
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
@@ -2620,6 +2621,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -2770,6 +2772,22 @@ sufficient_dsds:
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_ORDERED;
+ break;
+ }
+ }
+
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
@@ -2835,6 +2853,20 @@ sufficient_dsds:
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
@@ -3457,46 +3489,28 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
}
-static void
+int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
- uint32_t fw_heartbeat_counter, halt_status;
- struct qla_hw_data *ha = vha->hw;
+ uint32_t fw_heartbeat_counter;
+ int status = 0;
- fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
+ QLA82XX_PEG_ALIVE_COUNTER);
/* all 0xff, assume AER/EEH in progress, ignore */
if (fw_heartbeat_counter == 0xffffffff)
- return;
+ return status;
if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
vha->seconds_since_last_heartbeat++;
/* FW not alive after 2 seconds */
if (vha->seconds_since_last_heartbeat == 2) {
vha->seconds_since_last_heartbeat = 0;
- halt_status = qla82xx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
- if (halt_status & HALT_STATUS_UNRECOVERABLE) {
- set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
- } else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s - detect abort needed\n",
- vha->host_no, __func__);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- }
- qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to fw hung, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ status = 1;
}
} else
vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
}
/*
@@ -3557,6 +3571,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
break;
case QLA82XX_DEV_NEED_RESET:
qla82xx_need_reset_handler(vha);
+ dev_init_timeout = jiffies +
+ (ha->nx_dev_init_timeout * HZ);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
qla82xx_need_qsnt_handler(vha);
@@ -3596,30 +3612,18 @@ exit:
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
- uint32_t dev_state;
+ uint32_t dev_state, halt_status;
struct qla_hw_data *ha = vha->hw;
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-
/* don't poll if reset is going on */
- if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
- if (dev_state == QLA82XX_DEV_NEED_RESET) {
+ if (!ha->flags.isp82xx_reset_hdlr_active) {
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
qla_printk(KERN_WARNING, ha,
- "%s(): Adapter reset needed!\n", __func__);
+ "%s(): Adapter reset needed!\n", __func__);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Need reset, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
DEBUG(qla_printk(KERN_INFO, ha,
@@ -3629,6 +3633,30 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
- qla82xx_check_fw_alive(vha);
+ if (qla82xx_check_fw_alive(vha)) {
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): %s - detect abort needed\n",
+ vha->host_no, __func__);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ qla2xxx_wake_dpc(vha);
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ DEBUG2(qla_printk(KERN_ERR, ha,
+ "Due to fw hung, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+ }
}
}
}
@@ -3663,6 +3692,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
"Exiting.\n", __func__, vha->host_no);
return QLA_SUCCESS;
}
+ ha->flags.isp82xx_reset_hdlr_active = 1;
qla82xx_idc_lock(ha);
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
@@ -3683,7 +3713,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_idc_unlock(ha);
if (rval == QLA_SUCCESS) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.isp82xx_reset_hdlr_active = 0;
qla82xx_restart_isp(vha);
}
@@ -3791,3 +3822,71 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
return status;
}
+
+void
+qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
+{
+ int i;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Check whether the 82XX firmware is still alive;
+ * we may have arrived here from NEED_RESET
+ * detection only.
+ */
+ if (!ha->flags.isp82xx_fw_hung) {
+ for (i = 0; i < 2; i++) {
+ msleep(1000);
+ if (qla82xx_check_fw_alive(vha)) {
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ complete(&ha->mbx_intr_comp);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Abort all commands gracefully if fw NOT hung */
+ if (!ha->flags.isp82xx_fw_hung) {
+ int cnt, que;
+ srb_t *sp;
+ struct req_que *req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command failed in %s\n",
+ vha->host_no, __func__);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command success in %s\n",
+ vha->host_no, __func__);
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait for pending cmds (physical and virtual) to complete */
+ if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
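qla82xx_check_fw_alive() now just reports staleness and leaves the recovery policy to its callers. A standalone model of the heartbeat test it performs, with the state that lives in scsi_qla_host_t passed explicitly:

#include <stdint.h>
#include <stdio.h>

struct hb_state { uint32_t last_counter; int stale_polls; };

/* Returns 1 once the counter has been unchanged for two polls
 * (roughly two seconds at the watchdog's rate). */
static int fw_hung_after_poll(struct hb_state *s, uint32_t counter)
{
	int hung = 0;

	if (counter == 0xffffffff)	/* all 0xff: AER/EEH, ignore */
		return 0;
	if (counter == s->last_counter) {
		if (++s->stale_polls == 2) {
			s->stale_polls = 0;
			hung = 1;
		}
	} else {
		s->stale_polls = 0;
	}
	s->last_counter = counter;
	return hung;
}

int main(void)
{
	struct hb_state s = { 0, 0 };

	fw_hung_after_poll(&s, 7);		   /* counter moved */
	printf("%d\n", fw_hung_after_poll(&s, 7)); /* 0: first stale poll */
	printf("%d\n", fw_hung_after_poll(&s, 7)); /* 1: declared hung */
	return 0;
}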
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e90f7c16b956..75a966c94860 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -506,7 +506,7 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
- struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -520,14 +520,13 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->cmd = cmd;
sp->flags = 0;
CMD_SP(cmd) = (void *)sp;
- cmd->scsi_done = done;
sp->ctx = NULL;
return sp;
}
static int
-qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -537,7 +536,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
srb_t *sp;
int rval;
- spin_unlock_irq(vha->host->host_lock);
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
@@ -569,40 +567,32 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
goto qc24_target_busy;
}
- sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
if (!sp)
- goto qc24_host_busy_lock;
+ goto qc24_host_busy;
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS)
goto qc24_host_busy_free_sp;
- spin_lock_irq(vha->host->host_lock);
-
return 0;
qc24_host_busy_free_sp:
qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
-qc24_host_busy_lock:
- spin_lock_irq(vha->host->host_lock);
+qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc24_target_busy:
- spin_lock_irq(vha->host->host_lock);
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
- spin_lock_irq(vha->host->host_lock);
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla2xxx_queuecommand)
-
-
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -821,17 +811,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
srb_t *sp;
- int ret = SUCCESS;
+ int ret;
unsigned int id, lun;
unsigned long flags;
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- fc_block_scsi_eh(cmd);
-
if (!CMD_SP(cmd))
return SUCCESS;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = SUCCESS;
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -940,11 +933,13 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int err;
- fc_block_scsi_eh(cmd);
-
if (!fcport)
return FAILED;
+ err = fc_block_scsi_eh(cmd);
+ if (err != 0)
+ return err;
+
qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
vha->host_no, cmd->device->id, cmd->device->lun, name);
@@ -1018,14 +1013,17 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
int ret = FAILED;
unsigned int id, lun;
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, vha->hw,
"scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
@@ -1078,14 +1076,17 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
unsigned int id, lun;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
@@ -3805,7 +3806,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
ha->flags.eeh_busy = 1;
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
- ha->flags.fw_hung = 1;
+ ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
@@ -3945,7 +3946,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
qla82xx_idc_unlock(ha);
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
/* Clear driver state register */
@@ -3958,7 +3959,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
"This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
qla82xx_set_drv_active(base_vha);
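The queuecommand conversion above moves qla2xxx to the lockless ->queuecommand(host, cmd) prototype: the DEF_SCSI_QCMD wrapper and the manual host_lock drop/retake disappear, and failure paths complete the command directly through ->scsi_done(). A minimal sketch of the shape of the new entry point (the types are stand-ins for the midlayer's, not <scsi/scsi_host.h>):

#include <stdio.h>

struct Scsi_Host { int unused; };
struct scsi_cmnd {
	void (*scsi_done)(struct scsi_cmnd *);
	int result;
};

static void my_done(struct scsi_cmnd *cmd)
{
	printf("completed, result=0x%x\n", cmd->result);
}

static int transport_dead = 1;	/* stand-in for the ha->flags checks */

/* Called by the midlayer WITHOUT host_lock held, so there is no
 * unlock/relock bracketing and no done() callback argument. */
static int queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	(void)host;
	if (transport_dead) {
		cmd->result = 0x1 << 16;	/* DID_NO_CONNECT << 16 */
		cmd->scsi_done(cmd);
		return 0;
	}
	/* ...build an sp and hand it to the ISP here... */
	return 0;
}

int main(void)
{
	struct Scsi_Host h = { 0 };
	struct scsi_cmnd c = { my_done, 0 };

	return queuecommand(&h, &c);
}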
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cf0075a2d0c2..3a260c3f055a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.05-k0"
+#define QLA2XXX_VERSION "8.03.07.00"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 5
+#define QLA_DRIVER_PATCH_VER 7
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a6b2d72022fc..fa5758cbdedb 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -89,32 +89,34 @@ static const char * scsi_debug_version_date = "20100324";
/* With these defaults, this driver will make 1 host with 1 target
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
+#define DEF_ATO 1
#define DEF_DELAY 1
#define DEF_DEV_SIZE_MB 8
-#define DEF_EVERY_NTH 0
-#define DEF_NUM_PARTS 0
-#define DEF_OPTS 0
-#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
-#define DEF_PTYPE 0
+#define DEF_DIF 0
+#define DEF_DIX 0
#define DEF_D_SENSE 0
-#define DEF_NO_LUN_0 0
-#define DEF_VIRTUAL_GB 0
+#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
-#define DEF_VPD_USE_HOSTNO 1
-#define DEF_SECTOR_SIZE 512
-#define DEF_DIX 0
-#define DEF_DIF 0
#define DEF_GUARD 0
-#define DEF_ATO 1
-#define DEF_PHYSBLK_EXP 0
+#define DEF_LBPU 0
+#define DEF_LBPWS 0
+#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
+#define DEF_NO_LUN_0 0
+#define DEF_NUM_PARTS 0
+#define DEF_OPTS 0
#define DEF_OPT_BLKS 64
+#define DEF_PHYSBLK_EXP 0
+#define DEF_PTYPE 0
+#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
+#define DEF_SECTOR_SIZE 512
+#define DEF_UNMAP_ALIGNMENT 0
+#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
-#define DEF_UNMAP_GRANULARITY 1
-#define DEF_UNMAP_ALIGNMENT 0
-#define DEF_TPWS 0
-#define DEF_TPU 0
+#define DEF_VIRTUAL_GB 0
+#define DEF_VPD_USE_HOSTNO 1
+#define DEF_WRITESAME_LENGTH 0xFFFF
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE 1
@@ -144,6 +146,7 @@ static const char * scsi_debug_version_date = "20100324";
/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
+#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
* or "peripheral device" addressing (value 0) */
@@ -155,36 +158,38 @@ static const char * scsi_debug_version_date = "20100324";
#define SCSI_DEBUG_CANQUEUE 255
static int scsi_debug_add_host = DEF_NUM_HOST;
+static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int scsi_debug_dif = DEF_DIF;
+static int scsi_debug_dix = DEF_DIX;
+static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
+static int scsi_debug_fake_rw = DEF_FAKE_RW;
+static int scsi_debug_guard = DEF_GUARD;
+static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
-static int scsi_debug_num_parts = DEF_NUM_PARTS;
+static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_no_uld = 0;
+static int scsi_debug_num_parts = DEF_NUM_PARTS;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
+static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_opts = DEF_OPTS;
-static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
+static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
-static int scsi_debug_dsense = DEF_D_SENSE;
-static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
+static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
-static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
-static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
-static int scsi_debug_dix = DEF_DIX;
-static int scsi_debug_dif = DEF_DIF;
-static int scsi_debug_guard = DEF_GUARD;
-static int scsi_debug_ato = DEF_ATO;
-static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
-static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
-static int scsi_debug_opt_blks = DEF_OPT_BLKS;
-static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
-static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
-static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int scsi_debug_lbpu = DEF_LBPU;
+static unsigned int scsi_debug_lbpws = DEF_LBPWS;
+static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
-static unsigned int scsi_debug_tpws = DEF_TPWS;
-static unsigned int scsi_debug_tpu = DEF_TPU;
+static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static int scsi_debug_cmnd_count = 0;
@@ -206,6 +211,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */
#define SCSI_DEBUG_MAX_CMD_LEN 32
+static unsigned int scsi_debug_lbp(void)
+{
+ return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
+}
+
struct sdebug_dev_info {
struct list_head dev_list;
unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
@@ -727,7 +737,7 @@ static int inquiry_evpd_b0(unsigned char * arr)
/* Optimal Transfer Length */
put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
- if (scsi_debug_tpu) {
+ if (scsi_debug_lbpu) {
/* Maximum Unmap LBA Count */
put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
@@ -744,7 +754,10 @@ static int inquiry_evpd_b0(unsigned char * arr)
/* Optimal Unmap Granularity */
put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
- return 0x3c; /* Mandatory page length for thin provisioning */
+ /* Maximum WRITE SAME Length */
+ put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
+
+ return 0x3c; /* Mandatory page length for Logical Block Provisioning */
return sizeof(vpdb0_data);
}
@@ -767,12 +780,15 @@ static int inquiry_evpd_b2(unsigned char *arr)
memset(arr, 0, 0x8);
arr[0] = 0; /* threshold exponent */
- if (scsi_debug_tpu)
+ if (scsi_debug_lbpu)
arr[1] = 1 << 7;
- if (scsi_debug_tpws)
+ if (scsi_debug_lbpws)
arr[1] |= 1 << 6;
+ if (scsi_debug_lbpws10)
+ arr[1] |= 1 << 5;
+
return 0x8;
}
@@ -831,7 +847,8 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits (SBC) */
arr[n++] = 0xb1; /* Block characteristics (SBC) */
- arr[n++] = 0xb2; /* Thin provisioning (SBC) */
+ if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
+ arr[n++] = 0xb2;
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
arr[1] = cmd[2]; /*sanity */
@@ -879,7 +896,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_evpd_b1(&arr[4]);
- } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */
+ } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_evpd_b2(&arr[4]);
} else {
@@ -1053,8 +1070,8 @@ static int resp_readcap16(struct scsi_cmnd * scp,
arr[13] = scsi_debug_physblk_exp & 0xf;
arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
- if (scsi_debug_tpu || scsi_debug_tpws)
- arr[14] |= 0x80; /* TPE */
+ if (scsi_debug_lbp())
+ arr[14] |= 0x80; /* LBPME */
arr[15] = scsi_debug_lowest_aligned & 0xff;
@@ -1791,15 +1808,15 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
return ret;
if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
- (lba <= OPT_MEDIUM_ERR_ADDR) &&
+ (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
/* claim unrecoverable read error */
- mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
- 0);
+ mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
/* set info field and valid bit for fixed descriptor */
if (0x70 == (devip->sense_buff[0] & 0x7f)) {
devip->sense_buff[0] |= 0x80; /* Valid bit */
- ret = OPT_MEDIUM_ERR_ADDR;
+ ret = (lba < OPT_MEDIUM_ERR_ADDR)
+ ? OPT_MEDIUM_ERR_ADDR : (int)lba;
devip->sense_buff[3] = (ret >> 24) & 0xff;
devip->sense_buff[4] = (ret >> 16) & 0xff;
devip->sense_buff[5] = (ret >> 8) & 0xff;
@@ -2084,6 +2101,12 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
if (ret)
return ret;
+ if (num > scsi_debug_write_same_length) {
+ mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ 0);
+ return check_condition_result;
+ }
+
write_lock_irqsave(&atomic_rw, iflags);
if (unmap && scsi_debug_unmap_granularity) {
@@ -2695,37 +2718,40 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
/sys/bus/pseudo/drivers/scsi_debug directory is changed.
*/
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
+module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
+module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
+module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
+module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
+module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
+module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
+module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
+module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
-module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
-module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
- S_IRUGO | S_IWUSR);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
-module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
-module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
-module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
-module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
-module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
-module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
-module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
+module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
+module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
-module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
-module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
-module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO);
-module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO);
+module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
+module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(write_same_length, scsi_debug_write_same_length, int,
+ S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2733,36 +2759,38 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
+MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
+MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
+MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
+MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
+MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
+MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
-MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
-MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
-MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
-MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
-MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
-MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
-MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
-MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
-MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
+MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
-MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
-MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
-MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)");
-MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
+MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
static char sdebug_info[256];
@@ -3150,7 +3178,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
{
ssize_t count;
- if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0)
+ if (!scsi_debug_lbp())
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
@@ -3333,8 +3361,8 @@ static int __init scsi_debug_init(void)
memset(dif_storep, 0xff, dif_size);
}
- /* Thin Provisioning */
- if (scsi_debug_tpu || scsi_debug_tpws) {
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
unsigned int map_bytes;
scsi_debug_unmap_max_blocks =
@@ -3664,7 +3692,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
errsts = resp_readcap16(SCpnt, devip);
else if (cmd[1] == SAI_GET_LBA_STATUS) {
- if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) {
+ if (scsi_debug_lbp() == 0) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
@@ -3775,8 +3803,10 @@ write:
}
break;
case WRITE_SAME_16:
+ case WRITE_SAME:
if (cmd[1] & 0x8) {
- if (scsi_debug_tpws == 0) {
+ if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
+ (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
errsts = check_condition_result;
@@ -3785,8 +3815,6 @@ write:
}
if (errsts)
break;
- /* fall through */
- case WRITE_SAME:
errsts = check_readiness(SCpnt, 0, devip);
if (errsts)
break;
@@ -3798,7 +3826,7 @@ write:
if (errsts)
break;
- if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) {
+ if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
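The tpu/tpws knobs become lbpu/lbpws/lbpws10, and scsi_debug_lbp() ORs them so any enabled mechanism advertises Logical Block Provisioning. The byte-1 flag packing that inquiry_evpd_b2() performs, as a standalone sketch (bit positions as set in the code above):

#include <stdio.h>

static unsigned char lbp_byte(int lbpu, int lbpws, int lbpws10)
{
	unsigned char b = 0;

	if (lbpu)
		b |= 1 << 7;	/* LBPU: UNMAP supported */
	if (lbpws)
		b |= 1 << 6;	/* LBPWS: WRITE SAME(16) with UNMAP bit */
	if (lbpws10)
		b |= 1 << 5;	/* LBPWS10: WRITE SAME(10) with UNMAP bit */
	return b;
}

int main(void)
{
	/* e.g. a device loaded with lbpu=1 lbpws=1 reports 0xc0 */
	printf("0x%02x\n", lbp_byte(1, 1, 0));
	return 0;
}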
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 43fad4c09beb..82e9e5c0476e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -382,6 +382,91 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
+ * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @key: specify list to use
+ *
+ * Description:
+ * Remove and destroy one dev_info entry for @vendor, @model
+ * in list specified by @key.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+{
+ struct scsi_dev_info_list *devinfo, *found = NULL;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
+ if (devinfo->compatible) {
+ /*
+ * Behave like the older version of get_device_flags.
+ */
+ size_t max;
+ /*
+ * XXX why skip leading spaces? If an odd INQUIRY
+ * value, that should have been part of the
+ * scsi_static_device_list[] entry, such as " FOO"
+ * rather than "FOO". Since this code is already
+ * here, and we don't know what device it is
+ * trying to work with, leave it as-is.
+ */
+ max = 8; /* max length of vendor */
+ while ((max > 0) && *vendor == ' ') {
+ max--;
+ vendor++;
+ }
+ /*
+ * XXX removing the following strlen() would be
+ * good, using it means that for an entry not in
+ * the list, we scan every byte of every vendor
+ * listed in scsi_static_device_list[], and never match
+ * a single one (and still have to compare at
+ * least the first byte of each vendor).
+ */
+ if (memcmp(devinfo->vendor, vendor,
+ min(max, strlen(devinfo->vendor))))
+ continue;
+ /*
+ * Skip spaces again.
+ */
+ max = 16; /* max length of model */
+ while ((max > 0) && *model == ' ') {
+ max--;
+ model++;
+ }
+ if (memcmp(devinfo->model, model,
+ min(max, strlen(devinfo->model))))
+ continue;
+ found = devinfo;
+ } else {
+ if (!memcmp(devinfo->vendor, vendor,
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
+ found = devinfo;
+ }
+ if (found)
+ break;
+ }
+
+ if (found) {
+ list_del(&found->dev_info_list);
+ kfree(found);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
+
+/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
* @dev_list: string of device flags to add
*
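The compatible-match branch in scsi_dev_info_list_del_keyed() mirrors the old get_device_flags(): skip the query's leading spaces, then compare at most the stored prefix's length, capped at the INQUIRY field width. A compilable model of that comparison (helper names invented here; the kernel uses its min() macro):

#include <stdio.h>
#include <string.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* field_max is 8 for vendor, 16 for model. */
static int compat_match(const char *stored, const char *query, size_t field_max)
{
	size_t max = field_max;

	while (max > 0 && *query == ' ') {
		max--;
		query++;
	}
	return memcmp(stored, query, min_sz(max, strlen(stored))) == 0;
}

int main(void)
{
	printf("%d\n", compat_match("SEAGATE", "  SEAGATE ST373307LC", 8)); /* 1 */
	printf("%d\n", compat_match("SEAGATE", "FUJITSU MAX3073NC", 8));    /* 0 */
	return 0;
}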
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 45c75649b9e0..991de3c15cfc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -223,7 +223,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
* @scmd: Cmd to have sense checked.
*
* Return value:
- * SUCCESS or FAILED or NEEDS_RETRY
+ * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR
*
* Notes:
* When a deferred error is detected the current command has
@@ -326,17 +326,19 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
*/
return SUCCESS;
- /* these three are not supported */
+ /* these are not supported */
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
- return SUCCESS;
+ case BLANK_CHECK:
+ case DATA_PROTECT:
+ return TARGET_ERROR;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- return SUCCESS;
+ return TARGET_ERROR;
}
return NEEDS_RETRY;
@@ -344,11 +346,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- return SUCCESS;
+ return TARGET_ERROR;
case ILLEGAL_REQUEST:
- case BLANK_CHECK:
- case DATA_PROTECT:
default:
return SUCCESS;
}
@@ -787,6 +787,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
case SUCCESS:
case NEEDS_RETRY:
case FAILED:
+ case TARGET_ERROR:
break;
case ADD_TO_MLQUEUE:
rtn = NEEDS_RETRY;
@@ -1469,6 +1470,14 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
+ else if (rtn == TARGET_ERROR) {
+ /*
+ * Need to modify host byte to signal a
+ * permanent target failure
+ */
+ scmd->result |= (DID_TARGET_FAILURE << 16);
+ rtn = SUCCESS;
+ }
/* if rtn == FAILED, we have no sense information;
* returning FAILED will wake the error handler thread
* to collect the sense and redo the decide
@@ -1486,6 +1495,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
+ scmd->result |= (DID_NEXUS_FAILURE << 16);
return SUCCESS; /* causes immediate i/o error */
default:
return FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fb2bb35c62cb..2d63c8ad1442 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -667,6 +667,30 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_release_buffers);
+static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+{
+ int error = 0;
+
+ switch(host_byte(result)) {
+ case DID_TRANSPORT_FAILFAST:
+ error = -ENOLINK;
+ break;
+ case DID_TARGET_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EREMOTEIO;
+ break;
+ case DID_NEXUS_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EBADE;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+
+ return error;
+}
+
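A hedged sketch of how a completion path observes the new mapping
(everything here except __scsi_error_from_host_byte() and the DID_* names
is hypothetical):

static void example_inspect_result(struct scsi_cmnd *cmd, int result)
{
	int error = __scsi_error_from_host_byte(cmd, result);

	if (error == -EREMOTEIO)	/* DID_TARGET_FAILURE */
		pr_debug("permanent target failure\n");
	else if (error == -EBADE)	/* DID_NEXUS_FAILURE */
		pr_debug("reservation conflict\n");
	else if (error == -ENOLINK)	/* DID_TRANSPORT_FAILFAST */
		pr_debug("transport fast-failed the command\n");
	else if (error)
		pr_debug("generic I/O error\n");
}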
/*
* Function: scsi_io_completion()
*
@@ -737,7 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
req->sense_len = len;
}
if (!sense_deferred)
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
}
req->resid_len = scsi_get_resid(cmd);
@@ -796,7 +820,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -843,6 +867,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
description = "Host Data Integrity Failure";
action = ACTION_FAIL;
error = -EILSEQ;
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+ (cmd->cmnd[0] == UNMAP ||
+ cmd->cmnd[0] == WRITE_SAME_16 ||
+ cmd->cmnd[0] == WRITE_SAME)) {
+ description = "Discard failure";
+ action = ACTION_FAIL;
} else
action = ACTION_FAIL;
break;
@@ -1038,6 +1069,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
cmd->request = req;
cmd->cmnd = req->cmd;
+ cmd->prot_op = SCSI_PROT_NORMAL;
return cmd;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 342ee1a9c41d..2a588955423a 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,6 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
enum {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
+ SCSI_DEVINFO_DH,
};
extern int scsi_get_device_flags(struct scsi_device *sdev,
@@ -56,6 +57,7 @@ extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
int flags, int key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f905ecb5704d..b4218390941e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -954,6 +954,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
if (dd_size)
conn->dd_data = &conn[1];
+ mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
conn->transport = transport;
conn->cid = cid;
@@ -975,7 +976,6 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
- conn->active = 1;
spin_unlock_irqrestore(&connlock, flags);
ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
@@ -1001,7 +1001,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
unsigned long flags;
spin_lock_irqsave(&connlock, flags);
- conn->active = 0;
list_del(&conn->conn_list);
spin_unlock_irqrestore(&connlock, flags);
@@ -1430,6 +1429,29 @@ release_host:
return err;
}
+static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
+ u64 ep_handle)
+{
+ struct iscsi_cls_conn *conn;
+ struct iscsi_endpoint *ep;
+
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+ ep = iscsi_lookup_endpoint(ep_handle);
+ if (!ep)
+ return -EINVAL;
+ conn = ep->conn;
+ if (conn) {
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = NULL;
+ mutex_unlock(&conn->ep_mutex);
+ }
+
+ transport->ep_disconnect(ep);
+ return 0;
+}
+
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
struct iscsi_uevent *ev, int msg_type)
@@ -1454,14 +1476,8 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->u.ep_poll.timeout_ms);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
- if (!transport->ep_disconnect)
- return -EINVAL;
-
- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
- if (!ep)
- return -EINVAL;
-
- transport->ep_disconnect(ep);
+ rc = iscsi_if_ep_disconnect(transport,
+ ev->u.ep_disconnect.ep_handle);
break;
}
return rc;
@@ -1609,12 +1625,31 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
session = iscsi_session_lookup(ev->u.b_conn.sid);
conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
- if (session && conn)
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- else
+ if (conn && conn->ep)
+ iscsi_if_ep_disconnect(transport, conn->ep->id);
+
+ if (!session || !conn) {
err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = ep;
+ mutex_unlock(&conn->ep_mutex);
+ } else
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn "
+ "binding\n");
break;
case ISCSI_UEVENT_SET_PARAM:
err = iscsi_set_param(transport, ev);
@@ -1747,13 +1782,48 @@ iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
-iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
-iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+#define iscsi_conn_ep_attr_show(param) \
+static ssize_t show_conn_ep_param_##param(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
+ struct iscsi_transport *t = conn->transport; \
+ struct iscsi_endpoint *ep; \
+ ssize_t rc; \
+ \
+ /* \
+ * Need to make sure ep_disconnect does not free the LLD's \
+ * interconnect resources while we are trying to read them. \
+ */ \
+ mutex_lock(&conn->ep_mutex); \
+ ep = conn->ep; \
+ if (!ep && t->ep_connect) { \
+ mutex_unlock(&conn->ep_mutex); \
+ return -ENOTCONN; \
+ } \
+ \
+ if (ep) \
+ rc = t->get_ep_param(ep, param, buf); \
+ else \
+ rc = t->get_conn_param(conn, param, buf); \
+ mutex_unlock(&conn->ep_mutex); \
+ return rc; \
+}
+
+#define iscsi_conn_ep_attr(field, param) \
+ iscsi_conn_ep_attr_show(param) \
+static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \
+ show_conn_ep_param_##param, NULL);
+
+iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
+
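The effect of converting these two attributes: reading the address or port
of a connection now takes conn->ep_mutex, so a concurrent ep_disconnect
cannot free the LLD's endpoint while get_ep_param() dereferences it.
Offload transports (those providing ep_connect) report the live endpoint
value, software transports fall back to get_conn_param(), and an offload
connection with no bound endpoint returns -ENOTCONN.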
/*
* iSCSI session attrs
*/
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e56730214c05..3be5db5d6343 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -96,6 +96,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
#define SD_MINORS 0
#endif
+static void sd_config_discard(struct scsi_disk *, unsigned int);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
@@ -294,7 +295,54 @@ sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+ return snprintf(buf, 20, "%u\n", sdkp->lbpme);
+}
+
+static const char *lbp_mode[] = {
+ [SD_LBP_FULL] = "full",
+ [SD_LBP_UNMAP] = "unmap",
+ [SD_LBP_WS16] = "writesame_16",
+ [SD_LBP_WS10] = "writesame_10",
+ [SD_LBP_ZERO] = "writesame_zero",
+ [SD_LBP_DISABLE] = "disabled",
+};
+
+static ssize_t
+sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);
+}
+
+static ssize_t
+sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
+ sd_config_discard(sdkp, SD_LBP_ZERO);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ else
+ return -EINVAL;
+
+ return count;
}
static struct device_attribute sd_disk_attrs[] = {
@@ -309,6 +357,8 @@ static struct device_attribute sd_disk_attrs[] = {
__ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
+ __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
+ sd_store_provisioning_mode),
__ATTR_NULL,
};
@@ -433,6 +483,49 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
scsi_set_prot_type(scmd, dif);
}
+static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned int logical_block_size = sdkp->device->sector_size;
+ unsigned int max_blocks = 0;
+
+ q->limits.discard_zeroes_data = sdkp->lbprz;
+ q->limits.discard_alignment = sdkp->unmap_alignment;
+ q->limits.discard_granularity =
+ max(sdkp->physical_block_size,
+ sdkp->unmap_granularity * logical_block_size);
+
+ switch (mode) {
+
+ case SD_LBP_DISABLE:
+ q->limits.max_discard_sectors = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ return;
+
+ case SD_LBP_UNMAP:
+ max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff);
+ break;
+
+ case SD_LBP_WS16:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff);
+ break;
+
+ case SD_LBP_WS10:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ break;
+
+ case SD_LBP_ZERO:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ q->limits.discard_zeroes_data = 1;
+ break;
+ }
+
+ q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
+ sdkp->provisioning_mode = mode;
+}
+
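Taken together with the sysfs plumbing above, the discard mode is now
visible and tunable from user space: reading provisioning_mode on the
scsi_disk device (e.g. /sys/class/scsi_disk/<h:c:t:l>/provisioning_mode)
returns one of the lbp_mode[] strings, and writing one back (CAP_SYS_ADMIN
required) invokes sd_config_discard() with the matching SD_LBP_* mode,
immediately reprogramming the queue's discard limits.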
/**
* scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
* @sdp: scsi device to operate on
@@ -449,6 +542,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
unsigned int nr_sectors = bio_sectors(bio);
unsigned int len;
int ret;
+ char *buf;
struct page *page;
if (sdkp->device->sector_size == 4096) {
@@ -464,8 +558,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
if (!page)
return BLKPREP_DEFER;
- if (sdkp->unmap) {
- char *buf = page_address(page);
+ switch (sdkp->provisioning_mode) {
+ case SD_LBP_UNMAP:
+ buf = page_address(page);
rq->cmd_len = 10;
rq->cmd[0] = UNMAP;
@@ -477,7 +572,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
put_unaligned_be32(nr_sectors, &buf[16]);
len = 24;
- } else {
+ break;
+
+ case SD_LBP_WS16:
rq->cmd_len = 16;
rq->cmd[0] = WRITE_SAME_16;
rq->cmd[1] = 0x8; /* UNMAP */
@@ -485,11 +582,29 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
put_unaligned_be32(nr_sectors, &rq->cmd[10]);
len = sdkp->device->sector_size;
+ break;
+
+ case SD_LBP_WS10:
+ case SD_LBP_ZERO:
+ rq->cmd_len = 10;
+ rq->cmd[0] = WRITE_SAME;
+ if (sdkp->provisioning_mode == SD_LBP_WS10)
+ rq->cmd[1] = 0x8; /* UNMAP */
+ put_unaligned_be32(sector, &rq->cmd[2]);
+ put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+
+ len = sdkp->device->sector_size;
+ break;
+
+ default:
+ goto out;
}
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->buffer = page_address(page);
+
+out:
if (ret != BLKPREP_OK) {
__free_page(page);
rq->buffer = NULL;
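For reference, a standalone sketch (not from this patch) of the 24-byte
single-descriptor UNMAP parameter list the SD_LBP_UNMAP case builds; the
header offsets follow SBC-3, and the helpers are local stand-ins for the
kernel's put_unaligned_be*() calls:

#include <stdint.h>
#include <string.h>

static void put_be16(uint16_t v, uint8_t *p) { p[0] = v >> 8; p[1] = v; }
static void put_be32(uint32_t v, uint8_t *p) { put_be16(v >> 16, p); put_be16(v, p + 2); }
static void put_be64(uint64_t v, uint8_t *p) { put_be32(v >> 32, p); put_be32(v, p + 4); }

static void build_unmap_payload(uint8_t buf[24], uint64_t lba, uint32_t nr_blocks)
{
	memset(buf, 0, 24);
	put_be16(24 - 2, &buf[0]);	/* UNMAP data length */
	put_be16(16, &buf[2]);		/* block descriptor data length */
	put_be64(lba, &buf[8]);		/* descriptor: starting LBA */
	put_be32(nr_blocks, &buf[16]);	/* descriptor: number of blocks */
}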
@@ -1251,12 +1366,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
int sense_valid = 0;
int sense_deferred = 0;
+ unsigned char op = SCpnt->cmnd[0];
- if (SCpnt->request->cmd_flags & REQ_DISCARD) {
- if (!result)
- scsi_set_resid(SCpnt, 0);
- return good_bytes;
- }
+ if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result)
+ scsi_set_resid(SCpnt, 0);
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -1295,10 +1408,17 @@ static int sd_done(struct scsi_cmnd *SCpnt)
SCpnt->result = 0;
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
break;
- case ABORTED_COMMAND: /* DIF: Target detected corruption */
- case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
- if (sshdr.asc == 0x10)
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
+ good_bytes = sd_completed_bytes(SCpnt);
+ break;
+ case ILLEGAL_REQUEST:
+ if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
good_bytes = sd_completed_bytes(SCpnt);
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+ (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME))
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
break;
default:
break;
@@ -1596,17 +1716,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
sd_printk(KERN_NOTICE, sdkp,
"physical block alignment offset: %u\n", alignment);
- if (buffer[14] & 0x80) { /* TPE */
- struct request_queue *q = sdp->request_queue;
+ if (buffer[14] & 0x80) { /* LBPME */
+ sdkp->lbpme = 1;
- sdkp->thin_provisioning = 1;
- q->limits.discard_granularity = sdkp->physical_block_size;
- q->limits.max_discard_sectors = 0xffffffff;
+ if (buffer[14] & 0x40) /* LBPRZ */
+ sdkp->lbprz = 1;
- if (buffer[14] & 0x40) /* TPRZ */
- q->limits.discard_zeroes_data = 1;
-
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ sd_config_discard(sdkp, SD_LBP_WS16);
}
sdkp->capacity = lba + 1;
@@ -2091,7 +2207,6 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
- struct request_queue *q = sdkp->disk->queue;
unsigned int sector_sz = sdkp->device->sector_size;
const int vpd_len = 64;
unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
@@ -2106,39 +2221,46 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
blk_queue_io_opt(sdkp->disk->queue,
get_unaligned_be32(&buffer[12]) * sector_sz);
- /* Thin provisioning enabled and page length indicates TP support */
- if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
- unsigned int lba_count, desc_count, granularity;
+ if (buffer[3] == 0x3c) {
+ unsigned int lba_count, desc_count;
- lba_count = get_unaligned_be32(&buffer[20]);
- desc_count = get_unaligned_be32(&buffer[24]);
-
- if (lba_count && desc_count) {
- if (sdkp->tpvpd && !sdkp->tpu)
- sdkp->unmap = 0;
- else
- sdkp->unmap = 1;
- }
+ sdkp->max_ws_blocks =
+ (u32) min_not_zero(get_unaligned_be64(&buffer[36]),
+ (u64)0xffffffff);
- if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) {
- sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \
- "enabled but neither TPU, nor TPWS are " \
- "set. Disabling discard!\n");
+ if (!sdkp->lbpme)
goto out;
- }
- if (lba_count)
- q->limits.max_discard_sectors =
- lba_count * sector_sz >> 9;
+ lba_count = get_unaligned_be32(&buffer[20]);
+ desc_count = get_unaligned_be32(&buffer[24]);
- granularity = get_unaligned_be32(&buffer[28]);
+ if (lba_count && desc_count)
+ sdkp->max_unmap_blocks = lba_count;
- if (granularity)
- q->limits.discard_granularity = granularity * sector_sz;
+ sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
if (buffer[32] & 0x80)
- q->limits.discard_alignment =
+ sdkp->unmap_alignment =
get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+
+ if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
+
+ if (sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else
+ sd_config_discard(sdkp, SD_LBP_WS16);
+
+ } else { /* LBP VPD page tells us what to use */
+
+ if (sdkp->lbpu && sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (sdkp->lbpws10)
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ }
}
out:
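A hedged standalone restatement (not from this patch) of the Block Limits
VPD (page B0h) offsets the parser above relies on; the byte positions
mirror the get_unaligned_be*() calls in sd_read_block_limits():

#include <stdint.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

struct example_block_limits {
	uint32_t max_unmap_lba_count;	/* bytes 20-23 */
	uint32_t max_unmap_desc_count;	/* bytes 24-27 */
	uint32_t unmap_granularity;	/* bytes 28-31, in logical blocks */
	uint32_t unmap_alignment;	/* bytes 32-35, valid if bit 7 of byte 32 */
	uint64_t max_ws_len;		/* bytes 36-43, clamped to 32 bits above */
};

static void example_parse_b0(const uint8_t *vpd, struct example_block_limits *bl)
{
	bl->max_unmap_lba_count  = get_be32(&vpd[20]);
	bl->max_unmap_desc_count = get_be32(&vpd[24]);
	bl->unmap_granularity    = get_be32(&vpd[28]);
	bl->unmap_alignment      = (vpd[32] & 0x80) ?
				   (get_be32(&vpd[32]) & ~(1U << 31)) : 0;
	bl->max_ws_len           = ((uint64_t)get_be32(&vpd[36]) << 32) |
				   get_be32(&vpd[40]);
}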
@@ -2172,15 +2294,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
}
/**
- * sd_read_thin_provisioning - Query thin provisioning VPD page
+ * sd_read_block_provisioning - Query provisioning VPD page
* @disk: disk to query
*/
-static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
+static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
unsigned char *buffer;
const int vpd_len = 8;
- if (sdkp->thin_provisioning == 0)
+ if (sdkp->lbpme == 0)
return;
buffer = kmalloc(vpd_len, GFP_KERNEL);
@@ -2188,9 +2310,10 @@ static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
goto out;
- sdkp->tpvpd = 1;
- sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */
- sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
+ sdkp->lbpvpd = 1;
+ sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
+ sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
+ sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
out:
kfree(buffer);
@@ -2247,7 +2370,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_capacity(sdkp, buffer);
if (sd_try_extended_inquiry(sdp)) {
- sd_read_thin_provisioning(sdkp);
+ sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c9d8f6ca49e2..6ad798bfd52a 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -43,6 +43,15 @@ enum {
SD_MEMPOOL_SIZE = 2, /* CDB pool size */
};
+enum {
+ SD_LBP_FULL = 0, /* Full logical block provisioning */
+ SD_LBP_UNMAP, /* Use UNMAP command */
+ SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */
+ SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */
+ SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */
+ SD_LBP_DISABLE, /* Discard disabled due to failed cmd */
+};
+
struct scsi_disk {
struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
@@ -50,21 +59,27 @@ struct scsi_disk {
struct gendisk *disk;
atomic_t openers;
sector_t capacity; /* size in 512-byte sectors */
+ u32 max_ws_blocks;
+ u32 max_unmap_blocks;
+ u32 unmap_granularity;
+ u32 unmap_alignment;
u32 index;
unsigned int physical_block_size;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
+ u8 provisioning_mode;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
unsigned first_scan : 1;
- unsigned thin_provisioning : 1;
- unsigned unmap : 1;
- unsigned tpws : 1;
- unsigned tpu : 1;
- unsigned tpvpd : 1;
+ unsigned lbpme : 1;
+ unsigned lbprz : 1;
+ unsigned lbpu : 1;
+ unsigned lbpws : 1;
+ unsigned lbpws10 : 1;
+ unsigned lbpvpd : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 366080baf474..7f19c8b7b84c 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf;
- u32 blocks = dev->transport->get_blocks(dev);
+ unsigned long long blocks_long = dev->transport->get_blocks(dev);
+ u32 blocks;
+
+ if (blocks_long >= 0x00000000ffffffff)
+ blocks = 0xffffffff;
+ else
+ blocks = (u32)blocks_long;
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
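A minimal standalone sketch (not from this patch) of the saturation rule
being added: when the block count does not fit in 32 bits, READ
CAPACITY(10) must report 0xffffffff so the initiator falls back to READ
CAPACITY(16):

#include <stdint.h>

static uint32_t readcap10_blocks(uint64_t blocks_long)
{
	return blocks_long >= 0xffffffffULL ? 0xffffffffU
					    : (uint32_t)blocks_long;
}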
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 3740e327f180..c35f1a73bc8b 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -177,6 +177,8 @@ static int __init xen_hvc_init(void)
}
if (xencons_irq < 0)
xencons_irq = 0; /* NO_IRQ */
+ else
+ set_irq_noprobe(xencons_irq);
hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
if (IS_ERR(hp))
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 92c91c83edde..eb7958c675a8 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -47,7 +47,6 @@
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/list.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -65,11 +64,8 @@
struct sci_port {
struct uart_port port;
- /* Port type */
- unsigned int type;
-
- /* Port IRQs: ERI, RXI, TXI, BRI (optional) */
- unsigned int irqs[SCIx_NR_IRQS];
+ /* Platform configuration */
+ struct plat_sci_port *cfg;
/* Port enable callback */
void (*enable)(struct uart_port *port);
@@ -81,26 +77,15 @@ struct sci_port {
struct timer_list break_timer;
int break_flag;
- /* SCSCR initialization */
- unsigned int scscr;
-
- /* SCBRR calculation algo */
- unsigned int scbrr_algo_id;
-
/* Interface clock */
struct clk *iclk;
/* Function clock */
struct clk *fclk;
- struct list_head node;
-
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct device *dma_dev;
- unsigned int slave_tx;
- unsigned int slave_rx;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx[2];
dma_cookie_t cookie_tx;
@@ -117,16 +102,14 @@ struct sci_port {
struct timer_list rx_timer;
unsigned int rx_timeout;
#endif
-};
-struct sh_sci_priv {
- spinlock_t lock;
- struct list_head ports;
- struct notifier_block clk_nb;
+ struct notifier_block freq_transition;
};
/* Function prototypes */
+static void sci_start_tx(struct uart_port *port);
static void sci_stop_tx(struct uart_port *port);
+static void sci_start_rx(struct uart_port *port);
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -142,12 +125,6 @@ to_sci_port(struct uart_port *uart)
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
#ifdef CONFIG_CONSOLE_POLL
-static inline void handle_error(struct uart_port *port)
-{
- /* Clear error flags */
- sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
-}
-
static int sci_poll_get_char(struct uart_port *port)
{
unsigned short status;
@@ -156,7 +133,7 @@ static int sci_poll_get_char(struct uart_port *port)
do {
status = sci_in(port, SCxSR);
if (status & SCxSR_ERRORS(port)) {
- handle_error(port);
+ sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
continue;
}
break;
@@ -475,7 +452,7 @@ static void sci_transmit_chars(struct uart_port *port)
/* On SH3, SCIF may read end-of-break as a space->mark char */
#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
-static inline void sci_receive_chars(struct uart_port *port)
+static void sci_receive_chars(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
struct tty_struct *tty = port->state->port.tty;
@@ -566,18 +543,20 @@ static inline void sci_receive_chars(struct uart_port *port)
}
#define SCI_BREAK_JIFFIES (HZ/20)
-/* The sci generates interrupts during the break,
+
+/*
+ * The sci generates interrupts during the break,
* 1 per millisecond or so during the break period, for 9600 baud.
* So don't bother disabling interrupts.
* But we don't want more than one break event.
* Use a kernel timer to periodically poll the rx line until
* the break is finished.
*/
-static void sci_schedule_break_timer(struct sci_port *port)
+static inline void sci_schedule_break_timer(struct sci_port *port)
{
- port->break_timer.expires = jiffies + SCI_BREAK_JIFFIES;
- add_timer(&port->break_timer);
+ mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}
+
/* Ensure that two consecutive samples find the break over. */
static void sci_break_timer(unsigned long data)
{
@@ -594,7 +573,7 @@ static void sci_break_timer(unsigned long data)
port->break_flag = 0;
}
-static inline int sci_handle_errors(struct uart_port *port)
+static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
@@ -650,7 +629,7 @@ static inline int sci_handle_errors(struct uart_port *port)
return copied;
}
-static inline int sci_handle_fifo_overrun(struct uart_port *port)
+static int sci_handle_fifo_overrun(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
int copied = 0;
@@ -671,7 +650,7 @@ static inline int sci_handle_fifo_overrun(struct uart_port *port)
return copied;
}
-static inline int sci_handle_breaks(struct uart_port *port)
+static int sci_handle_breaks(struct uart_port *port)
{
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
@@ -794,7 +773,7 @@ static inline unsigned long port_rx_irq_mask(struct uart_port *port)
* it's unset, it's logically inferred that there's no point in
* testing for it.
*/
- return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE);
+ return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
@@ -839,17 +818,18 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
static int sci_notifier(struct notifier_block *self,
unsigned long phase, void *p)
{
- struct sh_sci_priv *priv = container_of(self,
- struct sh_sci_priv, clk_nb);
struct sci_port *sci_port;
unsigned long flags;
+ sci_port = container_of(self, struct sci_port, freq_transition);
+
if ((phase == CPUFREQ_POSTCHANGE) ||
(phase == CPUFREQ_RESUMECHANGE)) {
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(sci_port, &priv->ports, node)
- sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- spin_unlock_irqrestore(&priv->lock, flags);
+ struct uart_port *port = &sci_port->port;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->uartclk = clk_get_rate(sci_port->iclk);
+ spin_unlock_irqrestore(&port->lock, flags);
}
return NOTIFY_OK;
@@ -882,21 +862,21 @@ static int sci_request_irq(struct sci_port *port)
const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
"SCI Transmit Data Empty", "SCI Break" };
- if (port->irqs[0] == port->irqs[1]) {
- if (unlikely(!port->irqs[0]))
+ if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
+ if (unlikely(!port->cfg->irqs[0]))
return -ENODEV;
- if (request_irq(port->irqs[0], sci_mpxed_interrupt,
+ if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
IRQF_DISABLED, "sci", port)) {
dev_err(port->port.dev, "Can't allocate IRQ\n");
return -ENODEV;
}
} else {
for (i = 0; i < ARRAY_SIZE(handlers); i++) {
- if (unlikely(!port->irqs[i]))
+ if (unlikely(!port->cfg->irqs[i]))
continue;
- if (request_irq(port->irqs[i], handlers[i],
+ if (request_irq(port->cfg->irqs[i], handlers[i],
IRQF_DISABLED, desc[i], port)) {
dev_err(port->port.dev, "Can't allocate IRQ\n");
return -ENODEV;
@@ -911,14 +891,14 @@ static void sci_free_irq(struct sci_port *port)
{
int i;
- if (port->irqs[0] == port->irqs[1])
- free_irq(port->irqs[0], port);
+ if (port->cfg->irqs[0] == port->cfg->irqs[1])
+ free_irq(port->cfg->irqs[0], port);
else {
- for (i = 0; i < ARRAY_SIZE(port->irqs); i++) {
- if (!port->irqs[i])
+ for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
+ if (!port->cfg->irqs[i])
continue;
- free_irq(port->irqs[i], port);
+ free_irq(port->cfg->irqs[i], port);
}
}
}
@@ -1037,9 +1017,6 @@ static void sci_dma_rx_complete(void *arg)
schedule_work(&s->work_rx);
}
-static void sci_start_rx(struct uart_port *port);
-static void sci_start_tx(struct uart_port *port);
-
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
struct dma_chan *chan = s->chan_rx;
@@ -1325,7 +1302,7 @@ static void rx_timer_fn(unsigned long arg)
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
scr &= ~0x4000;
- enable_irq(s->irqs[1]);
+ enable_irq(s->cfg->irqs[1]);
}
sci_out(port, SCSCR, scr | SCSCR_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
@@ -1341,9 +1318,9 @@ static void sci_request_dma(struct uart_port *port)
int nent;
dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
- port->line, s->dma_dev);
+ port->line, s->cfg->dma_dev);
- if (!s->dma_dev)
+ if (!s->cfg->dma_dev)
return;
dma_cap_zero(mask);
@@ -1352,8 +1329,8 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_tx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
- param->slave_id = s->slave_tx;
- param->dma_dev = s->dma_dev;
+ param->slave_id = s->cfg->dma_slave_tx;
+ param->dma_dev = s->cfg->dma_dev;
s->cookie_tx = -EINVAL;
chan = dma_request_channel(mask, filter, param);
@@ -1381,8 +1358,8 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_rx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
- param->slave_id = s->slave_rx;
- param->dma_dev = s->dma_dev;
+ param->slave_id = s->cfg->dma_slave_rx;
+ param->dma_dev = s->cfg->dma_dev;
chan = dma_request_channel(mask, filter, param);
dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
@@ -1427,7 +1404,7 @@ static void sci_free_dma(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
- if (!s->dma_dev)
+ if (!s->cfg->dma_dev)
return;
if (s->chan_tx)
@@ -1435,21 +1412,32 @@ static void sci_free_dma(struct uart_port *port)
if (s->chan_rx)
sci_rx_dma_release(s, false);
}
+#else
+static inline void sci_request_dma(struct uart_port *port)
+{
+}
+
+static inline void sci_free_dma(struct uart_port *port)
+{
+}
#endif
static int sci_startup(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
+ int ret;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
if (s->enable)
s->enable(port);
- sci_request_irq(s);
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ ret = sci_request_irq(s);
+ if (unlikely(ret < 0))
+ return ret;
+
sci_request_dma(port);
-#endif
+
sci_start_tx(port);
sci_start_rx(port);
@@ -1464,9 +1452,8 @@ static void sci_shutdown(struct uart_port *port)
sci_stop_rx(port);
sci_stop_tx(port);
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
+
sci_free_dma(port);
-#endif
sci_free_irq(s);
if (s->disable)
@@ -1491,6 +1478,7 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
/* Warn, but use a safe default */
WARN_ON(1);
+
return ((freq + 16 * bps) / (32 * bps) - 1);
}
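As a worked example of the safe default above (a standalone restatement,
not part of the patch): with a 33.333 MHz peripheral clock at 115200 baud,
(freq + 16 * bps) / (32 * bps) - 1 evaluates to 8, which is the SCBRR
value programmed into the port.

#include <stdio.h>

static unsigned int scbrr_default(unsigned int bps, unsigned long freq)
{
	return (freq + 16 * bps) / (32 * bps) - 1;
}

int main(void)
{
	printf("%u\n", scbrr_default(115200, 33333333UL));	/* prints 8 */
	return 0;
}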
@@ -1514,7 +1502,10 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
if (likely(baud && port->uartclk))
- t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk);
+ t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
+
+ if (s->enable)
+ s->enable(port);
do {
status = sci_in(port, SCxSR);
@@ -1526,6 +1517,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
smr_val = sci_in(port, SCSMR) & 3;
+
if ((termios->c_cflag & CSIZE) == CS7)
smr_val |= 0x40;
if (termios->c_cflag & PARENB)
@@ -1540,7 +1532,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCSMR, smr_val);
dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
- s->scscr);
+ s->cfg->scscr);
if (t > 0) {
if (t >= 256) {
@@ -1556,7 +1548,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_init_pins(port, termios->c_cflag);
sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
- sci_out(port, SCSCR, s->scscr);
+ sci_out(port, SCSCR, s->cfg->scscr);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
@@ -1582,6 +1574,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
+
+ if (s->disable)
+ s->disable(port);
}
static const char *sci_type(struct uart_port *port)
@@ -1602,31 +1597,33 @@ static const char *sci_type(struct uart_port *port)
return NULL;
}
-static void sci_release_port(struct uart_port *port)
+static inline unsigned long sci_port_size(struct uart_port *port)
{
- /* Nothing here yet .. */
-}
-
-static int sci_request_port(struct uart_port *port)
-{
- /* Nothing here yet .. */
- return 0;
+ /*
+ * Pick an arbitrary size that encapsulates all of the base
+ * registers by default. This can be optimized later, or derived
+ * from platform resource data at such a time that ports begin to
+ * behave more erratically.
+ */
+ return 64;
}
-static void sci_config_port(struct uart_port *port, int flags)
+static int sci_remap_port(struct uart_port *port)
{
- struct sci_port *s = to_sci_port(port);
-
- port->type = s->type;
+ unsigned long size = sci_port_size(port);
+ /*
+ * Nothing to do if there's already an established membase.
+ */
if (port->membase)
- return;
+ return 0;
if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap_nocache(port->mapbase, 0x40);
-
- if (IS_ERR(port->membase))
+ port->membase = ioremap_nocache(port->mapbase, size);
+ if (unlikely(!port->membase)) {
dev_err(port->dev, "can't remap port#%d\n", port->line);
+ return -ENXIO;
+ }
} else {
/*
* For the simple (and majority of) cases where we don't
@@ -1635,13 +1632,54 @@ static void sci_config_port(struct uart_port *port, int flags)
*/
port->membase = (void __iomem *)port->mapbase;
}
+
+ return 0;
+}
+
+static void sci_release_port(struct uart_port *port)
+{
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, sci_port_size(port));
+}
+
+static int sci_request_port(struct uart_port *port)
+{
+ unsigned long size = sci_port_size(port);
+ struct resource *res;
+ int ret;
+
+ res = request_mem_region(port->mapbase, size, dev_name(port->dev));
+ if (unlikely(res == NULL))
+ return -EBUSY;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0)) {
+ release_resource(res);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sci_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ struct sci_port *sport = to_sci_port(port);
+
+ port->type = sport->cfg->type;
+ sci_request_port(port);
+ }
}
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
struct sci_port *s = to_sci_port(port);
- if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
+ if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
return -EINVAL;
if (ser->baud_base < 2400)
/* No paper tape reader for Mitch.. */
@@ -1726,36 +1764,29 @@ static int __devinit sci_init_single(struct platform_device *dev,
sci_port->break_timer.function = sci_break_timer;
init_timer(&sci_port->break_timer);
- port->mapbase = p->mapbase;
- port->membase = p->membase;
+ sci_port->cfg = p;
- port->irq = p->irqs[SCIx_TXI_IRQ];
+ port->mapbase = p->mapbase;
+ port->type = p->type;
port->flags = p->flags;
- sci_port->type = port->type = p->type;
- sci_port->scscr = p->scscr;
- sci_port->scbrr_algo_id = p->scbrr_algo_id;
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
- sci_port->dma_dev = p->dma_dev;
- sci_port->slave_tx = p->dma_slave_tx;
- sci_port->slave_rx = p->dma_slave_rx;
+ /*
+ * The UART port needs an IRQ value, so we peg this to the TX IRQ
+ * for the multi-IRQ ports, which is where we are primarily
+ * concerned with the shutdown path synchronization.
+ *
+ * For the muxed case there's nothing more to do.
+ */
+ port->irq = p->irqs[SCIx_TXI_IRQ];
- dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__,
- p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
-#endif
+ if (p->dma_dev)
+ dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
+ p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
- memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-static struct tty_driver *serial_console_device(struct console *co, int *index)
-{
- struct uart_driver *p = &sci_uart_driver;
- *index = co->index;
- return p->tty_driver;
-}
-
static void serial_console_putchar(struct uart_port *port, int ch)
{
sci_poll_put_char(port, ch);
@@ -1768,8 +1799,8 @@ static void serial_console_putchar(struct uart_port *port, int ch)
static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
- struct uart_port *port = co->data;
- struct sci_port *sci_port = to_sci_port(port);
+ struct sci_port *sci_port = &sci_ports[co->index];
+ struct uart_port *port = &sci_port->port;
unsigned short bits;
if (sci_port->enable)
@@ -1797,32 +1828,17 @@ static int __devinit serial_console_setup(struct console *co, char *options)
int ret;
/*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
- */
- if (co->index >= SCI_NPORTS)
- co->index = 0;
-
- if (co->data) {
- port = co->data;
- sci_port = to_sci_port(port);
- } else {
- sci_port = &sci_ports[co->index];
- port = &sci_port->port;
- co->data = port;
- }
-
- /*
- * Also need to check port->type, we don't actually have any
- * UPIO_PORT ports, but uart_report_port() handily misreports
- * it anyways if we don't have a port available by the time this is
- * called.
+ * Refuse to handle any bogus ports.
*/
- if (!port->type)
+ if (co->index < 0 || co->index >= SCI_NPORTS)
return -ENODEV;
- sci_config_port(port, 0);
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0))
+ return ret;
if (sci_port->enable)
sci_port->enable(port);
@@ -1842,11 +1858,12 @@ static int __devinit serial_console_setup(struct console *co, char *options)
static struct console serial_console = {
.name = "ttySC",
- .device = serial_console_device,
+ .device = uart_console_device,
.write = serial_console_write,
.setup = serial_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
+ .data = &sci_uart_driver,
};
static int __init sci_console_init(void)
@@ -1856,14 +1873,39 @@ static int __init sci_console_init(void)
}
console_initcall(sci_console_init);
-static struct sci_port early_serial_port;
static struct console early_serial_console = {
.name = "early_ttySC",
.write = serial_console_write,
.flags = CON_PRINTBUFFER,
+ .index = -1,
};
+
static char early_serial_buf[32];
+static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ struct plat_sci_port *cfg = pdev->dev.platform_data;
+
+ if (early_serial_console.data)
+ return -EEXIST;
+
+ early_serial_console.index = pdev->id;
+
+ sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
+
+ serial_console_setup(&early_serial_console, early_serial_buf);
+
+ if (!strstr(early_serial_buf, "keep"))
+ early_serial_console.flags |= CON_BOOT;
+
+ register_console(&early_serial_console);
+ return 0;
+}
+#else
+static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
@@ -1885,24 +1927,18 @@ static struct uart_driver sci_uart_driver = {
.cons = SCI_CONSOLE,
};
-
static int sci_remove(struct platform_device *dev)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *port = platform_get_drvdata(dev);
- cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_unregister_notifier(&port->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node) {
- uart_remove_one_port(&sci_uart_driver, &p->port);
- clk_put(p->iclk);
- clk_put(p->fclk);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_remove_one_port(&sci_uart_driver, &port->port);
+
+ clk_put(port->iclk);
+ clk_put(port->fclk);
- kfree(priv);
return 0;
}
@@ -1911,8 +1947,6 @@ static int __devinit sci_probe_single(struct platform_device *dev,
struct plat_sci_port *p,
struct sci_port *sciport)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
- unsigned long flags;
int ret;
/* Sanity check */
@@ -1929,68 +1963,35 @@ static int __devinit sci_probe_single(struct platform_device *dev,
if (ret)
return ret;
- ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&sciport->node);
-
- spin_lock_irqsave(&priv->lock, flags);
- list_add(&sciport->node, &priv->ports);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
+ return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
-/*
- * Register a set of serial devices attached to a platform device. The
- * list is terminated with a zero flags entry, which means we expect
- * all entries to have at least UPF_BOOT_AUTOCONF set. Platforms that need
- * remapping (such as sh64) should also set UPF_IOREMAP.
- */
static int __devinit sci_probe(struct platform_device *dev)
{
struct plat_sci_port *p = dev->dev.platform_data;
- struct sh_sci_priv *priv;
- int i, ret = -EINVAL;
+ struct sci_port *sp = &sci_ports[dev->id];
+ int ret;
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
- if (is_early_platform_device(dev)) {
- if (dev->id == -1)
- return -ENOTSUPP;
- early_serial_console.index = dev->id;
- early_serial_console.data = &early_serial_port.port;
- sci_init_single(NULL, &early_serial_port, dev->id, p);
- serial_console_setup(&early_serial_console, early_serial_buf);
- if (!strstr(early_serial_buf, "keep"))
- early_serial_console.flags |= CON_BOOT;
- register_console(&early_serial_console);
- return 0;
- }
-#endif
+ /*
+ * If we've come here via earlyprintk initialization, head off to
+ * the special early probe. We don't have sufficient device state
+ * to make it beyond this yet.
+ */
+ if (is_early_platform_device(dev))
+ return sci_probe_earlyprintk(dev);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ platform_set_drvdata(dev, sp);
- INIT_LIST_HEAD(&priv->ports);
- spin_lock_init(&priv->lock);
- platform_set_drvdata(dev, priv);
+ ret = sci_probe_single(dev, dev->id, p, sp);
+ if (ret)
+ goto err_unreg;
- priv->clk_nb.notifier_call = sci_notifier;
- cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+ sp->freq_transition.notifier_call = sci_notifier;
- if (dev->id != -1) {
- ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
- if (ret)
- goto err_unreg;
- } else {
- for (i = 0; p && p->flags != 0; p++, i++) {
- ret = sci_probe_single(dev, i, p, &sci_ports[i]);
- if (ret)
- goto err_unreg;
- }
- }
+ ret = cpufreq_register_notifier(&sp->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (unlikely(ret < 0))
+ goto err_unreg;
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
@@ -2005,28 +2006,20 @@ err_unreg:
static int sci_suspend(struct device *dev)
{
- struct sh_sci_priv *priv = dev_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *sport = dev_get_drvdata(dev);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
- uart_suspend_port(&sci_uart_driver, &p->port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (sport)
+ uart_suspend_port(&sci_uart_driver, &sport->port);
return 0;
}
static int sci_resume(struct device *dev)
{
- struct sh_sci_priv *priv = dev_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *sport = dev_get_drvdata(dev);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
- uart_resume_port(&sci_uart_driver, &p->port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (sport)
+ uart_resume_port(&sci_uart_driver, &sport->port);
return 0;
}
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b223d6cbf33a..5fefed53fa42 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -54,9 +54,6 @@
# define PBCR 0xa4050102
#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
-# define SCSPTR1 0xffe10010 /* 16 bit SCIF */
-# define SCSPTR2 0xffe20010 /* 16 bit SCIF */
-# define SCSPTR3 0xffe30010 /* 16 bit SCIF */
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
# define PADR 0xA4050120
# define PSDR 0xA405013e
@@ -69,77 +66,42 @@
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
# define SCSPTR0 0xa4050160
-# define SCSPTR1 0xa405013e
-# define SCSPTR2 0xa4050160
-# define SCSPTR3 0xa405013e
-# define SCSPTR4 0xa4050128
-# define SCSPTR5 0xa4050128
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-# define SCIF_PTR2_OFFS 0x0000020
-# define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */
#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_H8S2678)
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
# define SCSPTR0 0xfe4b0020
-# define SCSPTR1 0xfe4b0020
-# define SCSPTR2 0xfe4b0020
# define SCIF_ORER 0x0001
-# define SCIF_ONLY
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCSPTR1 0xffe08024 /* 16 bit SCIF */
-# define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
# define SCSPTR0 0xff923020 /* 16 bit SCIF */
-# define SCSPTR1 0xff924020 /* 16 bit SCIF */
-# define SCSPTR2 0xff925020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCSPTR1 0xffe10024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
defined(CONFIG_CPU_SUBTYPE_SH7786)
# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
-# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */
-# define SCSPTR2 0xffec0024 /* 16 bit SCIF */
-# define SCSPTR3 0xffed0024 /* 16 bit SCIF */
-# define SCSPTR4 0xffee0024 /* 16 bit SCIF */
-# define SCSPTR5 0xffef0024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
defined(CONFIG_CPU_SUBTYPE_SH7203) || \
defined(CONFIG_CPU_SUBTYPE_SH7206) || \
defined(CONFIG_CPU_SUBTYPE_SH7263)
# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
-# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */
-# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */
-# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */
-# if defined(CONFIG_CPU_SUBTYPE_SH7201)
-# define SCSPTR4 0xfffeA020 /* 16 bit SCIF */
-# define SCSPTR5 0xfffeA820 /* 16 bit SCIF */
-# define SCSPTR6 0xfffeB020 /* 16 bit SCIF */
-# define SCSPTR7 0xfffeB820 /* 16 bit SCIF */
-# endif
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
-# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
-# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
-# define SCSPTR1 0xffc40020 /* 16 bit SCIF */
-# define SCSPTR2 0xffc50020 /* 16 bit SCIF */
-# define SCSPTR3 0xffc60020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#else
# error CPU subtype not defined
@@ -411,7 +373,6 @@ SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
-SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16)
SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index b57bc273b184..2a753f1e9183 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1942,6 +1942,7 @@ config FB_SH_MOBILE_LCDC
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
+ select FB_BACKLIGHT
select SH_MIPI_DSI if SH_LCD_MIPI_DSI
---help---
Frame buffer driver for the on-chip SH-Mobile LCD controller.
@@ -2302,6 +2303,17 @@ config FB_JZ4740
help
Framebuffer support for the JZ4740 SoC.
+config FB_PUV3_UNIGFX
+ tristate "PKUnity v3 Unigfx framebuffer support"
+ depends on FB && UNICORE32 && ARCH_PUV3
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ help
+ Choose this option if you want to use the Unigfx device as a
+ framebuffer device. PCI and AGP are not supported.
+
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 8c8fabdff9d0..b0eb3da24670 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -139,6 +139,7 @@ obj-$(CONFIG_FB_MB862XX) += mb862xx/
obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
+obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
new file mode 100644
index 000000000000..dbd2dc4745d1
--- /dev/null
+++ b/drivers/video/fb-puv3.c
@@ -0,0 +1,846 @@
+/*
+ * Frame Buffer Driver for PKUnity-v3 Unigfx
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <asm/sizes.h>
+#include <mach/hardware.h>
+
+/* Platform_data reserved for unifb registers. */
+#define UNIFB_REGS_NUM 10
+/* RAM reserved for the frame buffer. */
+#define UNIFB_MEMSIZE (SZ_4M) /* 4 MB for 1024*768*32b */
+
+/*
+ * Because UNIGFX does not have EDID,
+ * all the modes are organized as follows.
+ */
+static const struct fb_videomode unifb_modes[] = {
+ /* 0 640x480-60 VESA */
+ { "640x480@60", 60, 640, 480, 25175000, 48, 16, 34, 10, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 1 640x480-75 VESA */
+ { "640x480@75", 75, 640, 480, 31500000, 120, 16, 18, 1, 64, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 2 800x600-60 VESA */
+ { "800x600@60", 60, 800, 600, 40000000, 88, 40, 26, 1, 128, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 3 800x600-75 VESA */
+ { "800x600@75", 75, 800, 600, 49500000, 160, 16, 23, 1, 80, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 4 1024x768-60 VESA */
+ { "1024x768@60", 60, 1024, 768, 65000000, 160, 24, 34, 3, 136, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 5 1024x768-75 VESA */
+ { "1024x768@75", 75, 1024, 768, 78750000, 176, 16, 30, 1, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 6 1280x960-60 VESA */
+ { "1280x960@60", 60, 1280, 960, 108000000, 312, 96, 38, 1, 112, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 7 1440x900-60 VESA */
+ { "1440x900@60", 60, 1440, 900, 106500000, 232, 80, 30, 3, 152, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 8 1024x600-60 VESA, FIXME: untested */
+ { "1024x600@60", 60, 1024, 600, 50650000, 160, 24, 26, 1, 136, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 9 1024x600-75 VESA, FIXME: untested */
+ { "1024x600@75", 75, 1024, 600, 61500000, 176, 16, 23, 1, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 10 1366x768-60 VESA, FIXME: untested */
+ { "1366x768@60", 60, 1366, 768, 85500000, 256, 58, 18, 1, 112, 3,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+};
+
+static struct fb_var_screeninfo unifb_default = {
+ .xres = 640,
+ .yres = 480,
+ .xres_virtual = 640,
+ .yres_virtual = 480,
+ .bits_per_pixel = 16,
+ .red = { 11, 5, 0 },
+ .green = { 5, 6, 0 },
+ .blue = { 0, 5, 0 },
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .pixclock = 25175000,
+ .left_margin = 48,
+ .right_margin = 16,
+ .upper_margin = 33,
+ .lower_margin = 10,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static struct fb_fix_screeninfo unifb_fix = {
+ .id = "UNIGFX FB",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .xpanstep = 1,
+ .ypanstep = 1,
+ .ywrapstep = 1,
+ .accel = FB_ACCEL_NONE,
+};
+
+static void unifb_sync(struct fb_info *info)
+{
+ /* TODO: maybe this can be replaced by an interrupt */
+ int cnt;
+
+ for (cnt = 0; cnt < 0x10000000; cnt++) {
+ if (readl(UGE_COMMAND) & 0x1000000)
+ return;
+ }
+
+ if (cnt > 0x8000000)
dev_warn(info->device, "UniGFX GE timed out ...\n");
+}
+
+static void unifb_prim_fillrect(struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ int awidth = region->width;
+ int aheight = region->height;
+ int m_iBpp = info->var.bits_per_pixel;
+ int screen_width = info->var.xres;
+ int src_sel = 1; /* from fg_color */
+ int pat_sel = 1;
+ int src_x0 = 0;
+ int dst_x0 = region->dx;
+ int src_y0 = 0;
+ int dst_y0 = region->dy;
+ int rop_alpha_sel = 0;
+ int rop_alpha_code = 0xCC;
+ int x_dir = 1;
+ int y_dir = 1;
+ int alpha_r = 0;
+ int alpha_sel = 0;
+ int dst_pitch = screen_width * (m_iBpp / 8);
+ int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
+ int src_pitch = screen_width * (m_iBpp / 8);
+ int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
+ unsigned int command = 0;
+ int clip_region = 0;
+ int clip_en = 0;
+ int tp_en = 0;
+ int fg_color = 0;
+ int bottom = info->var.yres - 1;
+ int right = info->var.xres - 1;
+ int top = 0;
+
+ bottom = (bottom << 16) | right;
+ command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16)
+ | (x_dir << 20) | (y_dir << 21) | (command << 24)
+ | (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
+ src_pitch = (dst_pitch << 16) | src_pitch;
+ awidth = awidth | (aheight << 16);
+ alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff)
+ | (alpha_sel << 16);
+ src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
+ dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
+ fg_color = region->color;
+
+ unifb_sync(info);
+
+ writel(((u32 *)(info->pseudo_palette))[fg_color], UGE_FCOLOR);
+ writel(0, UGE_BCOLOR);
+ writel(src_pitch, UGE_PITCH);
+ writel(src_offset, UGE_SRCSTART);
+ writel(dst_offset, UGE_DSTSTART);
+ writel(awidth, UGE_WIDHEIGHT);
+ writel(top, UGE_CLIP0);
+ writel(bottom, UGE_CLIP1);
+ writel(alpha_r, UGE_ROPALPHA);
+ writel(src_x0, UGE_SRCXY);
+ writel(dst_x0, UGE_DSTXY);
+ writel(command, UGE_COMMAND);
+}
+
+static void unifb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ struct fb_fillrect modded;
+ int vxres, vyres;
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ sys_fillrect(info, region);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ memcpy(&modded, region, sizeof(struct fb_fillrect));
+
+ if (!modded.width || !modded.height ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if (modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if (modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ unifb_prim_fillrect(info, &modded);
+}
+
+static void unifb_prim_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ int awidth = area->width;
+ int aheight = area->height;
+ int m_iBpp = info->var.bits_per_pixel;
+ int screen_width = info->var.xres;
+ int src_sel = 2; /* from mem */
+ int pat_sel = 0;
+ int src_x0 = area->sx;
+ int dst_x0 = area->dx;
+ int src_y0 = area->sy;
+ int dst_y0 = area->dy;
+
+ int rop_alpha_sel = 0;
+ int rop_alpha_code = 0xCC;
+ int x_dir = 1;
+ int y_dir = 1;
+
+ int alpha_r = 0;
+ int alpha_sel = 0;
+ int dst_pitch = screen_width * (m_iBpp / 8);
+ int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
+ int src_pitch = screen_width * (m_iBpp / 8);
+ int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
+ unsigned int command = 0;
+ int clip_region = 0;
+ int clip_en = 1;
+ int tp_en = 0;
+ int top = 0;
+ int bottom = info->var.yres;
+ int right = info->var.xres;
+ int fg_color = 0;
+ int bg_color = 0;
+
+ if (src_x0 < 0)
+ src_x0 = 0;
+ if (src_y0 < 0)
+ src_y0 = 0;
+
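+ /*
+ * Pick the vertical copy direction: when the destination is at or
+ * below the source, start from the last line and clear y_dir so the
+ * bottom-up blit doesn't overwrite overlapping source rows before
+ * they are read.
+ */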
+ if (src_y0 - dst_y0 > 0) {
+ y_dir = 1;
+ } else {
+ y_dir = 0;
+ src_offset = (src_y0 + aheight) * src_pitch +
+ src_x0 * (m_iBpp / 8);
+ dst_offset = (dst_y0 + aheight) * dst_pitch +
+ dst_x0 * (m_iBpp / 8);
+ src_y0 += aheight;
+ dst_y0 += aheight;
+ }
+
+ command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16) |
+ (x_dir << 20) | (y_dir << 21) | (command << 24) |
+ (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
+ src_pitch = (dst_pitch << 16) | src_pitch;
+ awidth = awidth | (aheight << 16);
+ alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff) |
+ (alpha_sel << 16);
+ src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
+ dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
+ bottom = (bottom << 16) | right;
+
+ unifb_sync(info);
+
+ writel(src_pitch, UGE_PITCH);
+ writel(src_offset, UGE_SRCSTART);
+ writel(dst_offset, UGE_DSTSTART);
+ writel(awidth, UGE_WIDHEIGHT);
+ writel(top, UGE_CLIP0);
+ writel(bottom, UGE_CLIP1);
+ writel(bg_color, UGE_BCOLOR);
+ writel(fg_color, UGE_FCOLOR);
+ writel(alpha_r, UGE_ROPALPHA);
+ writel(src_x0, UGE_SRCXY);
+ writel(dst_x0, UGE_DSTXY);
+ writel(command, UGE_COMMAND);
+}
+
+static void unifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct fb_copyarea modded;
+ u32 vxres, vyres;
+ modded.sx = area->sx;
+ modded.sy = area->sy;
+ modded.dx = area->dx;
+ modded.dy = area->dy;
+ modded.width = area->width;
+ modded.height = area->height;
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ sys_copyarea(info, area);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ if (!modded.width || !modded.height ||
+ modded.sx >= vxres || modded.sy >= vyres ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if (modded.sx + modded.width > vxres)
+ modded.width = vxres - modded.sx;
+ if (modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if (modded.sy + modded.height > vyres)
+ modded.height = vyres - modded.sy;
+ if (modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ unifb_prim_copyarea(info, &modded);
+}
+
+static void unifb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ sys_imageblit(info, image);
+}
+
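+/*
+ * Bytes per scanline: round the row size in bits up to a 32-bit boundary,
+ * then convert to bytes, e.g. 640 pixels at 16 bpp -> 10240 bits -> 1280
+ * bytes.
+ */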
+static u_long get_line_length(int xres_virtual, int bpp)
+{
+ u_long length;
+
+ length = xres_virtual * bpp;
+ length = (length + 31) & ~31;
+ length >>= 3;
+ return length;
+}
+
+/*
+ * Setting the video mode has been split into two parts.
+ * The first part, xxxfb_check_var, must not write anything
+ * to hardware; it should only verify and adjust var.
+ * This means it doesn't alter par, but it does use hardware
+ * data from it to check this var.
+ */
+static int unifb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ u_long line_length;
+
+ /*
+ * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal,
+ * as FB_VMODE_SMOOTH_XPAN is only used internally
+ */
+
+ if (var->vmode & FB_VMODE_CONUPDATE) {
+ var->vmode |= FB_VMODE_YWRAP;
+ var->xoffset = info->var.xoffset;
+ var->yoffset = info->var.yoffset;
+ }
+
+ /*
+ * Some very basic checks
+ */
+ if (!var->xres)
+ var->xres = 1;
+ if (!var->yres)
+ var->yres = 1;
+ if (var->xres > var->xres_virtual)
+ var->xres_virtual = var->xres;
+ if (var->yres > var->yres_virtual)
+ var->yres_virtual = var->yres;
+ if (var->bits_per_pixel <= 1)
+ var->bits_per_pixel = 1;
+ else if (var->bits_per_pixel <= 8)
+ var->bits_per_pixel = 8;
+ else if (var->bits_per_pixel <= 16)
+ var->bits_per_pixel = 16;
+ else if (var->bits_per_pixel <= 24)
+ var->bits_per_pixel = 24;
+ else if (var->bits_per_pixel <= 32)
+ var->bits_per_pixel = 32;
+ else
+ return -EINVAL;
+
+ if (var->xres_virtual < var->xoffset + var->xres)
+ var->xres_virtual = var->xoffset + var->xres;
+ if (var->yres_virtual < var->yoffset + var->yres)
+ var->yres_virtual = var->yoffset + var->yres;
+
+ /*
+ * Memory limit
+ */
+ line_length =
+ get_line_length(var->xres_virtual, var->bits_per_pixel);
+ if (line_length * var->yres_virtual > UNIFB_MEMSIZE)
+ return -ENOMEM;
+
+ /*
+ * Now that we have checked it, we alter var. The reason is that the
+ * video mode passed in might not work, but slight changes to it might
+ * make it work. This way we let the user know what is acceptable.
+ */
+ switch (var->bits_per_pixel) {
+ case 1:
+ case 8:
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 0;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 16: /* RGBA 5551 */
+ if (var->transp.length) {
+ var->red.offset = 0;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 5;
+ var->blue.offset = 10;
+ var->blue.length = 5;
+ var->transp.offset = 15;
+ var->transp.length = 1;
+ } else { /* RGB 565 */
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ }
+ break;
+ case 24: /* RGB 888 */
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 16;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 32: /* RGBA 8888 */
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ break;
+ }
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+
+ return 0;
+}
+
+/*
+ * This routine actually sets the video mode. It's here that we set the
+ * hardware state, i.e. info->par and info->fix, which can be affected
+ * by the change in var. For this driver it doesn't do much.
+ */
+static int unifb_set_par(struct fb_info *info)
+{
+ int hTotal, vTotal, hSyncStart, hSyncEnd, vSyncStart, vSyncEnd;
+ int format;
+
+#ifdef CONFIG_PUV3_PM
+ struct clk *clk_vga;
+ u32 pixclk = 0;
+ int i;
+
+ for (i = 0; i <= 10; i++) {
+ if (info->var.xres == unifb_modes[i].xres
+ && info->var.yres == unifb_modes[i].yres
+ && info->var.upper_margin == unifb_modes[i].upper_margin
+ && info->var.lower_margin == unifb_modes[i].lower_margin
+ && info->var.left_margin == unifb_modes[i].left_margin
+ && info->var.right_margin == unifb_modes[i].right_margin
+ && info->var.hsync_len == unifb_modes[i].hsync_len
+ && info->var.vsync_len == unifb_modes[i].vsync_len) {
+ pixclk = unifb_modes[i].pixclock;
+ break;
+ }
+ }
+
+ /* set clock rate */
+ clk_vga = clk_get(info->device, "VGA_CLK");
+ if (clk_vga == ERR_PTR(-ENOENT))
+ return -ENOENT;
+
+ if (pixclk != 0) {
+ if (clk_set_rate(clk_vga, pixclk)) { /* set clock failed */
+ info->fix = unifb_fix;
+ info->var = unifb_default;
+ if (clk_set_rate(clk_vga, unifb_default.pixclock))
+ return -EINVAL;
+ }
+ }
+#endif
+
+ info->fix.line_length = get_line_length(info->var.xres_virtual,
+ info->var.bits_per_pixel);
+
+ hSyncStart = info->var.xres + info->var.right_margin;
+ hSyncEnd = hSyncStart + info->var.hsync_len;
+ hTotal = hSyncEnd + info->var.left_margin;
+
+ vSyncStart = info->var.yres + info->var.lower_margin;
+ vSyncEnd = vSyncStart + info->var.vsync_len;
+ vTotal = vSyncEnd + info->var.upper_margin;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = UDE_CFG_DST8;
+ break;
+ case 16:
+ format = UDE_CFG_DST16;
+ break;
+ case 24:
+ format = UDE_CFG_DST24;
+ break;
+ case 32:
+ format = UDE_CFG_DST32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(PKUNITY_UNIGFX_MMAP_BASE, UDE_FSA);
+ writel(info->var.yres, UDE_LS);
+ writel(get_line_length(info->var.xres,
+ info->var.bits_per_pixel) >> 3, UDE_PS);
+ /* the hardware expects the line length in 8-byte units, hence >> 3 */
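+ /*
+ * Each UDE timing register packs a pair of values, high halfword
+ * first: HAT/VAT take total|active, HBT/VBT the same values minus
+ * one, and HST/VST sync end-1|start-1.
+ */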
+ writel((hTotal << 16) | (info->var.xres), UDE_HAT);
+ writel(((hTotal - 1) << 16) | (info->var.xres - 1), UDE_HBT);
+ writel(((hSyncEnd - 1) << 16) | (hSyncStart - 1), UDE_HST);
+ writel((vTotal << 16) | (info->var.yres), UDE_VAT);
+ writel(((vTotal - 1) << 16) | (info->var.yres - 1), UDE_VBT);
+ writel(((vSyncEnd - 1) << 16) | (vSyncStart - 1), UDE_VST);
+ writel(UDE_CFG_GDEN_ENABLE | UDE_CFG_TIMEUP_ENABLE
+ | format | 0xC0000001, UDE_CFG);
+
+ return 0;
+}
+
+/*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+ * entries in the var structure). Return != 0 for invalid regno.
+ */
+static int unifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ if (regno >= 256) /* no. of hw registers */
+ return 1;
+
+ /* grayscale works only partially under directcolor */
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue =
+ (red * 77 + green * 151 + blue * 28) >> 8;
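+ /* the integer weights sum to 256: 77/256 ~ 0.30, 151/256 ~ 0.59, 28/256 ~ 0.11 */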
+ }
+
+#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
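+/*
+ * CNVT_TOHW scales a 16-bit colour component to `width' bits: it computes
+ * (val * ((1 << width) - 1) + 0x7FFF) >> 16, i.e. multiply by the new
+ * full-scale value and round.
+ */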
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ red = CNVT_TOHW(red, info->var.red.length);
+ green = CNVT_TOHW(green, info->var.green.length);
+ blue = CNVT_TOHW(blue, info->var.blue.length);
+ transp = CNVT_TOHW(transp, info->var.transp.length);
+ break;
+ case FB_VISUAL_DIRECTCOLOR:
+ red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */
+ green = CNVT_TOHW(green, 8);
+ blue = CNVT_TOHW(blue, 8);
+ /* hey, there is a bug in transp handling... */
+ transp = CNVT_TOHW(transp, 8);
+ break;
+ }
+#undef CNVT_TOHW
+ /* Truecolor has hardware independent palette */
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 v;
+
+ if (regno >= 16)
+ return 1;
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ break;
+ case 16:
+ case 24:
+ case 32:
+ ((u32 *) (info->pseudo_palette))[regno] = v;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ */
+static int unifb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if (var->vmode & FB_VMODE_YWRAP) {
+ if (var->yoffset < 0
+ || var->yoffset >= info->var.yres_virtual
+ || var->xoffset)
+ return -EINVAL;
+ } else {
+ if (var->xoffset + var->xres > info->var.xres_virtual ||
+ var->yoffset + var->yres > info->var.yres_virtual)
+ return -EINVAL;
+ }
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ info->var.vmode |= FB_VMODE_YWRAP;
+ else
+ info->var.vmode &= ~FB_VMODE_YWRAP;
+ return 0;
+}
+
+int unifb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pos = info->fix.smem_start + offset;
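+ /* vm_pgoff is the file offset in pages; pos is the physical address inside the framebuffer aperture to map */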
+
+ if (offset + size > info->fix.smem_len)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, pos >> PAGE_SHIFT, size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ vma->vm_flags |= VM_RESERVED; /* avoid swapping out this VMA */
+ return 0;
+
+}
+
+static struct fb_ops unifb_ops = {
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_check_var = unifb_check_var,
+ .fb_set_par = unifb_set_par,
+ .fb_setcolreg = unifb_setcolreg,
+ .fb_pan_display = unifb_pan_display,
+ .fb_fillrect = unifb_fillrect,
+ .fb_copyarea = unifb_copyarea,
+ .fb_imageblit = unifb_imageblit,
+ .fb_mmap = unifb_mmap,
+};
+
+/*
+ * Initialisation
+ */
+static int unifb_probe(struct platform_device *dev)
+{
+ struct fb_info *info;
+ u32 unifb_regs[UNIFB_REGS_NUM];
+ int retval = -ENOMEM;
+ struct resource *iomem, *mapmem;
+
+ info = framebuffer_alloc(sizeof(u32)*256, &dev->dev);
+ if (!info)
+ goto err;
+
+ info->screen_base = (char __iomem *)KUSER_UNIGFX_BASE;
+ info->fbops = &unifb_ops;
+
+ retval = fb_find_mode(&info->var, info, NULL,
+ unifb_modes, 10, &unifb_modes[0], 16);
+
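+ /* fb_find_mode() returns 0 on failure and 4 when it fell back to the first valid mode; use the built-in default var in either case */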
+ if (!retval || (retval == 4))
+ info->var = unifb_default;
+
+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ unifb_fix.mmio_start = iomem->start;
+
+ mapmem = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ unifb_fix.smem_start = mapmem->start;
+ unifb_fix.smem_len = UNIFB_MEMSIZE;
+
+ info->fix = unifb_fix;
+ info->pseudo_palette = info->par;
+ info->par = NULL;
+ info->flags = FBINFO_FLAG_DEFAULT;
+#ifdef FB_ACCEL_PUV3_UNIGFX
+ info->fix.accel = FB_ACCEL_PUV3_UNIGFX;
+#endif
+
+ retval = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (retval < 0)
+ goto err1;
+
+ retval = register_framebuffer(info);
+ if (retval < 0)
+ goto err2;
+ platform_set_drvdata(dev, info);
+ platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
+
+ printk(KERN_INFO
+ "fb%d: Virtual frame buffer device, using %dM of video memory\n",
+ info->node, UNIFB_MEMSIZE >> 20);
+ return 0;
+err2:
+ fb_dealloc_cmap(&info->cmap);
+err1:
+ framebuffer_release(info);
+err:
+ return retval;
+}
+
+static int unifb_remove(struct platform_device *dev)
+{
+ struct fb_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int unifb_resume(struct platform_device *dev)
+{
+ int rc = 0;
+ u32 *unifb_regs = dev->dev.platform_data;
+
+ if (dev->dev.power.power_state.event == PM_EVENT_ON)
+ return 0;
+
+ console_lock();
+
+ if (dev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ writel(unifb_regs[0], UDE_FSA);
+ writel(unifb_regs[1], UDE_LS);
+ writel(unifb_regs[2], UDE_PS);
+ writel(unifb_regs[3], UDE_HAT);
+ writel(unifb_regs[4], UDE_HBT);
+ writel(unifb_regs[5], UDE_HST);
+ writel(unifb_regs[6], UDE_VAT);
+ writel(unifb_regs[7], UDE_VBT);
+ writel(unifb_regs[8], UDE_VST);
+ writel(unifb_regs[9], UDE_CFG);
+ }
+ dev->dev.power.power_state = PMSG_ON;
+
+ console_unlock();
+
+ return rc;
+}
+
+static int unifb_suspend(struct platform_device *dev, pm_message_t mesg)
+{
+ u32 *unifb_regs = dev->dev.platform_data;
+
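+ /* snapshot the ten display-engine registers; unifb_resume() writes them back after a suspend */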
+ unifb_regs[0] = readl(UDE_FSA);
+ unifb_regs[1] = readl(UDE_LS);
+ unifb_regs[2] = readl(UDE_PS);
+ unifb_regs[3] = readl(UDE_HAT);
+ unifb_regs[4] = readl(UDE_HBT);
+ unifb_regs[5] = readl(UDE_HST);
+ unifb_regs[6] = readl(UDE_VAT);
+ unifb_regs[7] = readl(UDE_VBT);
+ unifb_regs[8] = readl(UDE_VST);
+ unifb_regs[9] = readl(UDE_CFG);
+
+ if (mesg.event == dev->dev.power.power_state.event)
+ return 0;
+
+ switch (mesg.event) {
+ case PM_EVENT_FREEZE: /* about to take snapshot */
+ case PM_EVENT_PRETHAW: /* before restoring snapshot */
+ goto done;
+ }
+
+ console_lock();
+
+ /* do nothing... */
+
+ console_unlock();
+
+done:
+ dev->dev.power.power_state = mesg;
+
+ return 0;
+}
+#else
+#define unifb_resume NULL
+#define unifb_suspend NULL
+#endif
+
+static struct platform_driver unifb_driver = {
+ .probe = unifb_probe,
+ .remove = unifb_remove,
+ .resume = unifb_resume,
+ .suspend = unifb_suspend,
+ .driver = {
+ .name = "PKUnity-v3-UNIGFX",
+ },
+};
+
+static int __init unifb_init(void)
+{
+#ifndef MODULE
+ if (fb_get_options("unifb", NULL))
+ return -ENODEV;
+#endif
+
+ return platform_driver_register(&unifb_driver);
+}
+
+module_init(unifb_init);
+
+static void __exit unifb_exit(void)
+{
+ platform_driver_unregister(&unifb_driver);
+}
+
+module_exit(unifb_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index bf12e53aed5c..bf2629f83f40 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -21,6 +21,8 @@
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/console.h>
+#include <linux/backlight.h>
+#include <linux/gpio.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/atomic.h>
@@ -67,6 +69,7 @@ static unsigned long lcdc_offs_mainlcd[NR_CH_REGS] = {
[LDSM1R] = 0x428,
[LDSM2R] = 0x42c,
[LDSA1R] = 0x430,
+ [LDSA2R] = 0x434,
[LDMLSR] = 0x438,
[LDHCNR] = 0x448,
[LDHSYNR] = 0x44c,
@@ -151,6 +154,7 @@ static bool banked(int reg_nr)
case LDDFR:
case LDSM1R:
case LDSA1R:
+ case LDSA2R:
case LDMLSR:
case LDHCNR:
case LDHSYNR:
@@ -463,6 +467,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
struct sh_mobile_lcdc_board_cfg *board_cfg;
unsigned long tmp;
int bpp = 0;
+ unsigned long ldddsr;
int k, m;
int ret = 0;
@@ -541,16 +546,21 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
}
/* word and long word swap */
- switch (bpp) {
- case 16:
- lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 6);
- break;
- case 24:
- lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 7);
- break;
- case 32:
- lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 4);
- break;
+ ldddsr = lcdc_read(priv, _LDDDSR);
+ if (priv->ch[0].info->var.nonstd)
+ lcdc_write(priv, _LDDDSR, ldddsr | 7);
+ else {
+ switch (bpp) {
+ case 16:
+ lcdc_write(priv, _LDDDSR, ldddsr | 6);
+ break;
+ case 24:
+ lcdc_write(priv, _LDDDSR, ldddsr | 7);
+ break;
+ case 32:
+ lcdc_write(priv, _LDDDSR, ldddsr | 4);
+ break;
+ }
}
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -561,21 +571,40 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
/* set bpp format in PKF[4:0] */
tmp = lcdc_read_chan(ch, LDDFR);
- tmp &= ~0x0001001f;
- switch (ch->info->var.bits_per_pixel) {
- case 16:
- tmp |= 0x03;
- break;
- case 24:
- tmp |= 0x0b;
- break;
- case 32:
- break;
+ tmp &= ~0x0003031f;
+ if (ch->info->var.nonstd) {
+ tmp |= (ch->info->var.nonstd << 16);
+ switch (ch->info->var.bits_per_pixel) {
+ case 12:
+ break;
+ case 16:
+ tmp |= (0x1 << 8);
+ break;
+ case 24:
+ tmp |= (0x2 << 8);
+ break;
+ }
+ } else {
+ switch (ch->info->var.bits_per_pixel) {
+ case 16:
+ tmp |= 0x03;
+ break;
+ case 24:
+ tmp |= 0x0b;
+ break;
+ case 32:
+ break;
+ }
}
lcdc_write_chan(ch, LDDFR, tmp);
/* point out our frame buffer */
lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start);
+ if (ch->info->var.nonstd)
+ lcdc_write_chan(ch, LDSA2R,
+ ch->info->fix.smem_start +
+ ch->info->var.xres *
+ ch->info->var.yres_virtual);
/* set line size */
lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length);
@@ -618,6 +647,11 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
board_cfg->display_on(board_cfg->board_data, ch->info);
module_put(board_cfg->owner);
}
+
+ if (ch->bl) {
+ ch->bl->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(ch->bl);
+ }
}
return 0;
@@ -648,6 +682,11 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
sh_mobile_lcdc_clk_on(priv);
}
+ if (ch->bl) {
+ ch->bl->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(ch->bl);
+ }
+
board_cfg = &ch->cfg.board_cfg;
if (try_module_get(board_cfg->owner) && board_cfg->display_off) {
board_cfg->display_off(board_cfg->board_data);
@@ -804,9 +843,15 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
struct sh_mobile_lcdc_priv *priv = ch->lcdc;
unsigned long ldrcntr;
unsigned long new_pan_offset;
+ unsigned long base_addr_y, base_addr_c;
+ unsigned long c_offset;
- new_pan_offset = (var->yoffset * info->fix.line_length) +
- (var->xoffset * (info->var.bits_per_pixel / 8));
+ if (!var->nonstd)
+ new_pan_offset = (var->yoffset * info->fix.line_length) +
+ (var->xoffset * (info->var.bits_per_pixel / 8));
+ else
+ new_pan_offset = (var->yoffset * info->fix.line_length) +
+ (var->xoffset);
if (new_pan_offset == ch->pan_offset)
return 0; /* No change, do nothing */
@@ -814,7 +859,26 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
ldrcntr = lcdc_read(priv, _LDRCNTR);
/* Set the source address for the next refresh */
- lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle + new_pan_offset);
+ base_addr_y = ch->dma_handle + new_pan_offset;
+ if (var->nonstd) {
+ /* Set y offset */
+ c_offset = (var->yoffset *
+ info->fix.line_length *
+ (info->var.bits_per_pixel - 8)) / 8;
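+ /*
+ * The (bits_per_pixel - 8) / 8 factor gives the chroma bytes per luma
+ * line: half a line for 12 bpp (vertically subsampled CbCr as in
+ * NV12), and analogously one line for 16 bpp and two for 24 bpp.
+ */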
+ base_addr_c = ch->dma_handle + var->xres * var->yres_virtual +
+ c_offset;
+ /* Set x offset */
+ if (info->var.bits_per_pixel == 24)
+ base_addr_c += 2 * var->xoffset;
+ else
+ base_addr_c += var->xoffset;
+ } else
+ base_addr_c = 0;
+
+ lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
+ if (base_addr_c)
+ lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
+
if (lcdc_chan_is_sublcd(ch))
lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
else
@@ -885,7 +949,10 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
/* Couldn't reconfigure, hopefully, can continue as before */
return;
- info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8);
+ if (info->var.nonstd)
+ info->fix.line_length = mode1.xres;
+ else
+ info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8);
/*
* fb_set_var() calls the notifier change internally, only if
@@ -980,8 +1047,80 @@ static struct fb_ops sh_mobile_lcdc_ops = {
.fb_check_var = sh_mobile_check_var,
};
-static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp)
+static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
+{
+ struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
+ struct sh_mobile_lcdc_board_cfg *cfg = &ch->cfg.board_cfg;
+ int brightness = bdev->props.brightness;
+
+ if (bdev->props.power != FB_BLANK_UNBLANK ||
+ bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ return cfg->set_brightness(cfg->board_data, brightness);
+}
+
+static int sh_mobile_lcdc_get_brightness(struct backlight_device *bdev)
+{
+ struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
+ struct sh_mobile_lcdc_board_cfg *cfg = &ch->cfg.board_cfg;
+
+ return cfg->get_brightness(cfg->board_data);
+}
+
+static int sh_mobile_lcdc_check_fb(struct backlight_device *bdev,
+ struct fb_info *info)
+{
+ return (info->bl_dev == bdev);
+}
+
+static struct backlight_ops sh_mobile_lcdc_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = sh_mobile_lcdc_update_bl,
+ .get_brightness = sh_mobile_lcdc_get_brightness,
+ .check_fb = sh_mobile_lcdc_check_fb,
+};
+
+static struct backlight_device *sh_mobile_lcdc_bl_probe(struct device *parent,
+ struct sh_mobile_lcdc_chan *ch)
+{
+ struct backlight_device *bl;
+
+ bl = backlight_device_register(ch->cfg.bl_info.name, parent, ch,
+ &sh_mobile_lcdc_bl_ops, NULL);
+ if (!bl) {
+ dev_err(parent, "unable to register backlight device\n");
+ return NULL;
+ }
+
+ bl->props.max_brightness = ch->cfg.bl_info.max_brightness;
+ bl->props.brightness = bl->props.max_brightness;
+ backlight_update_status(bl);
+
+ return bl;
+}
+
+static void sh_mobile_lcdc_bl_remove(struct backlight_device *bdev)
+{
+ backlight_device_unregister(bdev);
+}
+
+static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp,
+ int nonstd)
{
+ if (nonstd) {
+ switch (bpp) {
+ case 12:
+ case 16:
+ case 24:
+ var->bits_per_pixel = bpp;
+ var->nonstd = nonstd;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
switch (bpp) {
case 16: /* PKF[4:0] = 00011 - RGB 565 */
var->red.offset = 11;
@@ -1198,6 +1337,10 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
init_completion(&ch->vsync_completion);
ch->pan_offset = 0;
+ /* probe the backlight if there is one defined */
+ if (ch->cfg.bl_info.max_brightness)
+ ch->bl = sh_mobile_lcdc_bl_probe(&pdev->dev, ch);
+
switch (pdata->ch[i].chan) {
case LCDC_CHAN_MAINLCD:
ch->enabled = 1 << 1;
@@ -1260,6 +1403,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
k < cfg->num_cfg && lcd_cfg;
k++, lcd_cfg++) {
unsigned long size = lcd_cfg->yres * lcd_cfg->xres;
+ /* NV12 buffers must have an even number of lines */
+ if ((cfg->nonstd) && cfg->bpp == 12 &&
+ (lcd_cfg->yres & 0x1)) {
+ dev_err(&pdev->dev, "yres must be a multiple of 2"
+ " for YCbCr420 mode.\n");
+ error = -EINVAL;
+ goto err1;
+ }
if (size > max_size) {
max_cfg = lcd_cfg;
@@ -1274,7 +1425,11 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
max_cfg->xres, max_cfg->yres);
info->fix = sh_mobile_lcdc_fix;
- info->fix.smem_len = max_size * (cfg->bpp / 8) * 2;
+ info->fix.smem_len = max_size * 2 * cfg->bpp / 8;
+
+ /* Only pan in 2 line steps for NV12 */
+ if (cfg->nonstd && cfg->bpp == 12)
+ info->fix.ypanstep = 2;
if (!mode) {
mode = &default_720p;
@@ -1292,7 +1447,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
var->yres_virtual = var->yres * 2;
var->activate = FB_ACTIVATE_NOW;
- error = sh_mobile_lcdc_set_bpp(var, cfg->bpp);
+ error = sh_mobile_lcdc_set_bpp(var, cfg->bpp, cfg->nonstd);
if (error)
break;
@@ -1316,7 +1471,11 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
}
info->fix.smem_start = ch->dma_handle;
- info->fix.line_length = var->xres * (cfg->bpp / 8);
+ if (var->nonstd)
+ info->fix.line_length = var->xres;
+ else
+ info->fix.line_length = var->xres * (cfg->bpp / 8);
+
info->screen_base = buf;
info->device = &pdev->dev;
ch->display_var = *var;
@@ -1345,6 +1504,8 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
}
}
+ info->bl_dev = ch->bl;
+
error = register_framebuffer(info);
if (error < 0)
goto err1;
@@ -1404,6 +1565,11 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
framebuffer_release(info);
}
+ for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
+ if (priv->ch[i].bl)
+ sh_mobile_lcdc_bl_remove(priv->ch[i].bl);
+ }
+
if (priv->dot_clk)
clk_put(priv->dot_clk);
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index 9ecee2fba1d7..4635eed63eee 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -8,7 +8,7 @@
/* per-channel registers */
enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R,
- LDSM2R, LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR,
+ LDSM2R, LDSA1R, LDSA2R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR,
LDHAJR,
NR_CH_REGS };
@@ -16,6 +16,7 @@ enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R,
struct sh_mobile_lcdc_priv;
struct fb_info;
+struct backlight_device;
struct sh_mobile_lcdc_chan {
struct sh_mobile_lcdc_priv *lcdc;
@@ -26,6 +27,7 @@ struct sh_mobile_lcdc_chan {
u32 pseudo_palette[PALETTE_NR];
unsigned long saved_ch_regs[NR_CH_REGS];
struct fb_info *info;
+ struct backlight_device *bl;
dma_addr_t dma_handle;
struct fb_deferred_io defio;
struct scatterlist *sglist;
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index 48f1342897bd..781f3aa66b42 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -110,16 +110,13 @@
struct tmds_chip_information {
int tmds_chip_name;
int tmds_chip_slave_addr;
- int data_mode;
int output_interface;
int i2c_port;
- int device_type;
};
struct lvds_chip_information {
int lvds_chip_name;
int lvds_chip_slave_addr;
- int data_mode;
int output_interface;
int i2c_port;
};
@@ -142,9 +139,6 @@ struct chip_information {
struct crt_setting_information {
int iga_path;
- int h_active;
- int v_active;
- int bpp;
int refresh_rate;
};
@@ -162,8 +156,6 @@ struct lvds_setting_information {
int h_active;
int v_active;
int bpp;
- int refresh_rate;
- int lcd_panel_id;
int lcd_panel_hres;
int lcd_panel_vres;
int display_method;
@@ -188,7 +180,6 @@ struct GFX_DPA_SETTING {
};
struct VT1636_DPA_SETTING {
- int PanelSizeID;
u8 CLK_SEL_ST1;
u8 CLK_SEL_ST2;
};
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index 84e21b39dd0b..41ca198b5098 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -195,7 +195,9 @@ void viafb_dvi_set_mode(struct VideoModeTable *mode, int mode_bpp,
struct crt_mode_table *pDviTiming;
unsigned long desirePixelClock, maxPixelClock;
pDviTiming = mode->crtc;
- desirePixelClock = pDviTiming->clk / 1000000;
+ desirePixelClock = pDviTiming->refresh_rate
+ * pDviTiming->crtc.hor_total * pDviTiming->crtc.ver_total
+ / 1000000;
maxPixelClock = (unsigned long)viaparinfo->
tmds_setting_info->max_pixel_clock;
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 36d73f940d8b..5728fd76bc11 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -22,342 +22,290 @@
#include <linux/via-core.h>
#include "global.h"
-static struct pll_map pll_value[] = {
- {25175000,
- {99, 7, 3},
- {85, 3, 4}, /* ignoring bit difference: 0x00008000 */
- {141, 5, 4},
- {141, 5, 4} },
- {29581000,
- {33, 4, 2},
- {66, 2, 4}, /* ignoring bit difference: 0x00808000 */
- {166, 5, 4}, /* ignoring bit difference: 0x00008000 */
- {165, 5, 4} },
- {26880000,
- {15, 4, 1},
- {30, 2, 3}, /* ignoring bit difference: 0x00808000 */
- {150, 5, 4},
- {150, 5, 4} },
- {31500000,
- {53, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {141, 4, 4}, /* ignoring bit difference: 0x00008000 */
- {176, 5, 4},
- {176, 5, 4} },
- {31728000,
- {31, 7, 1},
- {177, 5, 4}, /* ignoring bit difference: 0x00008000 */
- {177, 5, 4},
- {142, 4, 4} },
- {32688000,
- {73, 4, 3},
- {146, 4, 4}, /* ignoring bit difference: 0x00008000 */
- {183, 5, 4},
- {146, 4, 4} },
- {36000000,
- {101, 5, 3}, /* ignoring bit difference: 0x00008000 */
- {161, 4, 4}, /* ignoring bit difference: 0x00008000 */
- {202, 5, 4},
- {161, 4, 4} },
- {40000000,
- {89, 4, 3},
- {89, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {112, 5, 3},
- {112, 5, 3} },
- {41291000,
- {23, 4, 1},
- {69, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {115, 5, 3},
- {115, 5, 3} },
- {43163000,
- {121, 5, 3},
- {121, 5, 3}, /* ignoring bit difference: 0x00008000 */
- {121, 5, 3},
- {121, 5, 3} },
- {45250000,
- {127, 5, 3},
- {127, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {127, 5, 3},
- {127, 5, 3} },
- {46000000,
- {90, 7, 2},
- {103, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {129, 5, 3},
- {103, 4, 3} },
- {46996000,
- {105, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {131, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {131, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {105, 4, 3} },
- {48000000,
- {67, 20, 0},
- {134, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {134, 5, 3},
- {134, 5, 3} },
- {48875000,
- {99, 29, 0},
- {82, 3, 3}, /* ignoring bit difference: 0x00808000 */
- {82, 3, 3}, /* ignoring bit difference: 0x00808000 */
- {137, 5, 3} },
- {49500000,
- {83, 6, 2},
- {83, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {138, 5, 3},
- {83, 3, 3} },
- {52406000,
- {117, 4, 3},
- {117, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {117, 4, 3},
- {88, 3, 3} },
- {52977000,
- {37, 5, 1},
- {148, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {148, 5, 3},
- {148, 5, 3} },
- {56250000,
- {55, 7, 1}, /* ignoring bit difference: 0x00008000 */
- {126, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {157, 5, 3},
- {157, 5, 3} },
- {57275000,
- {0, 0, 0},
- {2, 2, 0},
- {2, 2, 0},
- {157, 5, 3} }, /* ignoring bit difference: 0x00808000 */
- {60466000,
- {76, 9, 1},
- {169, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {169, 5, 3}, /* FIXED: old = {72, 2, 3} */
- {169, 5, 3} },
- {61500000,
- {86, 20, 0},
- {172, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {172, 5, 3},
- {172, 5, 3} },
- {65000000,
- {109, 6, 2}, /* ignoring bit difference: 0x00008000 */
- {109, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {109, 3, 3},
- {109, 3, 3} },
- {65178000,
- {91, 5, 2},
- {182, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {109, 3, 3},
- {182, 5, 3} },
- {66750000,
- {75, 4, 2},
- {150, 4, 3}, /* ignoring bit difference: 0x00808000 */
- {150, 4, 3},
- {112, 3, 3} },
- {68179000,
- {19, 4, 0},
- {114, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {190, 5, 3},
- {191, 5, 3} },
- {69924000,
- {83, 17, 0},
- {195, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {195, 5, 3},
- {195, 5, 3} },
- {70159000,
- {98, 20, 0},
- {196, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {196, 5, 3},
- {195, 5, 3} },
- {72000000,
- {121, 24, 0},
- {161, 4, 3}, /* ignoring bit difference: 0x00808000 */
- {161, 4, 3},
- {161, 4, 3} },
- {78750000,
- {33, 3, 1},
- {66, 3, 2}, /* ignoring bit difference: 0x00008000 */
- {110, 5, 2},
- {110, 5, 2} },
- {80136000,
- {28, 5, 0},
- {68, 3, 2}, /* ignoring bit difference: 0x00008000 */
- {112, 5, 2},
- {112, 5, 2} },
- {83375000,
- {93, 2, 3},
- {93, 4, 2}, /* ignoring bit difference: 0x00800000 */
- {93, 4, 2}, /* ignoring bit difference: 0x00800000 */
- {117, 5, 2} },
- {83950000,
- {41, 7, 0},
- {117, 5, 2}, /* ignoring bit difference: 0x00008000 */
- {117, 5, 2},
- {117, 5, 2} },
- {84750000,
- {118, 5, 2},
- {118, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {118, 5, 2},
- {118, 5, 2} },
- {85860000,
- {84, 7, 1},
- {120, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {120, 5, 2},
- {118, 5, 2} },
- {88750000,
- {31, 5, 0},
- {124, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {174, 7, 2}, /* ignoring bit difference: 0x00808000 */
- {124, 5, 2} },
- {94500000,
- {33, 5, 0},
- {132, 5, 2}, /* ignoring bit difference: 0x00008000 */
- {132, 5, 2},
- {132, 5, 2} },
- {97750000,
- {82, 6, 1},
- {137, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {137, 5, 2},
- {137, 5, 2} },
- {101000000,
- {127, 9, 1},
- {141, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {141, 5, 2},
- {141, 5, 2} },
- {106500000,
- {119, 4, 2},
- {119, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {119, 4, 2},
- {149, 5, 2} },
- {108000000,
- {121, 4, 2},
- {121, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {151, 5, 2},
- {151, 5, 2} },
- {113309000,
- {95, 12, 0},
- {95, 3, 2}, /* ignoring bit difference: 0x00808000 */
- {95, 3, 2},
- {159, 5, 2} },
- {118840000,
- {83, 5, 1},
- {166, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {166, 5, 2},
- {166, 5, 2} },
- {119000000,
- {108, 13, 0},
- {133, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {133, 4, 2},
- {167, 5, 2} },
- {121750000,
- {85, 5, 1},
- {170, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {68, 2, 2},
- {0, 0, 0} },
- {125104000,
- {53, 6, 0}, /* ignoring bit difference: 0x00008000 */
- {106, 3, 2}, /* ignoring bit difference: 0x00008000 */
- {175, 5, 2},
- {0, 0, 0} },
- {135000000,
- {94, 5, 1},
- {28, 3, 0}, /* ignoring bit difference: 0x00804000 */
- {151, 4, 2},
- {189, 5, 2} },
- {136700000,
- {115, 12, 0},
- {191, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {191, 5, 2},
- {191, 5, 2} },
- {138400000,
- {87, 9, 0},
- {116, 3, 2}, /* ignoring bit difference: 0x00808000 */
- {116, 3, 2},
- {194, 5, 2} },
- {146760000,
- {103, 5, 1},
- {206, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {206, 5, 2},
- {206, 5, 2} },
- {153920000,
- {86, 8, 0},
- {86, 4, 1}, /* ignoring bit difference: 0x00808000 */
- {86, 4, 1},
- {86, 4, 1} }, /* FIXED: old = {84, 2, 1} */
- {156000000,
- {109, 5, 1},
- {109, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {109, 5, 1},
- {108, 5, 1} },
- {157500000,
- {55, 5, 0}, /* ignoring bit difference: 0x00008000 */
- {22, 2, 0}, /* ignoring bit difference: 0x00802000 */
- {110, 5, 1},
- {110, 5, 1} },
- {162000000,
- {113, 5, 1},
- {113, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {113, 5, 1},
- {113, 5, 1} },
- {187000000,
- {118, 9, 0},
- {131, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {131, 5, 1},
- {131, 5, 1} },
- {193295000,
- {108, 8, 0},
- {81, 3, 1}, /* ignoring bit difference: 0x00808000 */
- {135, 5, 1},
- {135, 5, 1} },
- {202500000,
- {99, 7, 0},
- {85, 3, 1}, /* ignoring bit difference: 0x00808000 */
- {142, 5, 1},
- {142, 5, 1} },
- {204000000,
- {100, 7, 0},
- {143, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {143, 5, 1},
- {143, 5, 1} },
- {218500000,
- {92, 6, 0},
- {153, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {153, 5, 1},
- {153, 5, 1} },
- {234000000,
- {98, 6, 0},
- {98, 3, 1}, /* ignoring bit difference: 0x00008000 */
- {98, 3, 1},
- {164, 5, 1} },
- {267250000,
- {112, 6, 0},
- {112, 3, 1}, /* ignoring bit difference: 0x00808000 */
- {187, 5, 1},
- {187, 5, 1} },
- {297500000,
- {102, 5, 0}, /* ignoring bit difference: 0x00008000 */
- {166, 4, 1}, /* ignoring bit difference: 0x00008000 */
- {208, 5, 1},
- {208, 5, 1} },
- {74481000,
- {26, 5, 0},
- {125, 3, 3}, /* ignoring bit difference: 0x00808000 */
- {208, 5, 3},
- {209, 5, 3} },
- {172798000,
- {121, 5, 1},
- {121, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {121, 5, 1},
- {121, 5, 1} },
- {122614000,
- {60, 7, 0},
- {137, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {137, 4, 2},
- {172, 5, 2} },
- {74270000,
- {83, 8, 1},
- {208, 5, 3},
- {208, 5, 3},
- {0, 0, 0} },
- {148500000,
- {83, 8, 0},
- {208, 5, 2},
- {166, 4, 2},
- {208, 5, 2} }
+static struct pll_config cle266_pll_config[] = {
+ {19, 4, 0},
+ {26, 5, 0},
+ {28, 5, 0},
+ {31, 5, 0},
+ {33, 5, 0},
+ {55, 5, 0},
+ {102, 5, 0},
+ {53, 6, 0},
+ {92, 6, 0},
+ {98, 6, 0},
+ {112, 6, 0},
+ {41, 7, 0},
+ {60, 7, 0},
+ {99, 7, 0},
+ {100, 7, 0},
+ {83, 8, 0},
+ {86, 8, 0},
+ {108, 8, 0},
+ {87, 9, 0},
+ {118, 9, 0},
+ {95, 12, 0},
+ {115, 12, 0},
+ {108, 13, 0},
+ {83, 17, 0},
+ {67, 20, 0},
+ {86, 20, 0},
+ {98, 20, 0},
+ {121, 24, 0},
+ {99, 29, 0},
+ {33, 3, 1},
+ {15, 4, 1},
+ {23, 4, 1},
+ {37, 5, 1},
+ {83, 5, 1},
+ {85, 5, 1},
+ {94, 5, 1},
+ {103, 5, 1},
+ {109, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {82, 6, 1},
+ {31, 7, 1},
+ {55, 7, 1},
+ {84, 7, 1},
+ {83, 8, 1},
+ {76, 9, 1},
+ {127, 9, 1},
+ {33, 4, 2},
+ {75, 4, 2},
+ {119, 4, 2},
+ {121, 4, 2},
+ {91, 5, 2},
+ {118, 5, 2},
+ {83, 6, 2},
+ {109, 6, 2},
+ {90, 7, 2},
+ {93, 2, 3},
+ {53, 3, 3},
+ {73, 4, 3},
+ {89, 4, 3},
+ {105, 4, 3},
+ {117, 4, 3},
+ {101, 5, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {99, 7, 3}
+};
+
+static struct pll_config k800_pll_config[] = {
+ {22, 2, 0},
+ {28, 3, 0},
+ {81, 3, 1},
+ {85, 3, 1},
+ {98, 3, 1},
+ {112, 3, 1},
+ {86, 4, 1},
+ {166, 4, 1},
+ {109, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {131, 5, 1},
+ {143, 5, 1},
+ {153, 5, 1},
+ {66, 3, 2},
+ {68, 3, 2},
+ {95, 3, 2},
+ {106, 3, 2},
+ {116, 3, 2},
+ {93, 4, 2},
+ {119, 4, 2},
+ {121, 4, 2},
+ {133, 4, 2},
+ {137, 4, 2},
+ {117, 5, 2},
+ {118, 5, 2},
+ {120, 5, 2},
+ {124, 5, 2},
+ {132, 5, 2},
+ {137, 5, 2},
+ {141, 5, 2},
+ {166, 5, 2},
+ {170, 5, 2},
+ {191, 5, 2},
+ {206, 5, 2},
+ {208, 5, 2},
+ {30, 2, 3},
+ {69, 3, 3},
+ {82, 3, 3},
+ {83, 3, 3},
+ {109, 3, 3},
+ {114, 3, 3},
+ {125, 3, 3},
+ {89, 4, 3},
+ {103, 4, 3},
+ {117, 4, 3},
+ {126, 4, 3},
+ {150, 4, 3},
+ {161, 4, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {131, 5, 3},
+ {134, 5, 3},
+ {148, 5, 3},
+ {169, 5, 3},
+ {172, 5, 3},
+ {182, 5, 3},
+ {195, 5, 3},
+ {196, 5, 3},
+ {208, 5, 3},
+ {66, 2, 4},
+ {85, 3, 4},
+ {141, 4, 4},
+ {146, 4, 4},
+ {161, 4, 4},
+ {177, 5, 4}
+};
+
+static struct pll_config cx700_pll_config[] = {
+ {98, 3, 1},
+ {86, 4, 1},
+ {109, 5, 1},
+ {110, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {131, 5, 1},
+ {135, 5, 1},
+ {142, 5, 1},
+ {143, 5, 1},
+ {153, 5, 1},
+ {187, 5, 1},
+ {208, 5, 1},
+ {68, 2, 2},
+ {95, 3, 2},
+ {116, 3, 2},
+ {93, 4, 2},
+ {119, 4, 2},
+ {133, 4, 2},
+ {137, 4, 2},
+ {151, 4, 2},
+ {166, 4, 2},
+ {110, 5, 2},
+ {112, 5, 2},
+ {117, 5, 2},
+ {118, 5, 2},
+ {120, 5, 2},
+ {132, 5, 2},
+ {137, 5, 2},
+ {141, 5, 2},
+ {151, 5, 2},
+ {166, 5, 2},
+ {175, 5, 2},
+ {191, 5, 2},
+ {206, 5, 2},
+ {174, 7, 2},
+ {82, 3, 3},
+ {109, 3, 3},
+ {117, 4, 3},
+ {150, 4, 3},
+ {161, 4, 3},
+ {112, 5, 3},
+ {115, 5, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {129, 5, 3},
+ {131, 5, 3},
+ {134, 5, 3},
+ {138, 5, 3},
+ {148, 5, 3},
+ {157, 5, 3},
+ {169, 5, 3},
+ {172, 5, 3},
+ {190, 5, 3},
+ {195, 5, 3},
+ {196, 5, 3},
+ {208, 5, 3},
+ {141, 5, 4},
+ {150, 5, 4},
+ {166, 5, 4},
+ {176, 5, 4},
+ {177, 5, 4},
+ {183, 5, 4},
+ {202, 5, 4}
+};
+
+static struct pll_config vx855_pll_config[] = {
+ {86, 4, 1},
+ {108, 5, 1},
+ {110, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {131, 5, 1},
+ {135, 5, 1},
+ {142, 5, 1},
+ {143, 5, 1},
+ {153, 5, 1},
+ {164, 5, 1},
+ {187, 5, 1},
+ {208, 5, 1},
+ {110, 5, 2},
+ {112, 5, 2},
+ {117, 5, 2},
+ {118, 5, 2},
+ {124, 5, 2},
+ {132, 5, 2},
+ {137, 5, 2},
+ {141, 5, 2},
+ {149, 5, 2},
+ {151, 5, 2},
+ {159, 5, 2},
+ {166, 5, 2},
+ {167, 5, 2},
+ {172, 5, 2},
+ {189, 5, 2},
+ {191, 5, 2},
+ {194, 5, 2},
+ {206, 5, 2},
+ {208, 5, 2},
+ {83, 3, 3},
+ {88, 3, 3},
+ {109, 3, 3},
+ {112, 3, 3},
+ {103, 4, 3},
+ {105, 4, 3},
+ {161, 4, 3},
+ {112, 5, 3},
+ {115, 5, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {134, 5, 3},
+ {137, 5, 3},
+ {148, 5, 3},
+ {157, 5, 3},
+ {169, 5, 3},
+ {172, 5, 3},
+ {182, 5, 3},
+ {191, 5, 3},
+ {195, 5, 3},
+ {209, 5, 3},
+ {142, 4, 4},
+ {146, 4, 4},
+ {161, 4, 4},
+ {141, 5, 4},
+ {150, 5, 4},
+ {165, 5, 4},
+ {176, 5, 4}
+};
+
+/* according to VIA Technologies these values are based on experiment */
+static struct io_reg scaling_parameters[] = {
+ {VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
+ {VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
+ {VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
+ {VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
+ {VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
+ {VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
+ {VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
+ {VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
+ {VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
+ {VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
+ {VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
+ {VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
+ {VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
+ {VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
};
static struct fifo_depth_select display_fifo_depth_reg = {
@@ -751,7 +699,7 @@ void viafb_unlock_crt(void)
viafb_write_reg_mask(CR47, VIACR, 0, BIT0);
}
-void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
+static void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
{
outb(index, LUT_INDEX_WRITE);
outb(r, LUT_DATA);
@@ -1674,43 +1622,63 @@ static u32 vx855_encode_pll(struct pll_config pll)
| pll.multiplier;
}
-u32 viafb_get_clk_value(int clk)
+static inline u32 get_pll_internal_frequency(u32 ref_freq,
+ struct pll_config pll)
{
- u32 value = 0;
- int i = 0;
+ return ref_freq / pll.divisor * pll.multiplier;
+}
- while (i < NUM_TOTAL_PLL_TABLE && clk != pll_value[i].clk)
- i++;
+static inline u32 get_pll_output_frequency(u32 ref_freq, struct pll_config pll)
+{
+ return get_pll_internal_frequency(ref_freq, pll)>>pll.rshift;
+}
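+/*
+ * The PLL output frequency is f_ref * multiplier / divisor, shifted right
+ * by rshift; get_pll_config() below brute-forces the chip's table for the
+ * entry whose output is closest to the requested clock, using the
+ * 14.31818 MHz reference.
+ */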
- if (i == NUM_TOTAL_PLL_TABLE) {
- printk(KERN_WARNING "viafb_get_clk_value: PLL lookup failed!");
- } else {
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_CLE266:
- case UNICHROME_K400:
- value = cle266_encode_pll(pll_value[i].cle266_pll);
- break;
+static struct pll_config get_pll_config(struct pll_config *config, int size,
+ int clk)
+{
+ struct pll_config best = config[0];
+ const u32 f0 = 14318180; /* X1 frequency */
+ int i;
- case UNICHROME_K800:
- case UNICHROME_PM800:
- case UNICHROME_CN700:
- value = k800_encode_pll(pll_value[i].k800_pll);
- break;
+ for (i = 1; i < size; i++) {
+ if (abs(get_pll_output_frequency(f0, config[i]) - clk)
+ < abs(get_pll_output_frequency(f0, best) - clk))
+ best = config[i];
+ }
- case UNICHROME_CX700:
- case UNICHROME_CN750:
- case UNICHROME_K8M890:
- case UNICHROME_P4M890:
- case UNICHROME_P4M900:
- case UNICHROME_VX800:
- value = k800_encode_pll(pll_value[i].cx700_pll);
- break;
+ return best;
+}
- case UNICHROME_VX855:
- case UNICHROME_VX900:
- value = vx855_encode_pll(pll_value[i].vx855_pll);
- break;
- }
+u32 viafb_get_clk_value(int clk)
+{
+ u32 value = 0;
+
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_CLE266:
+ case UNICHROME_K400:
+ value = cle266_encode_pll(get_pll_config(cle266_pll_config,
+ ARRAY_SIZE(cle266_pll_config), clk));
+ break;
+ case UNICHROME_K800:
+ case UNICHROME_PM800:
+ case UNICHROME_CN700:
+ value = k800_encode_pll(get_pll_config(k800_pll_config,
+ ARRAY_SIZE(k800_pll_config), clk));
+ break;
+ case UNICHROME_CX700:
+ case UNICHROME_CN750:
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M890:
+ case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ value = k800_encode_pll(get_pll_config(cx700_pll_config,
+ ARRAY_SIZE(cx700_pll_config), clk));
+ break;
+ case UNICHROME_VX855:
+ case UNICHROME_VX900:
+ value = vx855_encode_pll(get_pll_config(vx855_pll_config,
+ ARRAY_SIZE(vx855_pll_config), clk));
+ break;
}
return value;
@@ -2034,7 +2002,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
int i;
int index = 0;
int h_addr, v_addr;
- u32 pll_D_N;
+ u32 pll_D_N, clock;
for (i = 0; i < video_mode->mode_array; i++) {
index = i;
@@ -2087,7 +2055,9 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
&& (viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400))
viafb_load_FIFO_reg(set_iga, h_addr, v_addr);
- pll_D_N = viafb_get_clk_value(crt_table[index].clk);
+ clock = crt_reg.hor_total * crt_reg.ver_total
+ * crt_table[index].refresh_rate;
+ pll_D_N = viafb_get_clk_value(clock);
DEBUG_MSG(KERN_INFO "PLL=%x", pll_D_N);
viafb_set_vclock(pll_D_N, set_iga);
@@ -2117,9 +2087,6 @@ void viafb_update_device_setting(int hres, int vres,
int bpp, int vmode_refresh, int flag)
{
if (flag == 0) {
- viaparinfo->crt_setting_info->h_active = hres;
- viaparinfo->crt_setting_info->v_active = vres;
- viaparinfo->crt_setting_info->bpp = bpp;
viaparinfo->crt_setting_info->refresh_rate =
vmode_refresh;
@@ -2129,13 +2096,9 @@ void viafb_update_device_setting(int hres, int vres,
viaparinfo->lvds_setting_info->h_active = hres;
viaparinfo->lvds_setting_info->v_active = vres;
viaparinfo->lvds_setting_info->bpp = bpp;
- viaparinfo->lvds_setting_info->refresh_rate =
- vmode_refresh;
viaparinfo->lvds_setting_info2->h_active = hres;
viaparinfo->lvds_setting_info2->v_active = vres;
viaparinfo->lvds_setting_info2->bpp = bpp;
- viaparinfo->lvds_setting_info2->refresh_rate =
- vmode_refresh;
} else {
if (viaparinfo->tmds_setting_info->iga_path == IGA2) {
@@ -2147,15 +2110,11 @@ void viafb_update_device_setting(int hres, int vres,
viaparinfo->lvds_setting_info->h_active = hres;
viaparinfo->lvds_setting_info->v_active = vres;
viaparinfo->lvds_setting_info->bpp = bpp;
- viaparinfo->lvds_setting_info->refresh_rate =
- vmode_refresh;
}
if (IGA2 == viaparinfo->lvds_setting_info2->iga_path) {
viaparinfo->lvds_setting_info2->h_active = hres;
viaparinfo->lvds_setting_info2->v_active = vres;
viaparinfo->lvds_setting_info2->bpp = bpp;
- viaparinfo->lvds_setting_info2->refresh_rate =
- vmode_refresh;
}
}
}
@@ -2430,6 +2389,7 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
break;
}
+ viafb_write_regx(scaling_parameters, ARRAY_SIZE(scaling_parameters));
device_off();
via_set_state(devices, VIA_STATE_OFF);
@@ -2608,35 +2568,43 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
int viafb_get_pixclock(int hres, int vres, int vmode_refresh)
{
int i;
+ struct crt_mode_table *best;
+ struct VideoModeTable *vmode = viafb_get_mode(hres, vres);
+
+ if (!vmode)
+ return RES_640X480_60HZ_PIXCLOCK;
- for (i = 0; i < NUM_TOTAL_RES_MAP_REFRESH; i++) {
- if ((hres == res_map_refresh_tbl[i].hres)
- && (vres == res_map_refresh_tbl[i].vres)
- && (vmode_refresh == res_map_refresh_tbl[i].vmode_refresh))
- return res_map_refresh_tbl[i].pixclock;
+ best = &vmode->crtc[0];
+ for (i = 1; i < vmode->mode_array; i++) {
+ if (abs(vmode->crtc[i].refresh_rate - vmode_refresh)
+ < abs(best->refresh_rate - vmode_refresh))
+ best = &vmode->crtc[i];
}
- return RES_640X480_60HZ_PIXCLOCK;
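+ /* var->pixclock is in picoseconds: 10^12 / (htotal * vtotal * refresh); the division is split so the intermediate fits in 32 bits */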
+ return 1000000000 / (best->crtc.hor_total * best->crtc.ver_total)
+ * 1000 / best->refresh_rate;
}
int viafb_get_refresh(int hres, int vres, u32 long_refresh)
{
-#define REFRESH_TOLERANCE 3
- int i, nearest = -1, diff = REFRESH_TOLERANCE;
- for (i = 0; i < NUM_TOTAL_RES_MAP_REFRESH; i++) {
- if ((hres == res_map_refresh_tbl[i].hres)
- && (vres == res_map_refresh_tbl[i].vres)
- && (diff > (abs(long_refresh -
- res_map_refresh_tbl[i].vmode_refresh)))) {
- diff = abs(long_refresh - res_map_refresh_tbl[i].
- vmode_refresh);
- nearest = i;
- }
+ int i;
+ struct crt_mode_table *best;
+ struct VideoModeTable *vmode = viafb_get_mode(hres, vres);
+
+ if (!vmode)
+ return 60;
+
+ best = &vmode->crtc[0];
+ for (i = 1; i < vmode->mode_array; i++) {
+ if (abs(vmode->crtc[i].refresh_rate - long_refresh)
+ < abs(best->refresh_rate - long_refresh))
+ best = &vmode->crtc[i];
}
-#undef REFRESH_TOLERANCE
- if (nearest > 0)
- return res_map_refresh_tbl[nearest].vmode_refresh;
- return 60;
+
+ if (abs(best->refresh_rate - long_refresh) > 3)
+ return 60;
+
+ return best->refresh_rate;
}
static void device_off(void)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index 668d534542ef..7295263299f7 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -893,8 +893,6 @@ struct iga2_crtc_timing {
/* VT3410 chipset*/
#define VX900_FUNCTION3 0x3410
-#define NUM_TOTAL_PLL_TABLE ARRAY_SIZE(pll_value)
-
struct IODATA {
u8 Index;
u8 Mask;
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 3425c3969806..64bc7e763103 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -26,10 +26,12 @@
/* CLE266 Software Power Sequence */
/* {Mask}, {Data}, {Delay} */
-int PowerSequenceOn[3][3] = { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06},
- {0x19, 0x1FE, 0x01} };
-int PowerSequenceOff[3][3] = { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00},
- {0xD2, 0x19, 0x01} };
+static const int PowerSequenceOn[3][3] = {
+ {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06}, {0x19, 0x1FE, 0x01}
+};
+static const int PowerSequenceOff[3][3] = {
+ {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00}, {0xD2, 0x19, 0x01}
+};
static struct _lcd_scaling_factor lcd_scaling_factor = {
/* LCD Horizontal Scaling Factor Register */
@@ -95,8 +97,6 @@ void __devinit viafb_init_lcd_size(void)
DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n");
fp_id_to_vindex(viafb_lcd_panel_id);
- viaparinfo->lvds_setting_info2->lcd_panel_id =
- viaparinfo->lvds_setting_info->lcd_panel_id;
viaparinfo->lvds_setting_info2->lcd_panel_hres =
viaparinfo->lvds_setting_info->lcd_panel_hres;
viaparinfo->lvds_setting_info2->lcd_panel_vres =
@@ -203,176 +203,132 @@ static void __devinit fp_id_to_vindex(int panel_id)
case 0x0:
viaparinfo->lvds_setting_info->lcd_panel_hres = 640;
viaparinfo->lvds_setting_info->lcd_panel_vres = 480;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID0_640X480;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x1:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID1_800X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x2:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x3:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x4:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1024;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID4_1280X1024;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x5:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1400;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1050;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID5_1400X1050;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x6:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1600;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1200;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID6_1600X1200;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x8:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 480;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDA_800X480;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x9:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0xA:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xB:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xC:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xD:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1024;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID4_1280X1024;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xE:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1400;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1050;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID5_1400X1050;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xF:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1600;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1200;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID6_1600X1200;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x10:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1366;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID7_1366X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x11:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID8_1024X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x12:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x13:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 800;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID9_1280X800;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x14:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1360;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDB_1360X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x15:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x16:
viaparinfo->lvds_setting_info->lcd_panel_hres = 480;
viaparinfo->lvds_setting_info->lcd_panel_vres = 640;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDC_480X640;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
@@ -380,16 +336,12 @@ static void __devinit fp_id_to_vindex(int panel_id)
/* OLPC XO-1.5 panel */
viaparinfo->lvds_setting_info->lcd_panel_hres = 1200;
viaparinfo->lvds_setting_info->lcd_panel_vres = 900;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDD_1200X900;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
default:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID1_800X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
}
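[editor's note] Every arm of this switch loses only its lcd_panel_id assignment; a panel is now described entirely by its resolution plus the dual-edge and dithering flags. Any consumer that used to key off the ID enum can test the stored resolution instead — a minimal sketch, with a hypothetical helper name:

	/* Hypothetical helper, not part of the patch: */
	static bool panel_is(const struct lvds_setting_information *info,
			     int hres, int vres)
	{
		return info->lcd_panel_hres == hres &&
		       info->lcd_panel_vres == vres;
	}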
@@ -610,7 +562,7 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
int set_vres = plvds_setting_info->v_active;
int panel_hres = plvds_setting_info->lcd_panel_hres;
int panel_vres = plvds_setting_info->lcd_panel_vres;
- u32 pll_D_N;
+ u32 pll_D_N, clock;
struct display_timing mode_crt_reg, panel_crt_reg;
struct crt_mode_table *panel_crt_table = NULL;
struct VideoModeTable *vmode_tbl = viafb_get_mode(panel_hres,
@@ -625,7 +577,9 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
DEBUG_MSG(KERN_INFO "bellow viafb_lcd_set_mode!!\n");
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name)
viafb_init_lvds_vt1636(plvds_setting_info, plvds_chip_info);
- plvds_setting_info->vclk = panel_crt_table->clk;
+ clock = panel_crt_reg.hor_total * panel_crt_reg.ver_total
+ * panel_crt_table->refresh_rate;
+ plvds_setting_info->vclk = clock;
if (set_iga == IGA1) {
/* IGA1 doesn't have LCD scaling, so set it as centering. */
viafb_load_crtc_timing(lcd_centering_timging
@@ -660,7 +614,7 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
fill_lcd_format();
- pll_D_N = viafb_get_clk_value(panel_crt_table[0].clk);
+ pll_D_N = viafb_get_clk_value(clock);
DEBUG_MSG(KERN_INFO "PLL=0x%x", pll_D_N);
viafb_set_vclock(pll_D_N, set_iga);
lcd_patch_skew(plvds_setting_info, plvds_chip_info);
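[editor's note] With the per-mode clk field gone (see the share.h hunk below), the dot clock is recomputed from the timing itself: total pixels per line times total lines times refresh rate. Sanity check against the removed constants: 1024x768@60 has 1344x806 totals, and 1344 * 806 * 60 = 64,995,840 Hz, matching CLK_65_000M to within rounding. The arithmetic, as a sketch over the display_timing fields used above:

	/* Sketch only — same computation as the '+' lines above: */
	static u32 timing_to_clock_hz(const struct display_timing *t,
				      int refresh_rate)
	{
		return t->hor_total * t->ver_total * refresh_rate;
	}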
@@ -1064,34 +1018,33 @@ static struct display_timing lcd_centering_timging(struct display_timing
bool viafb_lcd_get_mobile_state(bool *mobile)
{
- unsigned char *romptr, *tableptr;
+ unsigned char __iomem *romptr, *tableptr, *biosptr;
u8 core_base;
- unsigned char *biosptr;
/* Rom address */
- u32 romaddr = 0x000C0000;
- u16 start_pattern = 0;
+ const u32 romaddr = 0x000C0000;
+ u16 start_pattern;
biosptr = ioremap(romaddr, 0x10000);
+ start_pattern = readw(biosptr);
- memcpy(&start_pattern, biosptr, 2);
/* Compare pattern */
if (start_pattern == 0xAA55) {
/* Get the start of Table */
/* 0x1B means BIOS offset position */
romptr = biosptr + 0x1B;
- tableptr = biosptr + *((u16 *) romptr);
+ tableptr = biosptr + readw(romptr);
/* Get the start of biosver structure */
/* 18 means BIOS version position. */
romptr = tableptr + 18;
- romptr = biosptr + *((u16 *) romptr);
+ romptr = biosptr + readw(romptr);
/* The offset should be 44, but the
actual image is three chars shorter. */
/* pRom += 44; */
romptr += 41;
- core_base = *romptr++;
+ core_base = readb(romptr);
if (core_base & 0x8)
*mobile = false;
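[editor's note] The readw()/readb() conversions are the standard remedy for dereferencing ioremap()ed memory directly, which sparse flags through the __iomem annotation and which is not guaranteed to work on every architecture. The corrected access pattern as a self-contained sketch (the iounmap() is my addition for completeness, not shown in this hunk):

	void __iomem *bios = ioremap(0x000C0000, 0x10000);

	if (bios) {
		u16 sig = readw(bios);	/* not *(u16 *)bios, not memcpy() */

		if (sig == 0xAA55) {
			/* walk the BIOS tables via readw()/readb() */
		}
		iounmap(bios);
	}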
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 2cbe1031b421..4b7831f0d012 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -627,77 +627,6 @@
#define M2048x1536_R60_HSP NEGATIVE
#define M2048x1536_R60_VSP POSITIVE
-/* define PLL index: */
-#define CLK_25_175M 25175000
-#define CLK_26_880M 26880000
-#define CLK_29_581M 29581000
-#define CLK_31_500M 31500000
-#define CLK_31_728M 31728000
-#define CLK_32_668M 32688000
-#define CLK_36_000M 36000000
-#define CLK_40_000M 40000000
-#define CLK_41_291M 41291000
-#define CLK_43_163M 43163000
-#define CLK_45_250M 45250000 /* 45.46MHz */
-#define CLK_46_000M 46000000
-#define CLK_46_996M 46996000
-#define CLK_48_000M 48000000
-#define CLK_48_875M 48875000
-#define CLK_49_500M 49500000
-#define CLK_52_406M 52406000
-#define CLK_52_977M 52977000
-#define CLK_56_250M 56250000
-#define CLK_57_275M 57275000
-#define CLK_60_466M 60466000
-#define CLK_61_500M 61500000
-#define CLK_65_000M 65000000
-#define CLK_65_178M 65178000
-#define CLK_66_750M 66750000 /* 67.116MHz */
-#define CLK_68_179M 68179000
-#define CLK_69_924M 69924000
-#define CLK_70_159M 70159000
-#define CLK_72_000M 72000000
-#define CLK_74_270M 74270000
-#define CLK_78_750M 78750000
-#define CLK_80_136M 80136000
-#define CLK_83_375M 83375000
-#define CLK_83_950M 83950000
-#define CLK_84_750M 84750000 /* 84.537Mhz */
-#define CLK_85_860M 85860000
-#define CLK_88_750M 88750000
-#define CLK_94_500M 94500000
-#define CLK_97_750M 97750000
-#define CLK_101_000M 101000000
-#define CLK_106_500M 106500000
-#define CLK_108_000M 108000000
-#define CLK_113_309M 113309000
-#define CLK_118_840M 118840000
-#define CLK_119_000M 119000000
-#define CLK_121_750M 121750000 /* 121.704MHz */
-#define CLK_125_104M 125104000
-#define CLK_135_000M 135000000
-#define CLK_136_700M 136700000
-#define CLK_138_400M 138400000
-#define CLK_146_760M 146760000
-#define CLK_148_500M 148500000
-
-#define CLK_153_920M 153920000
-#define CLK_156_000M 156000000
-#define CLK_157_500M 157500000
-#define CLK_162_000M 162000000
-#define CLK_187_000M 187000000
-#define CLK_193_295M 193295000
-#define CLK_202_500M 202500000
-#define CLK_204_000M 204000000
-#define CLK_218_500M 218500000
-#define CLK_234_000M 234000000
-#define CLK_267_250M 267250000
-#define CLK_297_500M 297500000
-#define CLK_74_481M 74481000
-#define CLK_172_798M 172798000
-#define CLK_122_614M 122614000
-
-
/* Definition CRTC Timing Index */
#define H_TOTAL_INDEX 0
#define H_ADDR_INDEX 1
@@ -722,76 +651,7 @@
/* Definition Video Mode Pixel Clock (picoseconds)
*/
-#define RES_480X640_60HZ_PIXCLOCK 39722
#define RES_640X480_60HZ_PIXCLOCK 39722
-#define RES_640X480_75HZ_PIXCLOCK 31747
-#define RES_640X480_85HZ_PIXCLOCK 27777
-#define RES_640X480_100HZ_PIXCLOCK 23168
-#define RES_640X480_120HZ_PIXCLOCK 19081
-#define RES_720X480_60HZ_PIXCLOCK 37020
-#define RES_720X576_60HZ_PIXCLOCK 30611
-#define RES_800X600_60HZ_PIXCLOCK 25000
-#define RES_800X600_75HZ_PIXCLOCK 20203
-#define RES_800X600_85HZ_PIXCLOCK 17777
-#define RES_800X600_100HZ_PIXCLOCK 14667
-#define RES_800X600_120HZ_PIXCLOCK 11912
-#define RES_800X480_60HZ_PIXCLOCK 33805
-#define RES_848X480_60HZ_PIXCLOCK 31756
-#define RES_856X480_60HZ_PIXCLOCK 31518
-#define RES_1024X512_60HZ_PIXCLOCK 24218
-#define RES_1024X600_60HZ_PIXCLOCK 20460
-#define RES_1024X768_60HZ_PIXCLOCK 15385
-#define RES_1024X768_75HZ_PIXCLOCK 12699
-#define RES_1024X768_85HZ_PIXCLOCK 10582
-#define RES_1024X768_100HZ_PIXCLOCK 8825
-#define RES_1152X864_75HZ_PIXCLOCK 9259
-#define RES_1280X768_60HZ_PIXCLOCK 12480
-#define RES_1280X800_60HZ_PIXCLOCK 11994
-#define RES_1280X960_60HZ_PIXCLOCK 9259
-#define RES_1280X1024_60HZ_PIXCLOCK 9260
-#define RES_1280X1024_75HZ_PIXCLOCK 7408
-#define RES_1280X768_85HZ_PIXCLOCK 6349
-#define RES_1440X1050_60HZ_PIXCLOCK 7993
-#define RES_1600X1200_60HZ_PIXCLOCK 6172
-#define RES_1600X1200_75HZ_PIXCLOCK 4938
-#define RES_1280X720_60HZ_PIXCLOCK 13426
-#define RES_1200X900_60HZ_PIXCLOCK 17459
-#define RES_1920X1080_60HZ_PIXCLOCK 5787
-#define RES_1400X1050_60HZ_PIXCLOCK 8214
-#define RES_1400X1050_75HZ_PIXCLOCK 6410
-#define RES_1368X768_60HZ_PIXCLOCK 11647
-#define RES_960X600_60HZ_PIXCLOCK 22099
-#define RES_1000X600_60HZ_PIXCLOCK 20834
-#define RES_1024X576_60HZ_PIXCLOCK 21278
-#define RES_1088X612_60HZ_PIXCLOCK 18877
-#define RES_1152X720_60HZ_PIXCLOCK 14981
-#define RES_1200X720_60HZ_PIXCLOCK 14253
-#define RES_1280X600_60HZ_PIXCLOCK 16260
-#define RES_1280X720_50HZ_PIXCLOCK 16538
-#define RES_1280X768_50HZ_PIXCLOCK 15342
-#define RES_1366X768_50HZ_PIXCLOCK 14301
-#define RES_1366X768_60HZ_PIXCLOCK 11646
-#define RES_1360X768_60HZ_PIXCLOCK 11799
-#define RES_1440X900_60HZ_PIXCLOCK 9390
-#define RES_1440X900_75HZ_PIXCLOCK 7315
-#define RES_1600X900_60HZ_PIXCLOCK 8415
-#define RES_1600X1024_60HZ_PIXCLOCK 7315
-#define RES_1680X1050_60HZ_PIXCLOCK 6814
-#define RES_1680X1050_75HZ_PIXCLOCK 5348
-#define RES_1792X1344_60HZ_PIXCLOCK 4902
-#define RES_1856X1392_60HZ_PIXCLOCK 4577
-#define RES_1920X1200_60HZ_PIXCLOCK 5173
-#define RES_1920X1440_60HZ_PIXCLOCK 4274
-#define RES_1920X1440_75HZ_PIXCLOCK 3367
-#define RES_2048X1536_60HZ_PIXCLOCK 3742
-
-#define RES_1360X768_RB_60HZ_PIXCLOCK 13889
-#define RES_1400X1050_RB_60HZ_PIXCLOCK 9901
-#define RES_1440X900_RB_60HZ_PIXCLOCK 11268
-#define RES_1600X900_RB_60HZ_PIXCLOCK 10230
-#define RES_1680X1050_RB_60HZ_PIXCLOCK 8403
-#define RES_1920X1080_RB_60HZ_PIXCLOCK 7225
-#define RES_1920X1200_RB_60HZ_PIXCLOCK 6497
/* LCD display method
*/
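[editor's note] These RES_*_PIXCLOCK values are fbdev pixclocks, i.e. picoseconds per pixel, so each one is just 10^12 divided by the dot clock: for the single define the patch keeps, 10^12 / 25,175,000 Hz ≈ 39722 ps (640x480@60 at 25.175 MHz). A sketch of the conversion, assuming <linux/math64.h>:

	/* Sketch: dot clock in Hz -> fbdev pixclock in ps per pixel. */
	static u32 hz_to_pixclock_ps(u32 hz)
	{
		return (u32)div_u64(1000000000000ULL, hz);
	}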
@@ -822,7 +682,6 @@ struct display_timing {
struct crt_mode_table {
int refresh_rate;
- unsigned long clk;
int h_sync_polarity;
int v_sync_polarity;
struct display_timing crtc;
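[editor's note] After this hunk the per-mode record is timing-only; reconstructed in full from the context above:

	struct crt_mode_table {
		int refresh_rate;
		int h_sync_polarity;
		int v_sync_polarity;
		struct display_timing crtc;
	};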
diff --git a/drivers/video/via/tblDPASetting.c b/drivers/video/via/tblDPASetting.c
index 0c4c8cc712f4..73bb554e7c1e 100644
--- a/drivers/video/via/tblDPASetting.c
+++ b/drivers/video/via/tblDPASetting.c
@@ -20,17 +20,6 @@
*/
#include "global.h"
-/* For VT3324: */
-struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3324[] = {
- /* Panel ID, CLK_SEL_ST1[09], CLK_SEL_ST2[08] */
- {LCD_PANEL_ID0_640X480, 0x00, 0x00}, /* For 640x480 */
- {LCD_PANEL_ID1_800X600, 0x00, 0x00}, /* For 800x600 */
- {LCD_PANEL_ID2_1024X768, 0x00, 0x00}, /* For 1024x768 */
- {LCD_PANEL_ID3_1280X768, 0x00, 0x00}, /* For 1280x768 */
- {LCD_PANEL_ID4_1280X1024, 0x00, 0x00}, /* For 1280x1024 */
- {LCD_PANEL_ID5_1400X1050, 0x00, 0x00}, /* For 1400x1050 */
- {LCD_PANEL_ID6_1600X1200, 0x0B, 0x03} /* For 1600x1200 */
-};
struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3324[] = {
/* ClkRange, DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
@@ -57,18 +46,6 @@ struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3324[] = {
0x00},
};
-/* For VT3327: */
-struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3327[] = {
- /* Panel ID, CLK_SEL_ST1[09], CLK_SEL_ST2[08] */
- {LCD_PANEL_ID0_640X480, 0x00, 0x00}, /* For 640x480 */
- {LCD_PANEL_ID1_800X600, 0x00, 0x00}, /* For 800x600 */
- {LCD_PANEL_ID2_1024X768, 0x00, 0x00}, /* For 1024x768 */
- {LCD_PANEL_ID3_1280X768, 0x00, 0x00}, /* For 1280x768 */
- {LCD_PANEL_ID4_1280X1024, 0x00, 0x00}, /* For 1280x1024 */
- {LCD_PANEL_ID5_1400X1050, 0x00, 0x00}, /* For 1400x1050 */
- {LCD_PANEL_ID6_1600X1200, 0x00, 0x00} /* For 1600x1200 */
-};
-
struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3327[] = {
/* ClkRange,DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
DVP1Driving, DFPHigh, DFPLow */
diff --git a/drivers/video/via/tblDPASetting.h b/drivers/video/via/tblDPASetting.h
index b065a83481d3..6db61519cb5d 100644
--- a/drivers/video/via/tblDPASetting.h
+++ b/drivers/video/via/tblDPASetting.h
@@ -38,9 +38,7 @@ enum DPA_RANGE {
DPA_CLK_RANGE_150M
};
-extern struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3324[7];
extern struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3324[6];
-extern struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3327[7];
extern struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3327[];
extern struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3364[6];
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
index 3844b558b7bd..78f1405dbab7 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/via/via_i2c.c
@@ -32,7 +32,7 @@
*/
#define VIAFB_NUM_I2C 5
static struct via_i2c_stuff via_i2c_par[VIAFB_NUM_I2C];
-struct viafb_dev *i2c_vdev; /* Passed in from core */
+static struct viafb_dev *i2c_vdev; /* Passed in from core */
static void via_i2c_setscl(void *data, int state)
{
@@ -209,7 +209,6 @@ static int create_i2c_bus(struct i2c_adapter *adapter,
sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
adap_cfg->ioport_index);
adapter->owner = THIS_MODULE;
- adapter->id = 0x01FFFF;
adapter->class = I2C_CLASS_DDC;
adapter->algo_data = algo;
if (pdev)
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 4e66349e4366..f555b891cc72 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -43,11 +43,11 @@ static int viafb_second_size;
static int viafb_accel = 1;
/* Added for specifying active devices.*/
-char *viafb_active_dev;
+static char *viafb_active_dev;
/*Added for specify lcd output port*/
-char *viafb_lcd_port = "";
-char *viafb_dvi_port = "";
+static char *viafb_lcd_port = "";
+static char *viafb_dvi_port = "";
static void retrieve_device_setting(struct viafb_ioctl_setting
*setting_info);
diff --git a/drivers/video/via/viamode.c b/drivers/video/via/viamode.c
index 2dbad3c0f679..8c5bc41ff6a4 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/via/viamode.c
@@ -21,72 +21,6 @@
#include <linux/via-core.h>
#include "global.h"
-struct res_map_refresh res_map_refresh_tbl[] = {
-/*hres, vres, vclock, vmode_refresh*/
- {480, 640, RES_480X640_60HZ_PIXCLOCK, 60},
- {640, 480, RES_640X480_60HZ_PIXCLOCK, 60},
- {640, 480, RES_640X480_75HZ_PIXCLOCK, 75},
- {640, 480, RES_640X480_85HZ_PIXCLOCK, 85},
- {640, 480, RES_640X480_100HZ_PIXCLOCK, 100},
- {640, 480, RES_640X480_120HZ_PIXCLOCK, 120},
- {720, 480, RES_720X480_60HZ_PIXCLOCK, 60},
- {720, 576, RES_720X576_60HZ_PIXCLOCK, 60},
- {800, 480, RES_800X480_60HZ_PIXCLOCK, 60},
- {800, 600, RES_800X600_60HZ_PIXCLOCK, 60},
- {800, 600, RES_800X600_75HZ_PIXCLOCK, 75},
- {800, 600, RES_800X600_85HZ_PIXCLOCK, 85},
- {800, 600, RES_800X600_100HZ_PIXCLOCK, 100},
- {800, 600, RES_800X600_120HZ_PIXCLOCK, 120},
- {848, 480, RES_848X480_60HZ_PIXCLOCK, 60},
- {856, 480, RES_856X480_60HZ_PIXCLOCK, 60},
- {1024, 512, RES_1024X512_60HZ_PIXCLOCK, 60},
- {1024, 600, RES_1024X600_60HZ_PIXCLOCK, 60},
- {1024, 768, RES_1024X768_60HZ_PIXCLOCK, 60},
- {1024, 768, RES_1024X768_75HZ_PIXCLOCK, 75},
- {1024, 768, RES_1024X768_85HZ_PIXCLOCK, 85},
- {1024, 768, RES_1024X768_100HZ_PIXCLOCK, 100},
-/* {1152,864, RES_1152X864_70HZ_PIXCLOCK, 70},*/
- {1152, 864, RES_1152X864_75HZ_PIXCLOCK, 75},
- {1280, 768, RES_1280X768_60HZ_PIXCLOCK, 60},
- {1280, 800, RES_1280X800_60HZ_PIXCLOCK, 60},
- {1280, 960, RES_1280X960_60HZ_PIXCLOCK, 60},
- {1280, 1024, RES_1280X1024_60HZ_PIXCLOCK, 60},
- {1280, 1024, RES_1280X1024_75HZ_PIXCLOCK, 75},
- {1280, 1024, RES_1280X768_85HZ_PIXCLOCK, 85},
- {1440, 1050, RES_1440X1050_60HZ_PIXCLOCK, 60},
- {1600, 1200, RES_1600X1200_60HZ_PIXCLOCK, 60},
- {1600, 1200, RES_1600X1200_75HZ_PIXCLOCK, 75},
- {1280, 720, RES_1280X720_60HZ_PIXCLOCK, 60},
- {1920, 1080, RES_1920X1080_60HZ_PIXCLOCK, 60},
- {1400, 1050, RES_1400X1050_60HZ_PIXCLOCK, 60},
- {1400, 1050, RES_1400X1050_75HZ_PIXCLOCK, 75},
- {1368, 768, RES_1368X768_60HZ_PIXCLOCK, 60},
- {960, 600, RES_960X600_60HZ_PIXCLOCK, 60},
- {1000, 600, RES_1000X600_60HZ_PIXCLOCK, 60},
- {1024, 576, RES_1024X576_60HZ_PIXCLOCK, 60},
- {1088, 612, RES_1088X612_60HZ_PIXCLOCK, 60},
- {1152, 720, RES_1152X720_60HZ_PIXCLOCK, 60},
- {1200, 720, RES_1200X720_60HZ_PIXCLOCK, 60},
- {1200, 900, RES_1200X900_60HZ_PIXCLOCK, 60},
- {1280, 600, RES_1280X600_60HZ_PIXCLOCK, 60},
- {1280, 720, RES_1280X720_50HZ_PIXCLOCK, 50},
- {1280, 768, RES_1280X768_50HZ_PIXCLOCK, 50},
- {1360, 768, RES_1360X768_60HZ_PIXCLOCK, 60},
- {1366, 768, RES_1366X768_50HZ_PIXCLOCK, 50},
- {1366, 768, RES_1366X768_60HZ_PIXCLOCK, 60},
- {1440, 900, RES_1440X900_60HZ_PIXCLOCK, 60},
- {1440, 900, RES_1440X900_75HZ_PIXCLOCK, 75},
- {1600, 900, RES_1600X900_60HZ_PIXCLOCK, 60},
- {1600, 1024, RES_1600X1024_60HZ_PIXCLOCK, 60},
- {1680, 1050, RES_1680X1050_60HZ_PIXCLOCK, 60},
- {1680, 1050, RES_1680X1050_75HZ_PIXCLOCK, 75},
- {1792, 1344, RES_1792X1344_60HZ_PIXCLOCK, 60},
- {1856, 1392, RES_1856X1392_60HZ_PIXCLOCK, 60},
- {1920, 1200, RES_1920X1200_60HZ_PIXCLOCK, 60},
- {1920, 1440, RES_1920X1440_60HZ_PIXCLOCK, 60},
- {1920, 1440, RES_1920X1440_75HZ_PIXCLOCK, 75},
- {2048, 1536, RES_2048X1536_60HZ_PIXCLOCK, 60}
-};
struct io_reg CN400_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIASR, SR15, 0x02, 0x02},
@@ -108,20 +42,6 @@ struct io_reg CN400_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR6A, 0xFF, 0x40},
{VIACR, CR6B, 0xFF, 0x00},
{VIACR, CR6C, 0xFF, 0x00},
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -172,20 +92,6 @@ struct io_reg CN700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR78, 0xFF, 0x00}, /* LCD scaling Factor */
{VIACR, CR79, 0xFF, 0x00}, /* LCD scaling Factor */
{VIACR, CR9F, 0x03, 0x00}, /* LCD scaling Factor */
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -229,20 +135,6 @@ struct io_reg KM400_ModeXregs[] = {
{VIACR, CR36, 0xFF, 0x01}, /* Power Management 3 */
{VIACR, CR68, 0xFF, 0x67}, /* Default FIFO For IGA2 */
{VIACR, CR6A, 0x20, 0x20}, /* Extended FIFO On */
- {VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
- {VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
- {VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
- {VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
- {VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
- {VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
- {VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
- {VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
- {VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
- {VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
- {VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
- {VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
- {VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
- {VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -283,20 +175,6 @@ struct io_reg CX700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR6A, 0xFF, 0x40},
{VIACR, CR6B, 0xFF, 0x00},
{VIACR, CR6C, 0xFF, 0x00},
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -342,20 +220,6 @@ struct io_reg VX855_ModeXregs[] = {
{VIACR, CR6A, 0xFD, 0x60},
{VIACR, CR6B, 0xFF, 0x00},
{VIACR, CR6C, 0xFF, 0x00},
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -390,21 +254,6 @@ struct io_reg CLE266_ModeXregs[] = { {VIASR, SR1E, 0xF0, 0x00},
{VIAGR, GR20, 0xFF, 0x00},
{VIAGR, GR21, 0xFF, 0x00},
{VIAGR, GR22, 0xFF, 0x00},
- /* LCD Parameters */
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Parameter 14 */
};
@@ -443,328 +292,321 @@ struct VPITTable VPIT = {
/********************/
/* 480x640 */
-struct crt_mode_table CRTM480x640[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM480x640[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_25_175M, M480X640_R60_HSP, M480X640_R60_VSP,
+ {REFRESH_60, M480X640_R60_HSP, M480X640_R60_VSP,
{624, 480, 480, 144, 504, 48, 663, 640, 640, 23, 641, 3} } /* GTF*/
};
/* 640x480*/
-struct crt_mode_table CRTM640x480[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM640x480[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_25_175M, M640X480_R60_HSP, M640X480_R60_VSP,
+ {REFRESH_60, M640X480_R60_HSP, M640X480_R60_VSP,
{800, 640, 648, 144, 656, 96, 525, 480, 480, 45, 490, 2} },
- {REFRESH_75, CLK_31_500M, M640X480_R75_HSP, M640X480_R75_VSP,
+ {REFRESH_75, M640X480_R75_HSP, M640X480_R75_VSP,
{840, 640, 640, 200, 656, 64, 500, 480, 480, 20, 481, 3} },
- {REFRESH_85, CLK_36_000M, M640X480_R85_HSP, M640X480_R85_VSP,
+ {REFRESH_85, M640X480_R85_HSP, M640X480_R85_VSP,
{832, 640, 640, 192, 696, 56, 509, 480, 480, 29, 481, 3} },
- {REFRESH_100, CLK_43_163M, M640X480_R100_HSP, M640X480_R100_VSP,
+ {REFRESH_100, M640X480_R100_HSP, M640X480_R100_VSP,
{848, 640, 640, 208, 680, 64, 509, 480, 480, 29, 481, 3} }, /*GTF*/
- {REFRESH_120, CLK_52_406M, M640X480_R120_HSP,
- M640X480_R120_VSP,
- {848, 640, 640, 208, 680, 64, 515, 480, 480, 35, 481,
- 3} } /*GTF*/
+ {REFRESH_120, M640X480_R120_HSP, M640X480_R120_VSP,
+ {848, 640, 640, 208, 680, 64, 515, 480, 480, 35, 481, 3} } /*GTF*/
};
/*720x480 (GTF)*/
-struct crt_mode_table CRTM720x480[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM720x480[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_26_880M, M720X480_R60_HSP, M720X480_R60_VSP,
+ {REFRESH_60, M720X480_R60_HSP, M720X480_R60_VSP,
{896, 720, 720, 176, 736, 72, 497, 480, 480, 17, 481, 3} }
};
/*720x576 (GTF)*/
-struct crt_mode_table CRTM720x576[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM720x576[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_32_668M, M720X576_R60_HSP, M720X576_R60_VSP,
+ {REFRESH_60, M720X576_R60_HSP, M720X576_R60_VSP,
{912, 720, 720, 192, 744, 72, 597, 576, 576, 21, 577, 3} }
};
/* 800x480 (CVT) */
-struct crt_mode_table CRTM800x480[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM800x480[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_29_581M, M800X480_R60_HSP, M800X480_R60_VSP,
+ {REFRESH_60, M800X480_R60_HSP, M800X480_R60_VSP,
{992, 800, 800, 192, 824, 72, 500, 480, 480, 20, 483, 7} }
};
/* 800x600*/
-struct crt_mode_table CRTM800x600[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM800x600[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_40_000M, M800X600_R60_HSP, M800X600_R60_VSP,
+ {REFRESH_60, M800X600_R60_HSP, M800X600_R60_VSP,
{1056, 800, 800, 256, 840, 128, 628, 600, 600, 28, 601, 4} },
- {REFRESH_75, CLK_49_500M, M800X600_R75_HSP, M800X600_R75_VSP,
+ {REFRESH_75, M800X600_R75_HSP, M800X600_R75_VSP,
{1056, 800, 800, 256, 816, 80, 625, 600, 600, 25, 601, 3} },
- {REFRESH_85, CLK_56_250M, M800X600_R85_HSP, M800X600_R85_VSP,
+ {REFRESH_85, M800X600_R85_HSP, M800X600_R85_VSP,
{1048, 800, 800, 248, 832, 64, 631, 600, 600, 31, 601, 3} },
- {REFRESH_100, CLK_68_179M, M800X600_R100_HSP, M800X600_R100_VSP,
+ {REFRESH_100, M800X600_R100_HSP, M800X600_R100_VSP,
{1072, 800, 800, 272, 848, 88, 636, 600, 600, 36, 601, 3} },
- {REFRESH_120, CLK_83_950M, M800X600_R120_HSP,
- M800X600_R120_VSP,
- {1088, 800, 800, 288, 856, 88, 643, 600, 600, 43, 601,
- 3} }
+ {REFRESH_120, M800X600_R120_HSP, M800X600_R120_VSP,
+ {1088, 800, 800, 288, 856, 88, 643, 600, 600, 43, 601, 3} }
};
/* 848x480 (CVT) */
-struct crt_mode_table CRTM848x480[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM848x480[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_31_500M, M848X480_R60_HSP, M848X480_R60_VSP,
+ {REFRESH_60, M848X480_R60_HSP, M848X480_R60_VSP,
{1056, 848, 848, 208, 872, 80, 500, 480, 480, 20, 483, 5} }
};
/*856x480 (GTF) convert to 852x480*/
-struct crt_mode_table CRTM852x480[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM852x480[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_31_728M, M852X480_R60_HSP, M852X480_R60_VSP,
+ {REFRESH_60, M852X480_R60_HSP, M852X480_R60_VSP,
{1064, 856, 856, 208, 872, 88, 497, 480, 480, 17, 481, 3} }
};
/*1024x512 (GTF)*/
-struct crt_mode_table CRTM1024x512[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1024x512[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_41_291M, M1024X512_R60_HSP, M1024X512_R60_VSP,
+ {REFRESH_60, M1024X512_R60_HSP, M1024X512_R60_VSP,
{1296, 1024, 1024, 272, 1056, 104, 531, 512, 512, 19, 513, 3} }
};
/* 1024x600*/
-struct crt_mode_table CRTM1024x600[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1024x600[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_48_875M, M1024X600_R60_HSP, M1024X600_R60_VSP,
+ {REFRESH_60, M1024X600_R60_HSP, M1024X600_R60_VSP,
{1312, 1024, 1024, 288, 1064, 104, 622, 600, 600, 22, 601, 3} },
};
/* 1024x768*/
-struct crt_mode_table CRTM1024x768[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1024x768[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_65_000M, M1024X768_R60_HSP, M1024X768_R60_VSP,
+ {REFRESH_60, M1024X768_R60_HSP, M1024X768_R60_VSP,
{1344, 1024, 1024, 320, 1048, 136, 806, 768, 768, 38, 771, 6} },
- {REFRESH_75, CLK_78_750M, M1024X768_R75_HSP, M1024X768_R75_VSP,
+ {REFRESH_75, M1024X768_R75_HSP, M1024X768_R75_VSP,
{1312, 1024, 1024, 288, 1040, 96, 800, 768, 768, 32, 769, 3} },
- {REFRESH_85, CLK_94_500M, M1024X768_R85_HSP, M1024X768_R85_VSP,
+ {REFRESH_85, M1024X768_R85_HSP, M1024X768_R85_VSP,
{1376, 1024, 1024, 352, 1072, 96, 808, 768, 768, 40, 769, 3} },
- {REFRESH_100, CLK_113_309M, M1024X768_R100_HSP, M1024X768_R100_VSP,
+ {REFRESH_100, M1024X768_R100_HSP, M1024X768_R100_VSP,
{1392, 1024, 1024, 368, 1096, 112, 814, 768, 768, 46, 769, 3} }
};
/* 1152x864*/
-struct crt_mode_table CRTM1152x864[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1152x864[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_75, CLK_108_000M, M1152X864_R75_HSP, M1152X864_R75_VSP,
+ {REFRESH_75, M1152X864_R75_HSP, M1152X864_R75_VSP,
{1600, 1152, 1152, 448, 1216, 128, 900, 864, 864, 36, 865, 3} }
};
/* 1280x720 (HDMI 720P)*/
-struct crt_mode_table CRTM1280x720[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1280x720[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_74_481M, M1280X720_R60_HSP, M1280X720_R60_VSP,
+ {REFRESH_60, M1280X720_R60_HSP, M1280X720_R60_VSP,
{1648, 1280, 1280, 368, 1392, 40, 750, 720, 720, 30, 725, 5} },
- {REFRESH_50, CLK_60_466M, M1280X720_R50_HSP, M1280X720_R50_VSP,
+ {REFRESH_50, M1280X720_R50_HSP, M1280X720_R50_VSP,
{1632, 1280, 1280, 352, 1328, 128, 741, 720, 720, 21, 721, 3} }
};
/*1280x768 (GTF)*/
-struct crt_mode_table CRTM1280x768[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1280x768[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_80_136M, M1280X768_R60_HSP, M1280X768_R60_VSP,
+ {REFRESH_60, M1280X768_R60_HSP, M1280X768_R60_VSP,
{1680, 1280, 1280, 400, 1344, 136, 795, 768, 768, 27, 769, 3} },
- {REFRESH_50, CLK_65_178M, M1280X768_R50_HSP, M1280X768_R50_VSP,
+ {REFRESH_50, M1280X768_R50_HSP, M1280X768_R50_VSP,
{1648, 1280, 1280, 368, 1336, 128, 791, 768, 768, 23, 769, 3} }
};
/* 1280x800 (CVT) */
-struct crt_mode_table CRTM1280x800[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1280x800[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_83_375M, M1280X800_R60_HSP, M1280X800_R60_VSP,
+ {REFRESH_60, M1280X800_R60_HSP, M1280X800_R60_VSP,
{1680, 1280, 1280, 400, 1352, 128, 831, 800, 800, 31, 803, 6} }
};
/*1280x960*/
-struct crt_mode_table CRTM1280x960[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1280x960[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_108_000M, M1280X960_R60_HSP, M1280X960_R60_VSP,
+ {REFRESH_60, M1280X960_R60_HSP, M1280X960_R60_VSP,
{1800, 1280, 1280, 520, 1376, 112, 1000, 960, 960, 40, 961, 3} }
};
/* 1280x1024*/
-struct crt_mode_table CRTM1280x1024[] = {
- /*r_rate,vclk,,hsp,vsp */
+static struct crt_mode_table CRTM1280x1024[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_108_000M, M1280X1024_R60_HSP, M1280X1024_R60_VSP,
+ {REFRESH_60, M1280X1024_R60_HSP, M1280X1024_R60_VSP,
{1688, 1280, 1280, 408, 1328, 112, 1066, 1024, 1024, 42, 1025,
3} },
- {REFRESH_75, CLK_135_000M, M1280X1024_R75_HSP, M1280X1024_R75_VSP,
+ {REFRESH_75, M1280X1024_R75_HSP, M1280X1024_R75_VSP,
{1688, 1280, 1280, 408, 1296, 144, 1066, 1024, 1024, 42, 1025,
3} },
- {REFRESH_85, CLK_157_500M, M1280X1024_R85_HSP, M1280X1024_R85_VSP,
+ {REFRESH_85, M1280X1024_R85_HSP, M1280X1024_R85_VSP,
{1728, 1280, 1280, 448, 1344, 160, 1072, 1024, 1024, 48, 1025, 3} }
};
/* 1368x768 (GTF) */
-struct crt_mode_table CRTM1368x768[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1368x768[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_85_860M, M1368X768_R60_HSP, M1368X768_R60_VSP,
+ {REFRESH_60, M1368X768_R60_HSP, M1368X768_R60_VSP,
{1800, 1368, 1368, 432, 1440, 144, 795, 768, 768, 27, 769, 3} }
};
/*1440x1050 (GTF)*/
-struct crt_mode_table CRTM1440x1050[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1440x1050[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_125_104M, M1440X1050_R60_HSP, M1440X1050_R60_VSP,
+ {REFRESH_60, M1440X1050_R60_HSP, M1440X1050_R60_VSP,
{1936, 1440, 1440, 496, 1536, 152, 1077, 1040, 1040, 37, 1041, 3} }
};
/* 1600x1200*/
-struct crt_mode_table CRTM1600x1200[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1600x1200[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_162_000M, M1600X1200_R60_HSP, M1600X1200_R60_VSP,
+ {REFRESH_60, M1600X1200_R60_HSP, M1600X1200_R60_VSP,
{2160, 1600, 1600, 560, 1664, 192, 1250, 1200, 1200, 50, 1201,
3} },
- {REFRESH_75, CLK_202_500M, M1600X1200_R75_HSP, M1600X1200_R75_VSP,
+ {REFRESH_75, M1600X1200_R75_HSP, M1600X1200_R75_VSP,
{2160, 1600, 1600, 560, 1664, 192, 1250, 1200, 1200, 50, 1201, 3} }
};
/* 1680x1050 (CVT) */
-struct crt_mode_table CRTM1680x1050[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1680x1050[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_146_760M, M1680x1050_R60_HSP, M1680x1050_R60_VSP,
+ {REFRESH_60, M1680x1050_R60_HSP, M1680x1050_R60_VSP,
{2240, 1680, 1680, 560, 1784, 176, 1089, 1050, 1050, 39, 1053,
6} },
- {REFRESH_75, CLK_187_000M, M1680x1050_R75_HSP, M1680x1050_R75_VSP,
+ {REFRESH_75, M1680x1050_R75_HSP, M1680x1050_R75_VSP,
{2272, 1680, 1680, 592, 1800, 176, 1099, 1050, 1050, 49, 1053, 6} }
};
/* 1680x1050 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1680x1050_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1680x1050_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_119_000M, M1680x1050_RB_R60_HSP,
- M1680x1050_RB_R60_VSP,
+ {REFRESH_60, M1680x1050_RB_R60_HSP, M1680x1050_RB_R60_VSP,
{1840, 1680, 1680, 160, 1728, 32, 1080, 1050, 1050, 30, 1053, 6} }
};
/* 1920x1080 (CVT)*/
-struct crt_mode_table CRTM1920x1080[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1920x1080[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_172_798M, M1920X1080_R60_HSP, M1920X1080_R60_VSP,
+ {REFRESH_60, M1920X1080_R60_HSP, M1920X1080_R60_VSP,
{2576, 1920, 1920, 656, 2048, 200, 1120, 1080, 1080, 40, 1083, 5} }
};
/* 1920x1080 (CVT with Reduce Blanking) */
-struct crt_mode_table CRTM1920x1080_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1920x1080_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_138_400M, M1920X1080_RB_R60_HSP,
- M1920X1080_RB_R60_VSP,
+ {REFRESH_60, M1920X1080_RB_R60_HSP, M1920X1080_RB_R60_VSP,
{2080, 1920, 1920, 160, 1968, 32, 1111, 1080, 1080, 31, 1083, 5} }
};
/* 1920x1440*/
-struct crt_mode_table CRTM1920x1440[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1920x1440[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_234_000M, M1920X1440_R60_HSP, M1920X1440_R60_VSP,
+ {REFRESH_60, M1920X1440_R60_HSP, M1920X1440_R60_VSP,
{2600, 1920, 1920, 680, 2048, 208, 1500, 1440, 1440, 60, 1441,
3} },
- {REFRESH_75, CLK_297_500M, M1920X1440_R75_HSP, M1920X1440_R75_VSP,
+ {REFRESH_75, M1920X1440_R75_HSP, M1920X1440_R75_VSP,
{2640, 1920, 1920, 720, 2064, 224, 1500, 1440, 1440, 60, 1441, 3} }
};
/* 1400x1050 (CVT) */
-struct crt_mode_table CRTM1400x1050[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1400x1050[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_121_750M, M1400X1050_R60_HSP, M1400X1050_R60_VSP,
+ {REFRESH_60, M1400X1050_R60_HSP, M1400X1050_R60_VSP,
{1864, 1400, 1400, 464, 1488, 144, 1089, 1050, 1050, 39, 1053,
4} },
- {REFRESH_75, CLK_156_000M, M1400X1050_R75_HSP, M1400X1050_R75_VSP,
+ {REFRESH_75, M1400X1050_R75_HSP, M1400X1050_R75_VSP,
{1896, 1400, 1400, 496, 1504, 144, 1099, 1050, 1050, 49, 1053, 4} }
};
/* 1400x1050 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1400x1050_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1400x1050_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_101_000M, M1400X1050_RB_R60_HSP,
- M1400X1050_RB_R60_VSP,
+ {REFRESH_60, M1400X1050_RB_R60_HSP, M1400X1050_RB_R60_VSP,
{1560, 1400, 1400, 160, 1448, 32, 1080, 1050, 1050, 30, 1053, 4} }
};
/* 960x600 (CVT) */
-struct crt_mode_table CRTM960x600[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM960x600[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_45_250M, M960X600_R60_HSP, M960X600_R60_VSP,
+ {REFRESH_60, M960X600_R60_HSP, M960X600_R60_VSP,
{1216, 960, 960, 256, 992, 96, 624, 600, 600, 24, 603, 6} }
};
/* 1000x600 (GTF) */
-struct crt_mode_table CRTM1000x600[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1000x600[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_48_000M, M1000X600_R60_HSP, M1000X600_R60_VSP,
+ {REFRESH_60, M1000X600_R60_HSP, M1000X600_R60_VSP,
{1288, 1000, 1000, 288, 1040, 104, 622, 600, 600, 22, 601, 3} }
};
/* 1024x576 (GTF) */
-struct crt_mode_table CRTM1024x576[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1024x576[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_46_996M, M1024X576_R60_HSP, M1024X576_R60_VSP,
+ {REFRESH_60, M1024X576_R60_HSP, M1024X576_R60_VSP,
{1312, 1024, 1024, 288, 1064, 104, 597, 576, 576, 21, 577, 3} }
};
/* 1088x612 (CVT) */
-struct crt_mode_table CRTM1088x612[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1088x612[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_52_977M, M1088X612_R60_HSP, M1088X612_R60_VSP,
+ {REFRESH_60, M1088X612_R60_HSP, M1088X612_R60_VSP,
{1392, 1088, 1088, 304, 1136, 104, 636, 612, 612, 24, 615, 5} }
};
/* 1152x720 (CVT) */
-struct crt_mode_table CRTM1152x720[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1152x720[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_66_750M, M1152X720_R60_HSP, M1152X720_R60_VSP,
+ {REFRESH_60, M1152X720_R60_HSP, M1152X720_R60_VSP,
{1488, 1152, 1152, 336, 1208, 112, 748, 720, 720, 28, 723, 6} }
};
/* 1200x720 (GTF) */
-struct crt_mode_table CRTM1200x720[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1200x720[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_70_159M, M1200X720_R60_HSP, M1200X720_R60_VSP,
+ {REFRESH_60, M1200X720_R60_HSP, M1200X720_R60_VSP,
{1568, 1200, 1200, 368, 1256, 128, 746, 720, 720, 26, 721, 3} }
};
/* 1200x900 (DCON) */
-struct crt_mode_table DCON1200x900[] = {
- /* r_rate, vclk, hsp, vsp */
- {REFRESH_60, CLK_57_275M, M1200X900_R60_HSP, M1200X900_R60_VSP,
+static struct crt_mode_table DCON1200x900[] = {
+ /* r_rate, hsp, vsp */
+ {REFRESH_60, M1200X900_R60_HSP, M1200X900_R60_VSP,
/* The correct htotal is 1240, but this doesn't raster on VX855. */
/* Via suggested changing to a multiple of 16, hence 1264. */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
@@ -772,126 +614,122 @@ struct crt_mode_table DCON1200x900[] = {
};
/* 1280x600 (GTF) */
-struct crt_mode_table CRTM1280x600[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1280x600[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_61_500M, M1280x600_R60_HSP, M1280x600_R60_VSP,
+ {REFRESH_60, M1280x600_R60_HSP, M1280x600_R60_VSP,
{1648, 1280, 1280, 368, 1336, 128, 622, 600, 600, 22, 601, 3} }
};
/* 1360x768 (CVT) */
-struct crt_mode_table CRTM1360x768[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1360x768[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_84_750M, M1360X768_R60_HSP, M1360X768_R60_VSP,
+ {REFRESH_60, M1360X768_R60_HSP, M1360X768_R60_VSP,
{1776, 1360, 1360, 416, 1432, 136, 798, 768, 768, 30, 771, 5} }
};
/* 1360x768 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1360x768_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1360x768_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_72_000M, M1360X768_RB_R60_HSP,
- M1360X768_RB_R60_VSP,
+ {REFRESH_60, M1360X768_RB_R60_HSP, M1360X768_RB_R60_VSP,
{1520, 1360, 1360, 160, 1408, 32, 790, 768, 768, 22, 771, 5} }
};
/* 1366x768 (GTF) */
-struct crt_mode_table CRTM1366x768[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1366x768[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_85_860M, M1368X768_R60_HSP, M1368X768_R60_VSP,
+ {REFRESH_60, M1368X768_R60_HSP, M1368X768_R60_VSP,
{1800, 1368, 1368, 432, 1440, 144, 795, 768, 768, 27, 769, 3} },
- {REFRESH_50, CLK_69_924M, M1368X768_R50_HSP, M1368X768_R50_VSP,
+ {REFRESH_50, M1368X768_R50_HSP, M1368X768_R50_VSP,
{1768, 1368, 1368, 400, 1424, 144, 791, 768, 768, 23, 769, 3} }
};
/* 1440x900 (CVT) */
-struct crt_mode_table CRTM1440x900[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1440x900[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_106_500M, M1440X900_R60_HSP, M1440X900_R60_VSP,
+ {REFRESH_60, M1440X900_R60_HSP, M1440X900_R60_VSP,
{1904, 1440, 1440, 464, 1520, 152, 934, 900, 900, 34, 903, 6} },
- {REFRESH_75, CLK_136_700M, M1440X900_R75_HSP, M1440X900_R75_VSP,
+ {REFRESH_75, M1440X900_R75_HSP, M1440X900_R75_VSP,
{1936, 1440, 1440, 496, 1536, 152, 942, 900, 900, 42, 903, 6} }
};
/* 1440x900 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1440x900_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1440x900_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_88_750M, M1440X900_RB_R60_HSP,
- M1440X900_RB_R60_VSP,
+ {REFRESH_60, M1440X900_RB_R60_HSP, M1440X900_RB_R60_VSP,
{1600, 1440, 1440, 160, 1488, 32, 926, 900, 900, 26, 903, 6} }
};
/* 1600x900 (CVT) */
-struct crt_mode_table CRTM1600x900[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1600x900[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_118_840M, M1600X900_R60_HSP, M1600X900_R60_VSP,
+ {REFRESH_60, M1600X900_R60_HSP, M1600X900_R60_VSP,
{2112, 1600, 1600, 512, 1688, 168, 934, 900, 900, 34, 903, 5} }
};
/* 1600x900 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1600x900_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1600x900_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_97_750M, M1600X900_RB_R60_HSP,
- M1600X900_RB_R60_VSP,
+ {REFRESH_60, M1600X900_RB_R60_HSP, M1600X900_RB_R60_VSP,
{1760, 1600, 1600, 160, 1648, 32, 926, 900, 900, 26, 903, 5} }
};
/* 1600x1024 (GTF) */
-struct crt_mode_table CRTM1600x1024[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1600x1024[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_136_700M, M1600X1024_R60_HSP, M1600X1024_R60_VSP,
+ {REFRESH_60, M1600X1024_R60_HSP, M1600X1024_R60_VSP,
{2144, 1600, 1600, 544, 1704, 168, 1060, 1024, 1024, 36, 1025, 3} }
};
/* 1792x1344 (DMT) */
-struct crt_mode_table CRTM1792x1344[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1792x1344[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_204_000M, M1792x1344_R60_HSP, M1792x1344_R60_VSP,
+ {REFRESH_60, M1792x1344_R60_HSP, M1792x1344_R60_VSP,
{2448, 1792, 1792, 656, 1920, 200, 1394, 1344, 1344, 50, 1345, 3} }
};
/* 1856x1392 (DMT) */
-struct crt_mode_table CRTM1856x1392[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1856x1392[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_218_500M, M1856x1392_R60_HSP, M1856x1392_R60_VSP,
+ {REFRESH_60, M1856x1392_R60_HSP, M1856x1392_R60_VSP,
{2528, 1856, 1856, 672, 1952, 224, 1439, 1392, 1392, 47, 1393, 3} }
};
/* 1920x1200 (CVT) */
-struct crt_mode_table CRTM1920x1200[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1920x1200[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_193_295M, M1920X1200_R60_HSP, M1920X1200_R60_VSP,
+ {REFRESH_60, M1920X1200_R60_HSP, M1920X1200_R60_VSP,
{2592, 1920, 1920, 672, 2056, 200, 1245, 1200, 1200, 45, 1203, 6} }
};
/* 1920x1200 (CVT with Reduce Blanking) */
-struct crt_mode_table CRTM1920x1200_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1920x1200_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_153_920M, M1920X1200_RB_R60_HSP,
- M1920X1200_RB_R60_VSP,
+ {REFRESH_60, M1920X1200_RB_R60_HSP, M1920X1200_RB_R60_VSP,
{2080, 1920, 1920, 160, 1968, 32, 1235, 1200, 1200, 35, 1203, 6} }
};
/* 2048x1536 (CVT) */
-struct crt_mode_table CRTM2048x1536[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM2048x1536[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_267_250M, M2048x1536_R60_HSP, M2048x1536_R60_VSP,
+ {REFRESH_60, M2048x1536_R60_HSP, M2048x1536_R60_VSP,
{2800, 2048, 2048, 752, 2200, 224, 1592, 1536, 1536, 56, 1539, 4} }
};
-struct VideoModeTable viafb_modes[] = {
+static struct VideoModeTable viafb_modes[] = {
/* Display : 480x640 (GTF) */
{CRTM480x640, ARRAY_SIZE(CRTM480x640)},
@@ -1016,7 +854,7 @@ struct VideoModeTable viafb_modes[] = {
{CRTM1400x1050, ARRAY_SIZE(CRTM1400x1050)}
};
-struct VideoModeTable viafb_rb_modes[] = {
+static struct VideoModeTable viafb_rb_modes[] = {
/* Display : 1360x768 (CVT Reduce Blanking) */
{CRTM1360x768_RB, ARRAY_SIZE(CRTM1360x768_RB)},
@@ -1040,14 +878,12 @@ struct VideoModeTable viafb_rb_modes[] = {
};
struct crt_mode_table CEAM1280x720[] = {
- {REFRESH_60, CLK_74_270M, M1280X720_CEA_R60_HSP,
- M1280X720_CEA_R60_VSP,
+ {REFRESH_60, M1280X720_CEA_R60_HSP, M1280X720_CEA_R60_VSP,
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
{1650, 1280, 1280, 370, 1390, 40, 750, 720, 720, 30, 725, 5} }
};
struct crt_mode_table CEAM1920x1080[] = {
- {REFRESH_60, CLK_148_500M, M1920X1080_CEA_R60_HSP,
- M1920X1080_CEA_R60_VSP,
+ {REFRESH_60, M1920X1080_CEA_R60_HSP, M1920X1080_CEA_R60_VSP,
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
{2200, 1920, 1920, 300, 2008, 44, 1125, 1080, 1080, 45, 1084, 5} }
};
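[editor's note] Worked check for the CEA 720p entry above, assuming its clock is now recomputed the same way as in lcd.c: 1650 (HT) * 750 (VT) * 60 Hz = 74,250,000 Hz = 74.25 MHz, the standard CEA 720p pixel clock. The removed CLK_74_270M constant (74.27 MHz) was slightly off, so the recomputation is, if anything, more accurate.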
@@ -1057,7 +893,6 @@ struct VideoModeTable CEA_HDMI_Modes[] = {
{CEAM1920x1080, ARRAY_SIZE(CEAM1920x1080)}
};
-int NUM_TOTAL_RES_MAP_REFRESH = ARRAY_SIZE(res_map_refresh_tbl);
int NUM_TOTAL_CEA_MODES = ARRAY_SIZE(CEA_HDMI_Modes);
int NUM_TOTAL_CN400_ModeXregs = ARRAY_SIZE(CN400_ModeXregs);
int NUM_TOTAL_CN700_ModeXregs = ARRAY_SIZE(CN700_ModeXregs);
diff --git a/drivers/video/via/viamode.h b/drivers/video/via/viamode.h
index 5b1ced86514b..8a67ea1b5ef0 100644
--- a/drivers/video/via/viamode.h
+++ b/drivers/video/via/viamode.h
@@ -41,14 +41,6 @@ struct patch_table {
struct io_reg *io_reg_table;
};
-struct res_map_refresh {
- int hres;
- int vres;
- int pixclock;
- int vmode_refresh;
-};
-
-extern int NUM_TOTAL_RES_MAP_REFRESH;
extern int NUM_TOTAL_CEA_MODES;
extern int NUM_TOTAL_CN400_ModeXregs;
extern int NUM_TOTAL_CN700_ModeXregs;
@@ -66,7 +58,6 @@ extern struct crt_mode_table CEAM1280x720[];
extern struct crt_mode_table CEAM1920x1080[];
extern struct VideoModeTable CEA_HDMI_Modes[];
-extern struct res_map_refresh res_map_refresh_tbl[];
extern struct io_reg CN400_ModeXregs[];
extern struct io_reg CN700_ModeXregs[];
extern struct io_reg KM400_ModeXregs[];
diff --git a/drivers/video/via/vt1636.c b/drivers/video/via/vt1636.c
index 60e4192c2b34..ee2903b472cf 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/via/vt1636.c
@@ -167,22 +167,6 @@ static int get_clk_range_index(u32 Clk)
return DPA_CLK_RANGE_150M;
}
-static int get_lvds_dpa_setting_index(int panel_size_id,
- struct VT1636_DPA_SETTING *p_vt1636_dpasetting_tbl,
- int tbl_size)
-{
- int i;
-
- for (i = 0; i < tbl_size; i++) {
- if (panel_size_id == p_vt1636_dpasetting_tbl->PanelSizeID)
- return i;
-
- p_vt1636_dpasetting_tbl++;
- }
-
- return 0;
-}
-
static void set_dpa_vt1636(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info,
struct VT1636_DPA_SETTING *p_vt1636_dpa_setting)
@@ -206,7 +190,9 @@ void viafb_vt1636_patch_skew_on_vt3324(
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
- int index, size;
+ struct VT1636_DPA_SETTING dpa = {0x00, 0x00}, dpa_16x12 = {0x0B, 0x03},
+ *pdpa;
+ int index;
DEBUG_MSG(KERN_INFO "viafb_vt1636_patch_skew_on_vt3324.\n");
@@ -216,19 +202,21 @@ void viafb_vt1636_patch_skew_on_vt3324(
&GFX_DPA_SETTING_TBL_VT3324[index]);
/* LVDS Transmitter DPA settings: */
- size = ARRAY_SIZE(VT1636_DPA_SETTING_TBL_VT3324);
- index =
- get_lvds_dpa_setting_index(plvds_setting_info->lcd_panel_id,
- VT1636_DPA_SETTING_TBL_VT3324, size);
- set_dpa_vt1636(plvds_setting_info, plvds_chip_info,
- &VT1636_DPA_SETTING_TBL_VT3324[index]);
+ if (plvds_setting_info->lcd_panel_hres == 1600 &&
+ plvds_setting_info->lcd_panel_vres == 1200)
+ pdpa = &dpa_16x12;
+ else
+ pdpa = &dpa;
+
+ set_dpa_vt1636(plvds_setting_info, plvds_chip_info, pdpa);
}
void viafb_vt1636_patch_skew_on_vt3327(
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
- int index, size;
+ struct VT1636_DPA_SETTING dpa = {0x00, 0x00};
+ int index;
DEBUG_MSG(KERN_INFO "viafb_vt1636_patch_skew_on_vt3327.\n");
@@ -238,12 +226,7 @@ void viafb_vt1636_patch_skew_on_vt3327(
&GFX_DPA_SETTING_TBL_VT3327[index]);
/* LVDS Transmitter DPA settings: */
- size = ARRAY_SIZE(VT1636_DPA_SETTING_TBL_VT3327);
- index =
- get_lvds_dpa_setting_index(plvds_setting_info->lcd_panel_id,
- VT1636_DPA_SETTING_TBL_VT3327, size);
- set_dpa_vt1636(plvds_setting_info, plvds_chip_info,
- &VT1636_DPA_SETTING_TBL_VT3327[index]);
+ set_dpa_vt1636(plvds_setting_info, plvds_chip_info, &dpa);
}
void viafb_vt1636_patch_skew_on_vt3364(
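[editor's note] This change is intended to be behavior-preserving: in the deleted VT3324 table only the 1600x1200 row carried nonzero values ({0x0B, 0x03}) and every VT3327 row was {0x00, 0x00}, so the ID-based lookup collapses to the resolution test above. The equivalence as a sketch (helper name and const-ness are hypothetical):

	static const struct VT1636_DPA_SETTING *
	vt3324_dpa(const struct lvds_setting_information *info)
	{
		static const struct VT1636_DPA_SETTING zero = {0x00, 0x00};
		static const struct VT1636_DPA_SETTING id6  = {0x0B, 0x03};

		if (info->lcd_panel_hres == 1600 &&
		    info->lcd_panel_vres == 1200)
			return &id6;
		return &zero;
	}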
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 31649b7b672f..b69d71482554 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -533,6 +533,16 @@ config I6300ESB_WDT
To compile this driver as a module, choose M here: the
module will be called i6300esb.
+config INTEL_SCU_WATCHDOG
+ bool "Intel SCU Watchdog for Mobile Platforms"
+ depends on WATCHDOG
+ depends on INTEL_SCU_IPC
+ ---help---
+ Hardware driver for the watchdog timer built into the Intel SCU
+ for Intel Mobile Platforms.
+
+ This driver is built into the kernel when enabled; as a bool option it cannot be built as a module.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
@@ -580,7 +590,7 @@ config IT87_WDT
depends on X86 && EXPERIMENTAL
---help---
This is the driver for the hardware watchdog on the ITE IT8702,
- IT8712, IT8716, IT8718, IT8720, IT8726, IT8712 Super I/O chips.
+ IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 Super I/O chips.
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
amount of time.
@@ -589,18 +599,20 @@ config IT87_WDT
be called it87_wdt.
config HP_WATCHDOG
- tristate "HP Proliant iLO2+ Hardware Watchdog Timer"
+ tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on X86
+ default m
help
A software monitoring watchdog and NMI sourcing driver. This driver
will detect lockups and provide a stack trace. This is a driver that
- will only load on a HP ProLiant system with a minimum of iLO2 support.
+ will only load on an HP ProLiant system with a minimum of iLO2 support.
To compile this driver as a module, choose M here: the module will be
called hpwdt.
config HPWDT_NMI_DECODING
bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on HP_WATCHDOG
+ default y
help
When an NMI occurs this feature will make the necessary BIOS calls to
log the cause of the NMI.
@@ -903,6 +915,12 @@ config INDYDOG
timer expired and no process has written to /dev/watchdog during
that time.
+config JZ4740_WDT
+ tristate "Ingenic jz4740 SoC hardware watchdog"
+ depends on MACH_JZ4740
+ help
+ Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
+
config WDT_MTX1
tristate "MTX-1 Hardware Watchdog"
depends on MIPS_MTX1
@@ -1083,14 +1101,6 @@ config SH_WDT
To compile this driver as a module, choose M here: the
module will be called shwdt.
-config SH_WDT_MMAP
- bool "Allow mmap of SH WDT"
- default n
- depends on SH_WDT
- help
- If you say Y here, user applications will be able to mmap the
- WDT/CPG registers.
-
# SPARC Architecture
# SPARC64 Architecture
@@ -1119,6 +1129,16 @@ config WATCHDOG_RIO
# XTENSA Architecture
+# Xen Architecture
+
+config XEN_WDT
+ tristate "Xen Watchdog support"
+ depends on XEN
+ help
+ Say Y here to support the hypervisor watchdog capability provided
+ by Xen 4.0 and newer. The watchdog timeout period is normally one
+ minute but can be changed with a boot-time parameter.
+
#
# ISA-based Watchdog Cards
#
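[editor's note] The watchdog drivers added in this Kconfig all sit behind the same /dev/watchdog character-device ABI. For reference, a minimal userspace keepalive loop — a sketch with error handling elided:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 60;
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0)
			return 1;
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* needs WDIOF_SETTIMEOUT */
		for (;;) {
			ioctl(fd, WDIOC_KEEPALIVE, 0);	/* pet the dog */
			sleep(timeout / 2);
		}
	}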
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 20e44c4782b3..d520bf9c3355 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
+obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
# M32R Architecture
@@ -114,6 +115,7 @@ obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
+obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
@@ -148,6 +150,9 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
# XTENSA Architecture
+# Xen
+obj-$(CONFIG_XEN_WDT) += xen_wdt.o
+
# Architecture Independent
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index fa4d36033552..f16dcbd475fb 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id ali_pci_tbl[] __used = {
+static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, },
@@ -362,12 +362,12 @@ static int __init ali_find_watchdog(void)
*/
static const struct file_operations ali_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
- .open = ali_open,
- .release = ali_release,
+ .open = ali_open,
+ .release = ali_release,
};
static struct miscdevice ali_miscdev = {
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 4b7a2b4138ed..46f4b85b46de 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@ err_out:
module_init(alim7101_wdt_init);
module_exit(alim7101_wdt_unload);
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
+static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ }
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 5f245522397b..bd44417c84d4 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -150,8 +150,8 @@ static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
}
static const struct watchdog_info bcm47xx_wdt_info = {
- .identity = DRV_NAME,
- .options = WDIOF_SETTIMEOUT |
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 9042a95fc98c..b9fa9b71583a 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -63,7 +63,7 @@ static DEFINE_SPINLOCK(bfin_wdt_spinlock);
/**
* bfin_wdt_keepalive - Keep the Userspace Watchdog Alive
*
- * The Userspace watchdog got a KeepAlive: schedule the next timeout.
+ * The Userspace watchdog got a KeepAlive: schedule the next timeout.
*/
static int bfin_wdt_keepalive(void)
{
@@ -337,7 +337,7 @@ static int bfin_wdt_resume(struct platform_device *pdev)
static const struct file_operations bfin_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .write = bfin_wdt_write,
+ .write = bfin_wdt_write,
.unlocked_ioctl = bfin_wdt_ioctl,
.open = bfin_wdt_open,
.release = bfin_wdt_release,
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 7e7ec9c35b6a..337265b47305 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -4,7 +4,7 @@
* Author: Matthew McClintock
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
- * Copyright 2005, 2008, 2010 Freescale Semiconductor Inc.
+ * Copyright 2005, 2008, 2010-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -221,9 +221,8 @@ static int booke_wdt_open(struct inode *inode, struct file *file)
if (booke_wdt_enabled == 0) {
booke_wdt_enabled = 1;
on_each_cpu(__booke_wdt_enable, NULL, 0);
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_debug("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
}
spin_unlock(&booke_wdt_lock);
@@ -240,6 +239,7 @@ static int booke_wdt_release(struct inode *inode, struct file *file)
*/
on_each_cpu(__booke_wdt_disable, NULL, 0);
booke_wdt_enabled = 0;
+ pr_debug("booke_wdt: watchdog disabled\n");
#endif
clear_bit(0, &wdt_is_active);
@@ -271,21 +271,20 @@ static int __init booke_wdt_init(void)
{
int ret = 0;
- printk(KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n");
+ pr_info("booke_wdt: powerpc book-e watchdog driver loaded\n");
ident.firmware_version = cur_cpu_spec->pvr_value;
ret = misc_register(&booke_wdt_miscdev);
if (ret) {
- printk(KERN_CRIT "Cannot register miscdev on minor=%d: %d\n",
- WATCHDOG_MINOR, ret);
+ pr_err("booke_wdt: cannot register device (minor=%u, ret=%i)\n",
+ WATCHDOG_MINOR, ret);
return ret;
}
spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 1) {
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_info("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
on_each_cpu(__booke_wdt_enable, NULL, 0);
}
spin_unlock(&booke_wdt_lock);
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 65911678453d..1e013e8457b7 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -5,10 +5,10 @@
* interface and Solaris-compatible ioctls as best it is
* able.
*
- * NOTE: CP1400 systems appear to have a defective intr_mask
- * register on the PLD, preventing the disabling of
- * timer interrupts. We use a timer to periodically
- * reset 'stopped' watchdogs on affected platforms.
+ * NOTE: CP1400 systems appear to have a defective intr_mask
+ * register on the PLD, preventing the disabling of
+ * timer interrupts. We use a timer to periodically
+ * reset 'stopped' watchdogs on affected platforms.
*
* Copyright (c) 2000 Eric Brower (ebrower@usa.net)
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
@@ -107,13 +107,13 @@ static struct cpwd *cpwd_device;
* -------------------
* |- counter val -|
* -------------------
- * dcntr - Current 16-bit downcounter value.
- * When downcounter reaches '0' watchdog expires.
- * Reading this register resets downcounter with
- * 'limit' value.
- * limit - 16-bit countdown value in 1/10th second increments.
- * Writing this register begins countdown with input value.
- * Reading from this register does not affect counter.
+ * dcntr - Current 16-bit downcounter value.
+ * When downcounter reaches '0' watchdog expires.
+ * Reading this register resets downcounter with
+ * 'limit' value.
+ * limit - 16-bit countdown value in 1/10th second increments.
+ * Writing this register begins countdown with input value.
+ * Reading from this register does not affect counter.
* NOTES: After watchdog reset, dcntr and limit contain '1'
*
* status register (byte access):
@@ -123,7 +123,7 @@ static struct cpwd *cpwd_device;
* |- UNUSED -| EXP | RUN |
* ---------------------------
* status- Bit 0 - Watchdog is running
- * Bit 1 - Watchdog has expired
+ * Bit 1 - Watchdog has expired
*
*** PLD register block definition (struct wd_pld_regblk)
*
@@ -197,7 +197,7 @@ static u8 cpwd_readb(void __iomem *addr)
* Because of the CP1400 defect this should only be
* called during initialization or by wd_[start|stop]timer()
*
- * index - sub-device index, or -1 for 'all'
+ * index - sub-device index, or -1 for 'all'
* enable - non-zero to enable interrupts, zero to disable
*/
static void cpwd_toggleintr(struct cpwd *p, int index, int enable)
@@ -317,13 +317,13 @@ static int cpwd_getstatus(struct cpwd *p, int index)
} else {
/* Fudge WD_EXPIRED status for defective CP1400--
* IF timer is running
- * AND brokenstop is set
- * AND an interrupt has been serviced
+ * AND brokenstop is set
+ * AND an interrupt has been serviced
* we are WD_EXPIRED.
*
* IF timer is running
- * AND brokenstop is set
- * AND no interrupt has been serviced
+ * AND brokenstop is set
+ * AND no interrupt has been serviced
* we are WD_FREERUN.
*/
if (p->broken &&
@@ -613,7 +613,7 @@ static int __devinit cpwd_probe(struct platform_device *op)
if (p->broken) {
init_timer(&cpwd_timer);
- cpwd_timer.function = cpwd_brokentimer;
+ cpwd_timer.function = cpwd_brokentimer;
cpwd_timer.data = (unsigned long) p;
cpwd_timer.expires = WD_BTIMEOUT;
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 3f3dc093ad68..f1d1da662fbe 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -201,7 +201,7 @@ static void eurwdt_ping(void)
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (count) {
+ if (count) {
if (!nowayout) {
size_t i;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 204a5603c4ae..8cb26855bfed 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -52,7 +52,7 @@ static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
-static struct pci_device_id hpwdt_devices[] = {
+static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
{0}, /* terminate list */
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index bb9750a03942..db45091ef434 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -334,7 +334,7 @@ static struct miscdevice esb_miscdev = {
/*
* Data for PCI driver interface
*/
-static struct pci_device_id esb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
{ 0, }, /* End of list */
};
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 2c6c2b4ad8bf..35a0d12dad73 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -247,7 +247,7 @@ static struct {
{NULL, 0}
};
-#define ITCO_PCI_DEVICE(dev, data) \
+#define ITCO_PCI_DEVICE(dev, data) \
.vendor = PCI_VENDOR_ID_INTEL, \
.device = dev, \
.subvendor = PCI_ANY_ID, \
@@ -262,7 +262,7 @@ static struct {
* pci_driver, because the I/O Controller Hub has also other
* functions that probably will be registered by other drivers.
*/
-static struct pci_device_id iTCO_wdt_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
new file mode 100644
index 000000000000..919bdd16136f
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -0,0 +1,572 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/fs.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/sfi.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/mrst.h>
+
+#include "intel_scu_watchdog.h"
+
+/*
+ * Bounds the number of times we will retry loading the time count.
+ * This retry is a workaround for a silicon bug.
+ */
+#define MAX_RETRY 16
+
+#define IPC_SET_WATCHDOG_TIMER 0xF8
+
+static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
+module_param(timer_margin, int, 0);
+MODULE_PARM_DESC(timer_margin,
+	"Watchdog timer margin. "
+	"Time between interrupt and resetting the system. "
+	"The range is from 1 to 160. "
+	"This is the time for all keep alives to arrive");
+
+static int timer_set = DEFAULT_TIME;
+module_param(timer_set, int, 0);
+MODULE_PARM_DESC(timer_set,
+	"Default Watchdog timer setting. "
+	"Complete cycle time. "
+	"The range is from 1 to 170. "
+	"This is the time for all keep alives to arrive");
+
+/* After watchdog device is closed, check force_boot. If:
+ * force_boot == 0, then force boot on next watchdog interrupt after close,
+ * force_boot == 1, then force boot immediately when device is closed.
+ */
+static int force_boot;
+module_param(force_boot, int, 0);
+MODULE_PARM_DESC(force_boot,
+	"A value of 1 means that the driver will reboot "
+	"the system immediately if the /dev/watchdog device is closed. "
+	"A value of 0 means that when the /dev/watchdog device is closed, "
+	"the watchdog timer will be refreshed for one more interval "
+	"of length timer_set; at the end of this interval, the "
+	"watchdog timer will reset the system."
+ );
+
+/*
+ * There is only one device in the system now; this can be made into
+ * an array in the future if we have more than one device.
+ */
+
+static struct intel_scu_watchdog_dev watchdog_device;
+
+/* Forces restart, if force_reboot is set */
+static void watchdog_fire(void)
+{
+ if (force_boot) {
+ printk(KERN_CRIT PFX "Initiating system reboot.\n");
+ emergency_restart();
+		printk(KERN_CRIT PFX "Reboot did not happen!\n");
+	} else {
+ printk(KERN_CRIT PFX "Immediate Reboot Disabled\n");
+ printk(KERN_CRIT PFX
+ "System will reset when watchdog timer times out!\n");
+ }
+}
+
+static int check_timer_margin(int new_margin)
+{
+ if ((new_margin < MIN_TIME_CYCLE) ||
+ (new_margin > MAX_TIME - timer_set)) {
+ pr_debug("Watchdog timer: value of new_margin %d is out of the range %d to %d\n",
+ new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * IPC operations
+ */
+static int watchdog_set_ipc(int soft_threshold, int threshold)
+{
+ u32 *ipc_wbuf;
+ u8 cbuf[16] = { '\0' };
+ int ipc_ret = 0;
+
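+	/*
+	 * The SCU expects two 32-bit words: the soft threshold (time to
+	 * the warning interrupt) followed by the hard threshold (time
+	 * from the warning to the reset), packed into the byte buffer
+	 * and sent as IPC command IPC_SET_WATCHDOG_TIMER.
+	 */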
+ ipc_wbuf = (u32 *)&cbuf;
+ ipc_wbuf[0] = soft_threshold;
+ ipc_wbuf[1] = threshold;
+
+ ipc_ret = intel_scu_ipc_command(
+ IPC_SET_WATCHDOG_TIMER,
+ 0,
+ ipc_wbuf,
+ 2,
+ NULL,
+ 0);
+
+ if (ipc_ret != 0)
+ pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
+
+ return ipc_ret;
+};
+
+/*
+ * Intel_SCU operations
+ */
+
+/* timer interrupt handler */
+static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
+{
+ int int_status;
+ int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
+
+ pr_debug("Watchdog timer: irq, int_status: %x\n", int_status);
+
+ if (int_status != 0)
+ return IRQ_NONE;
+
+ /* has the timer been started? If not, then this is spurious */
+ if (watchdog_device.timer_started == 0) {
+ pr_debug("Watchdog timer: spurious interrupt received\n");
+ return IRQ_HANDLED;
+ }
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the threshold */
+ iowrite32(watchdog_device.threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return IRQ_HANDLED;
+}
+
+static int intel_scu_keepalive(void)
+{
+
+ /* read eoi register - clears interrupt */
+ ioread32(watchdog_device.timer_clear_interrupt_addr);
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the soft_threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return 0;
+}
+
+static int intel_scu_stop(void)
+{
+ iowrite32(0, watchdog_device.timer_control_addr);
+ return 0;
+}
+
+static int intel_scu_set_heartbeat(u32 t)
+{
+ int ipc_ret;
+ int retry_count;
+ u32 soft_value;
+ u32 hw_pre_value;
+ u32 hw_value;
+
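+
+	/*
+	 * Derive two tick counts from the requested time 't' (seconds):
+	 * soft_threshold runs until the warning interrupt fires, and
+	 * threshold covers the remaining timer_margin before the
+	 * hardware resets the system.
+	 */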
+ watchdog_device.timer_set = t;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+ pr_debug("Watchdog timer: set_heartbeat: timer freq is %d\n",
+ watchdog_device.timer_tbl_ptr->freq_hz);
+ pr_debug("Watchdog timer: set_heartbeat: timer_set is %x (hex)\n",
+ watchdog_device.timer_set);
+	pr_debug("Watchdog timer: set_heartbeat: timer_margin is %x (hex)\n",
+ timer_margin);
+ pr_debug("Watchdog timer: set_heartbeat: threshold is %x (hex)\n",
+ watchdog_device.threshold);
+ pr_debug("Watchdog timer: set_heartbeat: soft_threshold is %x (hex)\n",
+ watchdog_device.soft_threshold);
+
+	/*
+	 * Adjust thresholds by FREQ_ADJUSTMENT factor, to make the
+	 * watchdog timing come out right.
+	 */
+ watchdog_device.threshold =
+ watchdog_device.threshold / FREQ_ADJUSTMENT;
+ watchdog_device.soft_threshold =
+ watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* send the threshold and soft_threshold via IPC to the processor */
+ ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
+ watchdog_device.threshold);
+
+ if (ipc_ret != 0) {
+ /* Make sure the watchdog timer is stopped */
+ intel_scu_stop();
+ return ipc_ret;
+ }
+
+	/*
+	 * Soft Threshold set loop. Early versions of silicon did
+	 * not always set this count correctly. This loop checks
+	 * the value and retries if it was not set correctly.
+	 */
+
+ retry_count = 0;
+ soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
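+	/*
+	 * Only the upper 16 bits are compared below: the counter starts
+	 * running as soon as the timer is enabled, so the low half will
+	 * already have moved by the time it is read back.
+	 */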
+ do {
+
+ /* Make sure timer is stopped */
+ intel_scu_stop();
+
+ if (MAX_RETRY < retry_count++) {
+ /* Unable to set timer value */
+ pr_err("Watchdog timer: Unable to set timer\n");
+ return -ENODEV;
+ }
+
+ /* set the timer to the soft threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* read count value before starting timer */
+ hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_pre_value = hw_pre_value & 0xFFFF0000;
+
+ /* Start the timer */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ /* read the value the time loaded into its count reg */
+ hw_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_value = hw_value & 0xFFFF0000;
+
+ } while (soft_value != hw_value);
+
+ watchdog_device.timer_started = 1;
+
+ return 0;
+}
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+
+ /* Set flag to indicate that watchdog device is open */
+ if (test_and_set_bit(0, &watchdog_device.driver_open))
+ return -EBUSY;
+
+ /* Check for reopen of driver. Reopens are not allowed */
+ if (watchdog_device.driver_closed)
+ return -EPERM;
+
+ return nonseekable_open(inode, file);
+}
+
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+ /*
+ * This watchdog should not be closed, after the timer
+ * is started with the WDIPC_SETTIMEOUT ioctl
+ * If force_boot is set watchdog_fire() will cause an
+ * immediate reset. If force_boot is not set, the watchdog
+ * timer is refreshed for one more interval. At the end
+ * of that interval, the watchdog timer will reset the system.
+ */
+
+ if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
+ pr_debug("Watchdog timer: intel_scu_release, without open\n");
+ return -ENOTTY;
+ }
+
+ if (!watchdog_device.timer_started) {
+ /* Just close, since timer has not been started */
+ pr_debug("Watchdog timer: closed, without starting timer\n");
+ return 0;
+ }
+
+ printk(KERN_CRIT PFX
+ "Unexpected close of /dev/watchdog!\n");
+
+ /* Since the timer was started, prevent future reopens */
+ watchdog_device.driver_closed = 1;
+
+ /* Refresh the timer for one more interval */
+ intel_scu_keepalive();
+
+ /* Reboot system (if force_boot is set) */
+ watchdog_fire();
+
+ /* We should only reach this point if force_boot is not set */
+ return 0;
+}
+
+static ssize_t intel_scu_write(struct file *file,
+ char const *data,
+ size_t len,
+ loff_t *ppos)
+{
+
+ if (watchdog_device.timer_started)
+ /* Watchdog already started, keep it alive */
+ intel_scu_keepalive();
+ else
+ /* Start watchdog with timer value set by init */
+ intel_scu_set_heartbeat(watchdog_device.timer_set);
+
+ return len;
+}
+
+static long intel_scu_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ u32 __user *p = argp;
+ u32 new_margin;
+
+
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT
+ | WDIOF_KEEPALIVEPING,
+ .firmware_version = 0, /* @todo Get from SCU via
+ ipc_get_scu_fw_version()? */
+ .identity = "Intel_SCU IOH Watchdog" /* len < 32 */
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp,
+ &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+ case WDIOC_KEEPALIVE:
+ intel_scu_keepalive();
+
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, p))
+ return -EFAULT;
+
+ if (check_timer_margin(new_margin))
+ return -EINVAL;
+
+ if (intel_scu_set_heartbeat(new_margin))
+ return -EINVAL;
+ return 0;
+ case WDIOC_GETTIMEOUT:
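+		/* Note: reports the soft threshold in timer ticks, not seconds */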
+ return put_user(watchdog_device.soft_threshold, p);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+/*
+ * Notifier for system down
+ */
+static int intel_scu_notify_sys(struct notifier_block *this,
+ unsigned long code,
+ void *another_unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ /* Turn off the watchdog timer. */
+ intel_scu_stop();
+ return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+static const struct file_operations intel_scu_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = intel_scu_write,
+ .unlocked_ioctl = intel_scu_ioctl,
+ .open = intel_scu_open,
+ .release = intel_scu_release,
+};
+
+static int __init intel_scu_watchdog_init(void)
+{
+ int ret;
+ u32 __iomem *tmp_addr;
+
+ /*
+	 * We don't really need to check this, as the SFI timer get will
+	 * fail anyway, but doing so lets us exit with a clearer reason
+	 * and no noise.
+	 *
+	 * If it isn't an Intel MID device then it doesn't have this watchdog.
+ */
+ if (!mrst_identify_cpu())
+ return -ENODEV;
+
+	/*
+	 * Check boot parameters to verify that their initial values
+	 * are in range.
+	 */
+ /* Check value of timer_set boot parameter */
+ if ((timer_set < MIN_TIME_CYCLE) ||
+ (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
+ pr_err("Watchdog timer: value of timer_set %x (hex) "
+ "is out of range from %x to %x (hex)\n",
+ timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
+ return -EINVAL;
+ }
+
+ /* Check value of timer_margin boot parameter */
+ if (check_timer_margin(timer_margin))
+ return -EINVAL;
+
+ watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
+
+ if (watchdog_device.timer_tbl_ptr == NULL) {
+ pr_debug("Watchdog timer - Intel SCU watchdog: timer is not available\n");
+ return -ENODEV;
+ }
+ /* make sure the timer exists */
+ if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
+ pr_debug("Watchdog timer - Intel SCU watchdog - timer %d does not have valid physical memory\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ if (watchdog_device.timer_tbl_ptr->irq == 0) {
+ pr_debug("Watchdog timer: timer %d invalid irq\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+ 20);
+
+ if (tmp_addr == NULL) {
+ pr_debug("Watchdog timer: timer unable to ioremap\n");
+ return -ENOMEM;
+ }
+
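+	/*
+	 * The 20-byte window holds five consecutive 32-bit registers;
+	 * carve it up in the order the hardware lays them out.
+	 */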
+ watchdog_device.timer_load_count_addr = tmp_addr++;
+ watchdog_device.timer_current_value_addr = tmp_addr++;
+ watchdog_device.timer_control_addr = tmp_addr++;
+ watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
+ watchdog_device.timer_interrupt_status_addr = tmp_addr++;
+
+ /* Set the default time values in device structure */
+
+ watchdog_device.timer_set = timer_set;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+
+ watchdog_device.intel_scu_notifier.notifier_call =
+ intel_scu_notify_sys;
+
+ ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ if (ret) {
+		pr_err("Watchdog timer: cannot register notifier (err=%d)\n", ret);
+ goto register_reboot_error;
+ }
+
+ watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+ watchdog_device.miscdev.name = "watchdog";
+ watchdog_device.miscdev.fops = &intel_scu_fops;
+
+ ret = misc_register(&watchdog_device.miscdev);
+ if (ret) {
+		pr_err("Watchdog timer: cannot register miscdev on minor=%d (err=%d)\n",
+ WATCHDOG_MINOR, ret);
+ goto misc_register_error;
+ }
+
+ ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
+ watchdog_timer_interrupt,
+ IRQF_SHARED, "watchdog",
+ &watchdog_device.timer_load_count_addr);
+ if (ret) {
+ pr_err("Watchdog timer: error requesting irq %d\n", ret);
+ goto request_irq_error;
+ }
+ /* Make sure timer is disabled before returning */
+ intel_scu_stop();
+ return 0;
+
+/* error cleanup */
+
+request_irq_error:
+ misc_deregister(&watchdog_device.miscdev);
+misc_register_error:
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+register_reboot_error:
+ intel_scu_stop();
+ iounmap(watchdog_device.timer_load_count_addr);
+ return ret;
+}
+
+static void __exit intel_scu_watchdog_exit(void)
+{
+
+ misc_deregister(&watchdog_device.miscdev);
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ /* disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+ iounmap(watchdog_device.timer_load_count_addr);
+}
+
+late_initcall(intel_scu_watchdog_init);
+module_exit(intel_scu_watchdog_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/intel_scu_watchdog.h b/drivers/watchdog/intel_scu_watchdog.h
new file mode 100644
index 000000000000..d2b074a82db6
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.h
@@ -0,0 +1,66 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#define PFX "Intel_SCU: "
+#define WDT_VER "0.3"
+
+/* minimum time between interrupts */
+#define MIN_TIME_CYCLE 1
+
+/* Time from warning to reboot is 2 seconds */
+#define DEFAULT_SOFT_TO_HARD_MARGIN 2
+
+#define MAX_TIME 170
+
+#define DEFAULT_TIME 5
+
+#define MAX_SOFT_TO_HARD_MARGIN (MAX_TIME-MIN_TIME_CYCLE)
+
+/* Adjustment to clock tick frequency to make timing come out right */
+#define FREQ_ADJUSTMENT 8
+
+struct intel_scu_watchdog_dev {
+ ulong driver_open;
+ ulong driver_closed;
+ u32 timer_started;
+ u32 timer_set;
+ u32 threshold;
+ u32 soft_threshold;
+ u32 __iomem *timer_load_count_addr;
+ u32 __iomem *timer_current_value_addr;
+ u32 __iomem *timer_control_addr;
+ u32 __iomem *timer_clear_interrupt_addr;
+ u32 __iomem *timer_interrupt_status_addr;
+ struct sfi_timer_table_entry *timer_tbl_ptr;
+ struct notifier_block intel_scu_notifier;
+ struct miscdevice miscdev;
+};
+
+extern int sfi_mtimer_num;
+
+/* extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); */
+#endif /* __INTEL_SCU_WATCHDOG_H */
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index b32c6c045b1a..6143f52ba6b8 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -69,7 +69,7 @@ static unsigned short address;
#define IT8712F_DEVID 0x8712
#define LDN_GPIO 0x07 /* GPIO and Watch Dog Timer */
-#define LDN_GAME 0x09 /* Game Port */
+#define LDN_GAME 0x09 /* Game Port */
#define WDT_CONTROL 0x71 /* WDT Register: Control */
#define WDT_CONFIG 0x72 /* WDT Register: Configuration */
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index dad29245a6a7..b1bc72f9a209 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,7 +12,7 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8702, IT8712, IT8716, IT8718, IT8720 and IT8726.
+ * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721 and IT8726.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,7 +45,7 @@
#include <asm/system.h>
-#define WATCHDOG_VERSION "1.13"
+#define WATCHDOG_VERSION "1.14"
#define WATCHDOG_NAME "IT87 WDT"
#define PFX WATCHDOG_NAME ": "
#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
@@ -54,7 +54,7 @@
/* Defaults for Module Parameter */
#define DEFAULT_NOGAMEPORT 0
#define DEFAULT_EXCLUSIVE 1
-#define DEFAULT_TIMEOUT 60
+#define DEFAULT_TIMEOUT 60
#define DEFAULT_TESTMODE 0
#define DEFAULT_NOWAYOUT WATCHDOG_NOWAYOUT
@@ -70,9 +70,9 @@
/* Configuration Registers and Functions */
#define LDNREG 0x07
#define CHIPID 0x20
-#define CHIPREV 0x22
+#define CHIPREV 0x22
#define ACTREG 0x30
-#define BASEREG 0x60
+#define BASEREG 0x60
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
@@ -82,10 +82,11 @@
#define IT8716_ID 0x8716
#define IT8718_ID 0x8718
#define IT8720_ID 0x8720
+#define IT8721_ID 0x8721
#define IT8726_ID 0x8726 /* the data sheet wrongly suggests 0x8716 */
/* GPIO Configuration Registers LDN=0x07 */
-#define WDTCTRL 0x71
+#define WDTCTRL 0x71
#define WDTCFG 0x72
#define WDTVALLSB 0x73
#define WDTVALMSB 0x74
@@ -94,7 +95,7 @@
#define WDT_CIRINT 0x80
#define WDT_MOUSEINT 0x40
#define WDT_KYBINT 0x20
-#define WDT_GAMEPORT 0x10 /* not in it8718, it8720 */
+#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721 */
#define WDT_FORCE 0x02
#define WDT_ZERO 0x01
@@ -102,11 +103,11 @@
#define WDT_TOV1 0x80
#define WDT_KRST 0x40
#define WDT_TOVE 0x20
-#define WDT_PWROK 0x10
+#define WDT_PWROK 0x10 /* not in it8721 */
#define WDT_INT_MASK 0x0f
/* CIR Configuration Register LDN=0x0a */
-#define CIR_ILS 0x70
+#define CIR_ILS 0x70
/* The default Base address is not always available, we use this */
#define CIR_BASE 0x0208
@@ -134,7 +135,7 @@
#define WDTS_USE_GP 4
#define WDTS_EXPECTED 5
-static unsigned int base, gpact, ciract, max_units;
+static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
static DEFINE_SPINLOCK(spinlock);
@@ -215,7 +216,7 @@ static inline void superio_outw(int val, int reg)
/* Internal function, should be called after superio_select(GPIO) */
static void wdt_update_timeout(void)
{
- unsigned char cfg = WDT_KRST | WDT_PWROK;
+ unsigned char cfg = WDT_KRST;
int tm = timeout;
if (testmode)
@@ -226,6 +227,9 @@ static void wdt_update_timeout(void)
else
tm /= 60;
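+	/* WDT_PWROK does not exist on the IT8721 */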
+ if (chip_type != IT8721_ID)
+ cfg |= WDT_PWROK;
+
superio_outb(cfg, WDTCFG);
superio_outb(tm, WDTVALLSB);
if (max_units > 255)
@@ -555,7 +559,6 @@ static int __init it87_wdt_init(void)
{
int rc = 0;
int try_gameport = !nogameport;
- u16 chip_type;
u8 chip_rev;
unsigned long flags;
@@ -581,6 +584,7 @@ static int __init it87_wdt_init(void)
break;
case IT8718_ID:
case IT8720_ID:
+ case IT8721_ID:
max_units = 65535;
try_gameport = 0;
break;
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
new file mode 100644
index 000000000000..684ba01fb540
--- /dev/null
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
+ * JZ4740 Watchdog driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <asm/mach-jz4740/timer.h>
+
+#define JZ_REG_WDT_TIMER_DATA 0x0
+#define JZ_REG_WDT_COUNTER_ENABLE 0x4
+#define JZ_REG_WDT_TIMER_COUNTER 0x8
+#define JZ_REG_WDT_TIMER_CONTROL 0xC
+
+#define JZ_WDT_CLOCK_PCLK 0x1
+#define JZ_WDT_CLOCK_RTC 0x2
+#define JZ_WDT_CLOCK_EXT 0x4
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+
+#define JZ_WDT_CLOCK_DIV_SHIFT 3
+
+#define JZ_WDT_CLOCK_DIV_1 (0 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_4 (1 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_16 (2 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_64 (3 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_256 (4 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_1024 (5 << JZ_WDT_CLOCK_DIV_SHIFT)
+
+#define DEFAULT_HEARTBEAT 5
+#define MAX_HEARTBEAT 2048
+
+static struct {
+ void __iomem *base;
+ struct resource *mem;
+ struct clk *rtc_clk;
+ unsigned long status;
+} jz4740_wdt;
+
+static int heartbeat = DEFAULT_HEARTBEAT;
+
+
+static void jz4740_wdt_service(void)
+{
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+}
+
+static void jz4740_wdt_set_heartbeat(int new_heartbeat)
+{
+ unsigned int rtc_clk_rate;
+ unsigned int timeout_value;
+ unsigned short clock_div = JZ_WDT_CLOCK_DIV_1;
+
+ heartbeat = new_heartbeat;
+
+ rtc_clk_rate = clk_get_rate(jz4740_wdt.rtc_clk);
+
+ timeout_value = rtc_clk_rate * heartbeat;
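+
+	/*
+	 * The counter is only 16 bits wide. Each step up the divider
+	 * ladder divides the input clock by a further factor of four,
+	 * so shave two bits off the tick count per step until it fits.
+	 */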
+ while (timeout_value > 0xffff) {
+ if (clock_div == JZ_WDT_CLOCK_DIV_1024) {
+			/*
+			 * Requested timeout too high;
+			 * use highest possible value.
+			 */
+ timeout_value = 0xffff;
+ break;
+ }
+ timeout_value >>= 2;
+ clock_div += (1 << JZ_WDT_CLOCK_DIV_SHIFT);
+ }
+
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+ writew(clock_div, jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writew((u16)timeout_value, jz4740_wdt.base + JZ_REG_WDT_TIMER_DATA);
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+ writew(clock_div | JZ_WDT_CLOCK_RTC,
+ jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writeb(0x1, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static void jz4740_wdt_enable(void)
+{
+ jz4740_timer_enable_watchdog();
+ jz4740_wdt_set_heartbeat(heartbeat);
+}
+
+static void jz4740_wdt_disable(void)
+{
+ jz4740_timer_disable_watchdog();
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static int jz4740_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &jz4740_wdt.status))
+ return -EBUSY;
+
+ jz4740_wdt_enable();
+
+ return nonseekable_open(inode, file);
+}
+
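+/*
+ * Standard watchdog write semantics: any write services the timer, and
+ * writing 'V' arms the magic close so that a subsequent release
+ * actually stops the watchdog instead of leaving it running.
+ */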
+static ssize_t jz4740_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ }
+ jz4740_wdt_service();
+ }
+
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "jz4740 Watchdog",
+};
+
+static long jz4740_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOTTY;
+ int heartbeat_seconds;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ jz4740_wdt_service();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(heartbeat_seconds, (int __user *)arg))
+ return -EFAULT;
+
+ jz4740_wdt_set_heartbeat(heartbeat_seconds);
+ return 0;
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, (int *)arg);
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int jz4740_wdt_release(struct inode *inode, struct file *file)
+{
+ jz4740_wdt_service();
+
+ if (test_and_clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status))
+ jz4740_wdt_disable();
+
+ clear_bit(WDT_IN_USE, &jz4740_wdt.status);
+ return 0;
+}
+
+static const struct file_operations jz4740_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = jz4740_wdt_write,
+ .unlocked_ioctl = jz4740_wdt_ioctl,
+ .open = jz4740_wdt_open,
+ .release = jz4740_wdt_release,
+};
+
+static struct miscdevice jz4740_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &jz4740_wdt_fops,
+};
+
+static int __devinit jz4740_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0, size;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "failed to get memory region resource\n");
+ return -ENXIO;
+ }
+
+ size = resource_size(res);
+ jz4740_wdt.mem = request_mem_region(res->start, size, pdev->name);
+ if (jz4740_wdt.mem == NULL) {
+ dev_err(dev, "failed to get memory region\n");
+ return -EBUSY;
+ }
+
+ jz4740_wdt.base = ioremap_nocache(res->start, size);
+ if (jz4740_wdt.base == NULL) {
+ dev_err(dev, "failed to map memory region\n");
+ ret = -EBUSY;
+ goto err_release_region;
+ }
+
+ jz4740_wdt.rtc_clk = clk_get(NULL, "rtc");
+ if (IS_ERR(jz4740_wdt.rtc_clk)) {
+ dev_err(dev, "cannot find RTC clock\n");
+ ret = PTR_ERR(jz4740_wdt.rtc_clk);
+ goto err_iounmap;
+ }
+
+ ret = misc_register(&jz4740_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(dev, "cannot register misc device\n");
+ goto err_disable_clk;
+ }
+
+ return 0;
+
+err_disable_clk:
+ clk_put(jz4740_wdt.rtc_clk);
+err_iounmap:
+ iounmap(jz4740_wdt.base);
+err_release_region:
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ return ret;
+}
+
+
+static int __devexit jz4740_wdt_remove(struct platform_device *pdev)
+{
+ jz4740_wdt_disable();
+ misc_deregister(&jz4740_wdt_miscdev);
+ clk_put(jz4740_wdt.rtc_clk);
+
+ iounmap(jz4740_wdt.base);
+ jz4740_wdt.base = NULL;
+
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ jz4740_wdt.mem = NULL;
+
+ return 0;
+}
+
+
+static struct platform_driver jz4740_wdt_driver = {
+ .probe = jz4740_wdt_probe,
+ .remove = __devexit_p(jz4740_wdt_remove),
+ .driver = {
+ .name = "jz4740-wdt",
+ .owner = THIS_MODULE,
+ },
+};
+
+
+static int __init jz4740_wdt_init(void)
+{
+ return platform_driver_register(&jz4740_wdt_driver);
+}
+module_init(jz4740_wdt_init);
+
+static void __exit jz4740_wdt_exit(void)
+{
+ platform_driver_unregister(&jz4740_wdt_driver);
+}
+module_exit(jz4740_wdt_exit);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("jz4740 Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:jz4740-wdt");
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 928035069396..1332b838cc58 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -54,7 +54,7 @@
/* indexes */ /* size */
#define ZFL_VERSION 0x02 /* 16 */
-#define CONTROL 0x10 /* 16 */
+#define CONTROL 0x10 /* 16 */
#define STATUS 0x12 /* 8 */
#define COUNTER_1 0x0C /* 16 */
#define COUNTER_2 0x0E /* 8 */
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 3053ff05ca41..7a82ce5a6337 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -41,7 +41,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
* to ping the watchdog.
*/
#define MAX6369_WDSET (7 << 0)
-#define MAX6369_WDI (1 << 3)
+#define MAX6369_WDI (1 << 3)
static DEFINE_SPINLOCK(io_lock);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index ea438ad53055..6709d723e017 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -2,9 +2,9 @@
* mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface
*
* Authors: Dave Updegraff <dave@cray.org>
- * Kumar Gala <galak@kernel.crashing.org>
- * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
- * ..and from sc520_wdt
+ * Kumar Gala <galak@kernel.crashing.org>
+ * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
+ * ..and from sc520_wdt
* Copyright (c) 2008 MontaVista Software, Inc.
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index b8ec7aca3c8e..2b4af222b5f2 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -172,7 +172,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (wdt->expect_close == 42)
mpcore_wdt_stop(wdt);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 08e8a6ab74e1..5ec5ac1f7878 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -190,19 +190,19 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
}
static const struct file_operations mtx1_wdt_fops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
- .open = mtx1_wdt_open,
- .write = mtx1_wdt_write,
- .release = mtx1_wdt_release,
+ .open = mtx1_wdt_open,
+ .write = mtx1_wdt_write,
+ .release = mtx1_wdt_release,
};
static struct miscdevice mtx1_wdt_misc = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mtx1_wdt_fops,
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &mtx1_wdt_fops,
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 1a50aa7079bf..267377a5a83e 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = {
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
diff --git a/drivers/watchdog/omap_wdt.h b/drivers/watchdog/omap_wdt.h
index fc02ec6a0386..09b774cf75b9 100644
--- a/drivers/watchdog/omap_wdt.h
+++ b/drivers/watchdog/omap_wdt.h
@@ -44,7 +44,7 @@
* months before firing. These limits work without scaling,
* with the 60 second default assumed by most tools and docs.
*/
-#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
+#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
#define TIMER_MARGIN_DEFAULT 60 /* 60 secs */
#define TIMER_MARGIN_MIN 1
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 3a56bc360924..139d773300c6 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -514,7 +514,7 @@ static struct miscdevice pc87413_miscdev = {
/* -- Module init functions -------------------------------------*/
/**
- * pc87413_init: module's "constructor"
+ * pc87413_init: module's "constructor"
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 64374d636f09..b8d14f88f0b5 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -817,7 +817,7 @@ static void __devexit pcipcwd_card_exit(struct pci_dev *pdev)
cards_found--;
}
-static struct pci_device_id pcipcwd_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }, /* End of list */
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index bf5b97c546eb..c7cf4cbf8ab3 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for PNX4008 board
*
* Authors: Dmitry Chigirev <source@mvista.com>,
- * Vitaly Wool <vitalywool@gmail.com>
+ * Vitaly Wool <vitalywool@gmail.com>
* Based on sa1100 driver,
* Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
*
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index ae53662c29bc..25b39bf35925 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -224,7 +224,7 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42)
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index 68e2e2d6f73d..514ec23050f7 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -114,7 +114,7 @@ static char expect_close;
* C | 6.5s 65s 650s 1300s
* D | 7s 70s 700s 1400s
* E | 7.5s 75s 750s 1500s
- * F | 8s 80s 800s 1600s
+ * F | 8s 80s 800s 1600s
*
* Another way to say the same things is:
* For N=1, Timeout = (M+1) * 0.5s
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 79906255eeb6..d5d399464599 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(wdt_lock);
#define IFACE_ON_COMMAND 1
#define REBOOT_COMMAND 2
-#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
+#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
static void wdt_send_data(unsigned char command, unsigned char data)
{
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 6fc74065abee..4e3e7eb5919c 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -1,9 +1,9 @@
/*
- * drivers/char/watchdog/shwdt.c
+ * drivers/watchdog/shwdt.c
*
* Watchdog driver for integrated watchdog in the SuperH processors.
*
- * Copyright (C) 2001, 2002, 2003 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (C) 2001 - 2010 Paul Mundt <lethal@linux-sh.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
@@ -28,11 +29,12 @@
#include <linux/ioport.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/watchdog.h>
-#define PFX "shwdt: "
+#define DRV_NAME "sh-wdt"
/*
* Default clock division ratio is 5.25 msecs. For an additional table of
@@ -62,37 +64,36 @@
* misses its deadline, the kernel timer will allow the WDT to overflow.
*/
static int clock_division_ratio = WTCSR_CKS_4096;
-
#define next_ping_period(cks) msecs_to_jiffies(cks - 4)
-static void sh_wdt_ping(unsigned long data);
-
-static unsigned long shwdt_is_open;
static const struct watchdog_info sh_wdt_info;
-static char shwdt_expect_close;
-static DEFINE_TIMER(timer, sh_wdt_ping, 0, 0);
-static unsigned long next_heartbeat;
+static struct platform_device *sh_wdt_dev;
static DEFINE_SPINLOCK(shwdt_lock);
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
-
static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned long next_heartbeat;
-/**
- * sh_wdt_start - Start the Watchdog
- *
- * Starts the watchdog.
- */
-static void sh_wdt_start(void)
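+/*
+ * Per-device state for the platform driver conversion: the register
+ * window, owning device and ping timer replace the old file-scope
+ * globals, while 'enabled' and 'expect_close' track the open state
+ * and the magic close.
+ */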
+struct sh_wdt {
+ void __iomem *base;
+ struct device *dev;
+
+ struct timer_list timer;
+
+ unsigned long enabled;
+ char expect_close;
+};
+
+static void sh_wdt_start(struct sh_wdt *wdt)
{
- __u8 csr;
unsigned long flags;
+ u8 csr;
spin_lock_irqsave(&shwdt_lock, flags);
next_heartbeat = jiffies + (heartbeat * HZ);
- mod_timer(&timer, next_ping_period(clock_division_ratio));
+ mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
csr = sh_wdt_read_csr();
csr |= WTCSR_WT | clock_division_ratio;
@@ -114,15 +115,6 @@ static void sh_wdt_start(void)
sh_wdt_write_csr(csr);
#ifdef CONFIG_CPU_SH2
- /*
- * Whoever came up with the RSTCSR semantics must've been smoking
- * some of the good stuff, since in addition to the WTCSR/WTCNT write
- * brain-damage, it's managed to fuck things up one step further..
- *
- * If we need to clear the WOVF bit, the upper byte has to be 0xa5..
- * but if we want to touch RSTE or RSTS, the upper byte has to be
- * 0x5a..
- */
csr = sh_wdt_read_rstcsr();
csr &= ~RSTCSR_RSTS;
sh_wdt_write_rstcsr(csr);
@@ -130,30 +122,23 @@ static void sh_wdt_start(void)
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_stop - Stop the Watchdog
- * Stops the watchdog.
- */
-static void sh_wdt_stop(void)
+static void sh_wdt_stop(struct sh_wdt *wdt)
{
- __u8 csr;
unsigned long flags;
+ u8 csr;
spin_lock_irqsave(&shwdt_lock, flags);
- del_timer(&timer);
+ del_timer(&wdt->timer);
csr = sh_wdt_read_csr();
csr &= ~WTCSR_TME;
sh_wdt_write_csr(csr);
+
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_keepalive - Keep the Userspace Watchdog Alive
- * The Userspace watchdog got a KeepAlive: schedule the next heartbeat.
- */
-static inline void sh_wdt_keepalive(void)
+static inline void sh_wdt_keepalive(struct sh_wdt *wdt)
{
unsigned long flags;
@@ -162,10 +147,6 @@ static inline void sh_wdt_keepalive(void)
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_set_heartbeat - Set the Userspace Watchdog heartbeat
- * Set the Userspace Watchdog heartbeat
- */
static int sh_wdt_set_heartbeat(int t)
{
unsigned long flags;
@@ -179,19 +160,14 @@ static int sh_wdt_set_heartbeat(int t)
return 0;
}
-/**
- * sh_wdt_ping - Ping the Watchdog
- * @data: Unused
- *
- * Clears overflow bit, resets timer counter.
- */
static void sh_wdt_ping(unsigned long data)
{
+ struct sh_wdt *wdt = (struct sh_wdt *)data;
unsigned long flags;
spin_lock_irqsave(&shwdt_lock, flags);
if (time_before(jiffies, next_heartbeat)) {
- __u8 csr;
+ u8 csr;
csr = sh_wdt_read_csr();
csr &= ~WTCSR_IOVF;
@@ -199,148 +175,76 @@ static void sh_wdt_ping(unsigned long data)
sh_wdt_write_cnt(0);
- mod_timer(&timer, next_ping_period(clock_division_ratio));
+ mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
} else
- printk(KERN_WARNING PFX "Heartbeat lost! Will not ping "
- "the watchdog\n");
+ dev_warn(wdt->dev, "Heartbeat lost! Will not ping "
+ "the watchdog\n");
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_open - Open the Device
- * @inode: inode of device
- * @file: file handle of device
- *
- * Watchdog device is opened and started.
- */
static int sh_wdt_open(struct inode *inode, struct file *file)
{
- if (test_and_set_bit(0, &shwdt_is_open))
+ struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
+
+ if (test_and_set_bit(0, &wdt->enabled))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
- sh_wdt_start();
+ file->private_data = wdt;
+
+ sh_wdt_start(wdt);
return nonseekable_open(inode, file);
}
-/**
- * sh_wdt_close - Close the Device
- * @inode: inode of device
- * @file: file handle of device
- *
- * Watchdog device is closed and stopped.
- */
static int sh_wdt_close(struct inode *inode, struct file *file)
{
- if (shwdt_expect_close == 42) {
- sh_wdt_stop();
+ struct sh_wdt *wdt = file->private_data;
+
+ if (wdt->expect_close == 42) {
+ sh_wdt_stop(wdt);
} else {
- printk(KERN_CRIT PFX "Unexpected close, not "
- "stopping watchdog!\n");
- sh_wdt_keepalive();
+ dev_crit(wdt->dev, "Unexpected close, not "
+ "stopping watchdog!\n");
+ sh_wdt_keepalive(wdt);
}
- clear_bit(0, &shwdt_is_open);
- shwdt_expect_close = 0;
+ clear_bit(0, &wdt->enabled);
+ wdt->expect_close = 0;
return 0;
}
-/**
- * sh_wdt_write - Write to Device
- * @file: file handle of device
- * @buf: buffer to write
- * @count: length of buffer
- * @ppos: offset
- *
- * Pings the watchdog on write.
- */
static ssize_t sh_wdt_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
+ struct sh_wdt *wdt = file->private_data;
+
if (count) {
if (!nowayout) {
size_t i;
- shwdt_expect_close = 0;
+ wdt->expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
- shwdt_expect_close = 42;
+ wdt->expect_close = 42;
}
}
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
}
return count;
}
-/**
- * sh_wdt_mmap - map WDT/CPG registers into userspace
- * @file: file structure for the device
- * @vma: VMA to map the registers into
- *
- * A simple mmap() implementation for the corner cases where the counter
- * needs to be mapped in userspace directly. Due to the relatively small
- * size of the area, neighbouring registers not necessarily tied to the
- * CPG will also be accessible through the register page, so this remains
- * configurable for users that really know what they're doing.
- *
- * Additionaly, the register page maps in the CPG register base relative
- * to the nearest page-aligned boundary, which requires that userspace do
- * the appropriate CPU subtype math for calculating the page offset for
- * the counter value.
- */
-static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int ret = -ENOSYS;
-
-#ifdef CONFIG_SH_WDT_MMAP
- unsigned long addr;
-
- /* Only support the simple cases where we map in a register page. */
- if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
- return -EINVAL;
-
- /*
- * Pick WTCNT as the start, it's usually the first register after the
- * FRQCR, and neither one are generally page-aligned out of the box.
- */
- addr = WTCNT & ~(PAGE_SIZE - 1);
-
- vma->vm_flags |= VM_IO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot)) {
- printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n",
- __func__);
- return -EAGAIN;
- }
-
- ret = 0;
-#endif
-
- return ret;
-}
-
-/**
- * sh_wdt_ioctl - Query Device
- * @file: file handle of device
- * @cmd: watchdog command
- * @arg: argument
- *
- * Query basic information from the device or ping it, as outlined by the
- * watchdog API.
- */
static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ struct sh_wdt *wdt = file->private_data;
int new_heartbeat;
int options, retval = -EINVAL;
@@ -356,18 +260,18 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
- sh_wdt_stop();
+ sh_wdt_stop(wdt);
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
- sh_wdt_start();
+ sh_wdt_start(wdt);
retval = 0;
}
return retval;
case WDIOC_KEEPALIVE:
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_heartbeat, (int *)arg))
@@ -376,7 +280,7 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
if (sh_wdt_set_heartbeat(new_heartbeat))
return -EINVAL;
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, (int *)arg);
@@ -386,20 +290,13 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
return 0;
}
-/**
- * sh_wdt_notify_sys - Notifier Handler
- * @this: notifier block
- * @code: notifier event
- * @unused: unused
- *
- * Handles specific events, such as turning off the watchdog during a
- * shutdown event.
- */
static int sh_wdt_notify_sys(struct notifier_block *this,
unsigned long code, void *unused)
{
+ struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
+
if (code == SYS_DOWN || code == SYS_HALT)
- sh_wdt_stop();
+ sh_wdt_stop(wdt);
return NOTIFY_DONE;
}
@@ -411,7 +308,6 @@ static const struct file_operations sh_wdt_fops = {
.unlocked_ioctl = sh_wdt_ioctl,
.open = sh_wdt_open,
.release = sh_wdt_close,
- .mmap = sh_wdt_mmap,
};
static const struct watchdog_info sh_wdt_info = {
@@ -431,66 +327,148 @@ static struct miscdevice sh_wdt_miscdev = {
.fops = &sh_wdt_fops,
};
-/**
- * sh_wdt_init - Initialize module
- * Registers the device and notifier handler. Actual device
- * initialization is handled by sh_wdt_open().
- */
-static int __init sh_wdt_init(void)
+static int __devinit sh_wdt_probe(struct platform_device *pdev)
{
+ struct sh_wdt *wdt;
+ struct resource *res;
int rc;
- if (clock_division_ratio < 0x5 || clock_division_ratio > 0x7) {
- clock_division_ratio = WTCSR_CKS_4096;
- printk(KERN_INFO PFX
- "clock_division_ratio value must be 0x5<=x<=0x7, using %d\n",
- clock_division_ratio);
+ /*
+ * As this driver only covers the global watchdog case, reject
+ * any attempts to register per-CPU watchdogs.
+ */
+ if (pdev->id != -1)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!res))
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), DRV_NAME))
+ return -EBUSY;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(struct sh_wdt), GFP_KERNEL);
+ if (unlikely(!wdt)) {
+ rc = -ENOMEM;
+ goto out_release;
}
- rc = sh_wdt_set_heartbeat(heartbeat);
- if (unlikely(rc)) {
- heartbeat = WATCHDOG_HEARTBEAT;
- printk(KERN_INFO PFX
- "heartbeat value must be 1<=x<=3600, using %d\n",
- heartbeat);
+ wdt->dev = &pdev->dev;
+
+ wdt->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (unlikely(!wdt->base)) {
+ rc = -ENXIO;
+ goto out_err;
}
rc = register_reboot_notifier(&sh_wdt_notifier);
if (unlikely(rc)) {
- printk(KERN_ERR PFX
+ dev_err(&pdev->dev,
"Can't register reboot notifier (err=%d)\n", rc);
- return rc;
+ goto out_unmap;
}
+ sh_wdt_miscdev.parent = wdt->dev;
+
rc = misc_register(&sh_wdt_miscdev);
if (unlikely(rc)) {
- printk(KERN_ERR PFX
+ dev_err(&pdev->dev,
"Can't register miscdev on minor=%d (err=%d)\n",
sh_wdt_miscdev.minor, rc);
- unregister_reboot_notifier(&sh_wdt_notifier);
- return rc;
+ goto out_unreg;
}
- printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
- heartbeat, nowayout);
+ init_timer(&wdt->timer);
+ wdt->timer.function = sh_wdt_ping;
+ wdt->timer.data = (unsigned long)wdt;
+ wdt->timer.expires = next_ping_period(clock_division_ratio);
+
+ platform_set_drvdata(pdev, wdt);
+ sh_wdt_dev = pdev;
+
+ dev_info(&pdev->dev, "initialized.\n");
return 0;
+
+out_unreg:
+ unregister_reboot_notifier(&sh_wdt_notifier);
+out_unmap:
+ devm_iounmap(&pdev->dev, wdt->base);
+out_err:
+ devm_kfree(&pdev->dev, wdt);
+out_release:
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+
+ return rc;
}
-/**
- * sh_wdt_exit - Deinitialize module
- * Unregisters the device and notifier handler. Actual device
- * deinitialization is handled by sh_wdt_close().
- */
-static void __exit sh_wdt_exit(void)
+static int __devexit sh_wdt_remove(struct platform_device *pdev)
{
+ struct sh_wdt *wdt = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ platform_set_drvdata(pdev, NULL);
+
misc_deregister(&sh_wdt_miscdev);
+
+ sh_wdt_dev = NULL;
+
unregister_reboot_notifier(&sh_wdt_notifier);
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+ devm_iounmap(&pdev->dev, wdt->base);
+ devm_kfree(&pdev->dev, wdt);
+
+ return 0;
}
+static struct platform_driver sh_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = sh_wdt_probe,
+ .remove = __devexit_p(sh_wdt_remove),
+};
+
+static int __init sh_wdt_init(void)
+{
+ int rc;
+
+ if (unlikely(clock_division_ratio < 0x5 ||
+ clock_division_ratio > 0x7)) {
+ clock_division_ratio = WTCSR_CKS_4096;
+
+ pr_info("%s: divisor must be 0x5<=x<=0x7, using %d\n",
+ DRV_NAME, clock_division_ratio);
+ }
+
+ rc = sh_wdt_set_heartbeat(heartbeat);
+ if (unlikely(rc)) {
+ heartbeat = WATCHDOG_HEARTBEAT;
+
+ pr_info("%s: heartbeat value must be 1<=x<=3600, using %d\n",
+ DRV_NAME, heartbeat);
+ }
+
+ pr_info("%s: configured with heartbeat=%d sec (nowayout=%d)\n",
+ DRV_NAME, heartbeat, nowayout);
+
+ return platform_driver_register(&sh_wdt_driver);
+}
+
+static void __exit sh_wdt_exit(void)
+{
+ platform_driver_unregister(&sh_wdt_driver);
+}
+module_init(sh_wdt_init);
+module_exit(sh_wdt_exit);
+
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("SuperH watchdog driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
@@ -507,6 +485,3 @@ module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-module_init(sh_wdt_init);
-module_exit(sh_wdt_exit);
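
The write handler above implements the standard watchdog magic-close protocol: any write pets the timer, and a 'V' character arms wdt->expect_close so the release path actually stops the watchdog. A minimal userspace sketch of that protocol, assuming the usual /dev/watchdog node (illustrative only, not part of the patch):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* any write or ioctl pets the timer */

	/* 'V' sets wdt->expect_close = 42, so sh_wdt_close() will stop
	 * the timer instead of leaving it armed (with nowayout off). */
	write(fd, "V", 1);
	close(fd);
	return 0;
}
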
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 8a1f0bc3e271..df88cfa05f35 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -434,11 +434,11 @@ static long wb_smsc_wdt_ioctl(struct file *file,
} uarg;
static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING |
+ .options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 0,
- .identity = "SMsC 37B787 Watchdog",
+ .identity = "SMsC 37B787 Watchdog",
};
uarg.i = (int __user *)arg;
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 833f49f43d43..100b114e3c3c 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -151,7 +151,7 @@ static int softdog_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
softdog_stop();
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 808372883e88..1bc493848ed4 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -259,7 +259,7 @@ static struct miscdevice sp5100_tco_miscdev = {
* register a pci_driver, because someone else might
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id sp5100_tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 18cdeb4c4258..5a90a4a871dd 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -68,7 +68,7 @@ struct platform_device *ts72xx_wdt_pdev;
* to control register):
* value description
* -------------------------
- * 0x00 watchdog disabled
+ * 0x00 watchdog disabled
* 0x01 250ms
* 0x02 500ms
* 0x03 1s
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index df2a64dc9672..be9c4d839e15 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -87,10 +87,10 @@ static int w83697ug_select_wd_register(void)
outb_p(0x87, WDT_EFER); /* Enter extended function mode */
outb_p(0x87, WDT_EFER); /* Again according to manual */
- outb(0x20, WDT_EFER); /* check chip version */
+ outb(0x20, WDT_EFER); /* check chip version */
version = inb(WDT_EFDR);
- if (version == 0x68) { /* W83697UG */
+ if (version == 0x68) { /* W83697UG */
printk(KERN_INFO PFX "Watchdog chip version 0x%02x = "
"W83697UG/UF found at 0x%04x\n", version, wdt_io);
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 552a4381e78f..bb03e151a1d0 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -581,7 +581,7 @@ static void __exit wdt_exit(void)
}
/**
- * wdt_init:
+ * wdt_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 5c2521fc836c..a2f01c9f5c34 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -281,7 +281,7 @@ static int wdt977_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
wdt977_stop();
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 6130c88fa5ac..172dad6c7693 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -31,7 +31,7 @@
* Jeff Garzik : PCI cleanups
* Tigran Aivazian : Restructured wdtpci_init_one() to handle
* failures
- * Joel Becker : Added WDIOC_GET/SETTIMEOUT
+ * Joel Becker : Added WDIOC_GET/SETTIMEOUT
* Zwane Mwaikambo : Magic char closing, locking changes,
* cleanups
* Matt Domsch : nowayout module option
@@ -727,7 +727,7 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id wdtpci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
.device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
@@ -764,7 +764,7 @@ static void __exit wdtpci_cleanup(void)
/**
- * wdtpci_init:
+ * wdtpci_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
new file mode 100644
index 000000000000..49bd9d395562
--- /dev/null
+++ b/drivers/watchdog/xen_wdt.c
@@ -0,0 +1,359 @@
+/*
+ * Xen Watchdog Driver
+ *
+ * (c) Copyright 2010 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define DRV_NAME "wdt"
+#define DRV_VERSION "0.01"
+#define PFX DRV_NAME ": "
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <xen/xen.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/sched.h>
+
+static struct platform_device *platform_device;
+static DEFINE_SPINLOCK(wdt_lock);
+static struct sched_watchdog wdt;
+static __kernel_time_t wdt_expires;
+static bool is_active, expect_release;
+
+#define WATCHDOG_TIMEOUT 60 /* in seconds */
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+ "(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static inline __kernel_time_t set_timeout(void)
+{
+ wdt.timeout = timeout;
+ return ktime_to_timespec(ktime_get()).tv_sec + timeout;
+}
+
+static int xen_wdt_start(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (!wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -EBUSY;
+ if (err > 0) {
+ wdt.id = err;
+ wdt_expires = expires;
+ err = 0;
+ } else
+ BUG_ON(!err);
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_stop(void)
+{
+ int err = 0;
+
+ spin_lock(&wdt_lock);
+
+ wdt.timeout = 0;
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ if (!err)
+ wdt.id = 0;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_kick(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -ENXIO;
+ if (!err)
+ wdt_expires = expires;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_open(struct inode *inode, struct file *file)
+{
+ int err;
+
+ /* /dev/watchdog can only be opened once */
+ if (xchg(&is_active, true))
+ return -EBUSY;
+
+ err = xen_wdt_start();
+ if (err == -EBUSY)
+ err = xen_wdt_kick();
+ return err ?: nonseekable_open(inode, file);
+}
+
+static int xen_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_release)
+ xen_wdt_stop();
+ else {
+ printk(KERN_CRIT PFX
+ "unexpected close, not stopping watchdog!\n");
+ xen_wdt_kick();
+ }
+ is_active = false;
+ expect_release = false;
+ return 0;
+}
+
+static ssize_t xen_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* in case it was set long ago */
+ expect_release = false;
+
+ /* scan to see whether or not we got the magic
+ character */
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_release = true;
+ }
+ }
+
+ /* someone wrote to us, we should reload the timer */
+ xen_wdt_kick();
+ }
+ return len;
+}
+
+static long xen_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_options, retval = -EINVAL;
+ int new_timeout;
+ int __user *argp = (void __user *)arg;
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, argp);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_options, argp))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD)
+ retval = xen_wdt_stop();
+ if (new_options & WDIOS_ENABLECARD) {
+ retval = xen_wdt_start();
+ if (retval == -EBUSY)
+ retval = xen_wdt_kick();
+ }
+ return retval;
+
+ case WDIOC_KEEPALIVE:
+ xen_wdt_kick();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, argp))
+ return -EFAULT;
+ if (!new_timeout)
+ return -EINVAL;
+ timeout = new_timeout;
+ xen_wdt_kick();
+ /* fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, argp);
+
+ case WDIOC_GETTIMELEFT:
+ retval = wdt_expires - ktime_to_timespec(ktime_get()).tv_sec;
+ return put_user(retval, argp);
+ }
+
+ return -ENOTTY;
+}
+
+static const struct file_operations xen_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = xen_wdt_write,
+ .unlocked_ioctl = xen_wdt_ioctl,
+ .open = xen_wdt_open,
+ .release = xen_wdt_release,
+};
+
+static struct miscdevice xen_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &xen_wdt_fops,
+};
+
+static int __devinit xen_wdt_probe(struct platform_device *dev)
+{
+ struct sched_watchdog wd = { .id = ~0 };
+ int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
+
+ switch (ret) {
+ case -EINVAL:
+ if (!timeout) {
+ timeout = WATCHDOG_TIMEOUT;
+ printk(KERN_INFO PFX
+ "timeout value invalid, using %d\n", timeout);
+ }
+
+ ret = misc_register(&xen_wdt_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (%d)\n",
+ WATCHDOG_MINOR, ret);
+ break;
+ }
+
+ printk(KERN_INFO PFX
+ "initialized (timeout=%ds, nowayout=%d)\n",
+ timeout, nowayout);
+ break;
+
+ case -ENOSYS:
+ printk(KERN_INFO PFX "not supported\n");
+ ret = -ENODEV;
+ break;
+
+ default:
+ printk(KERN_INFO PFX "bogus return value %d\n", ret);
+ break;
+ }
+
+ return ret;
+}
+
+static int __devexit xen_wdt_remove(struct platform_device *dev)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ xen_wdt_stop();
+
+ misc_deregister(&xen_wdt_miscdev);
+
+ return 0;
+}
+
+static void xen_wdt_shutdown(struct platform_device *dev)
+{
+ xen_wdt_stop();
+}
+
+static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return xen_wdt_stop();
+}
+
+static int xen_wdt_resume(struct platform_device *dev)
+{
+ return xen_wdt_start();
+}
+
+static struct platform_driver xen_wdt_driver = {
+ .probe = xen_wdt_probe,
+ .remove = __devexit_p(xen_wdt_remove),
+ .shutdown = xen_wdt_shutdown,
+ .suspend = xen_wdt_suspend,
+ .resume = xen_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+};
+
+static int __init xen_wdt_init_module(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Xen WatchDog Timer Driver v%s\n", DRV_VERSION);
+
+ err = platform_driver_register(&xen_wdt_driver);
+ if (err)
+ return err;
+
+ platform_device = platform_device_register_simple(DRV_NAME,
+ -1, NULL, 0);
+ if (IS_ERR(platform_device)) {
+ err = PTR_ERR(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ }
+
+ return err;
+}
+
+static void __exit xen_wdt_cleanup_module(void)
+{
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ printk(KERN_INFO PFX "module unloaded\n");
+}
+
+module_init(xen_wdt_init_module);
+module_exit(xen_wdt_cleanup_module);
+
+MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>");
+MODULE_DESCRIPTION("Xen WatchDog Timer Driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
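
The ioctl switch above wires the Xen watchdog into the generic watchdog API, including a WDIOC_GETTIMELEFT answer derived from wdt_expires. A hedged userspace sketch exercising those paths (the timeout value is illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);
	int timeout = 30, left = 0;

	if (fd < 0)
		return 1;

	/* SETTIMEOUT kicks the dog and falls through to GETTIMEOUT,
	 * so 'timeout' comes back with the value actually in force. */
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);

	/* wdt_expires minus the current time, per the handler above */
	ioctl(fd, WDIOC_GETTIMELEFT, &left);
	printf("timeout=%ds, %ds left\n", timeout, left);
	return 0;
}
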
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 07bec09d1dad..a59638b37c1a 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -76,10 +76,20 @@ config XEN_XENBUS_FRONTEND
config XEN_GNTDEV
tristate "userspace grant access device driver"
depends on XEN
+ default m
select MMU_NOTIFIER
help
Allows userspace processes to use grants.
+config XEN_GRANT_DEV_ALLOC
+ tristate "User-space grant reference allocator driver"
+ depends on XEN
+ default m
+ help
+ Allows userspace processes to create pages with access granted
+ to other domains. This can be used to implement frontend drivers
+ or as part of an inter-domain shared memory channel.
+
config XEN_PLATFORM_PCI
tristate "xen platform pci device driver"
depends on XEN_PVHVM && PCI
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 5088cc2e6fe2..f420f1ff7f13 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
-obj-y += grant-table.o features.o events.o manage.o
+obj-y += grant-table.o features.o events.o manage.o balloon.o
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
@@ -7,9 +7,10 @@ CFLAGS_features.o := $(nostackp)
obj-$(CONFIG_BLOCK) += biomerge.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
-obj-$(CONFIG_XEN_BALLOON) += balloon.o
+obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
+obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
@@ -18,5 +19,6 @@ obj-$(CONFIG_XEN_DOM0) += pci.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
+xen-gntalloc-y := gntalloc.o
xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 718050ace08f..043af8ad6b60 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -1,6 +1,4 @@
/******************************************************************************
- * balloon.c
- *
* Xen balloon driver - enables returning/claiming memory to/from Xen.
*
* Copyright (c) 2003, B Dragovic
@@ -33,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
@@ -42,13 +39,11 @@
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/e820.h>
@@ -58,35 +53,29 @@
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
-#include <xen/xenbus.h>
+#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
-#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
-
-#define BALLOON_CLASS_NAME "xen_memory"
+/*
+ * balloon_process() state:
+ *
+ * BP_DONE: done or nothing to do,
+ * BP_EAGAIN: error, go to sleep,
+ * BP_ECANCELED: error, balloon operation canceled.
+ */
-struct balloon_stats {
- /* We aim for 'current allocation' == 'target allocation'. */
- unsigned long current_pages;
- unsigned long target_pages;
- /*
- * Drivers may alter the memory reservation independently, but they
- * must inform the balloon driver so we avoid hitting the hard limit.
- */
- unsigned long driver_pages;
- /* Number of pages in high- and low-memory balloons. */
- unsigned long balloon_low;
- unsigned long balloon_high;
+enum bp_state {
+ BP_DONE,
+ BP_EAGAIN,
+ BP_ECANCELED
};
-static DEFINE_MUTEX(balloon_mutex);
-
-static struct sys_device balloon_sysdev;
-static int register_balloon(struct sys_device *sysdev);
+static DEFINE_MUTEX(balloon_mutex);
-static struct balloon_stats balloon_stats;
+struct balloon_stats balloon_stats;
+EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
@@ -104,8 +93,7 @@ static LIST_HEAD(ballooned_pages);
/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
-static DECLARE_WORK(balloon_worker, balloon_process);
-static struct timer_list balloon_timer;
+static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
@@ -140,14 +128,17 @@ static void balloon_append(struct page *page)
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-static struct page *balloon_retrieve(void)
+static struct page *balloon_retrieve(bool prefer_highmem)
{
struct page *page;
if (list_empty(&ballooned_pages))
return NULL;
- page = list_entry(ballooned_pages.next, struct page, lru);
+ if (prefer_highmem)
+ page = list_entry(ballooned_pages.prev, struct page, lru);
+ else
+ page = list_entry(ballooned_pages.next, struct page, lru);
list_del(&page->lru);
if (PageHighMem(page)) {
@@ -177,9 +168,29 @@ static struct page *balloon_next_page(struct page *page)
return list_entry(next, struct page, lru);
}
-static void balloon_alarm(unsigned long unused)
+static enum bp_state update_schedule(enum bp_state state)
{
- schedule_work(&balloon_worker);
+ if (state == BP_DONE) {
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.retry_count = 1;
+ return BP_DONE;
+ }
+
+ ++balloon_stats.retry_count;
+
+ if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
+ balloon_stats.retry_count > balloon_stats.max_retry_count) {
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.retry_count = 1;
+ return BP_ECANCELED;
+ }
+
+ balloon_stats.schedule_delay <<= 1;
+
+ if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
+ balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
+
+ return BP_EAGAIN;
}
static unsigned long current_target(void)
@@ -194,11 +205,11 @@ static unsigned long current_target(void)
return target;
}
-static int increase_reservation(unsigned long nr_pages)
+static enum bp_state increase_reservation(unsigned long nr_pages)
{
+ int rc;
unsigned long pfn, i;
struct page *page;
- long rc;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
@@ -210,7 +221,10 @@ static int increase_reservation(unsigned long nr_pages)
page = balloon_first_page();
for (i = 0; i < nr_pages; i++) {
- BUG_ON(page == NULL);
+ if (!page) {
+ nr_pages = i;
+ break;
+ }
frame_list[i] = page_to_pfn(page);
page = balloon_next_page(page);
}
@@ -218,11 +232,11 @@ static int increase_reservation(unsigned long nr_pages)
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
- if (rc < 0)
- goto out;
+ if (rc <= 0)
+ return BP_EAGAIN;
for (i = 0; i < rc; i++) {
- page = balloon_retrieve();
+ page = balloon_retrieve(false);
BUG_ON(page == NULL);
pfn = page_to_pfn(page);
@@ -249,15 +263,14 @@ static int increase_reservation(unsigned long nr_pages)
balloon_stats.current_pages += rc;
- out:
- return rc < 0 ? rc : rc != nr_pages;
+ return BP_DONE;
}
-static int decrease_reservation(unsigned long nr_pages)
+static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
+ enum bp_state state = BP_DONE;
unsigned long pfn, i;
struct page *page;
- int need_sleep = 0;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -269,9 +282,9 @@ static int decrease_reservation(unsigned long nr_pages)
nr_pages = ARRAY_SIZE(frame_list);
for (i = 0; i < nr_pages; i++) {
- if ((page = alloc_page(GFP_BALLOON)) == NULL) {
+ if ((page = alloc_page(gfp)) == NULL) {
nr_pages = i;
- need_sleep = 1;
+ state = BP_EAGAIN;
break;
}
@@ -307,7 +320,7 @@ static int decrease_reservation(unsigned long nr_pages)
balloon_stats.current_pages -= nr_pages;
- return need_sleep;
+ return state;
}
/*
@@ -318,77 +331,101 @@ static int decrease_reservation(unsigned long nr_pages)
*/
static void balloon_process(struct work_struct *work)
{
- int need_sleep = 0;
+ enum bp_state state = BP_DONE;
long credit;
mutex_lock(&balloon_mutex);
do {
credit = current_target() - balloon_stats.current_pages;
+
if (credit > 0)
- need_sleep = (increase_reservation(credit) != 0);
+ state = increase_reservation(credit);
+
if (credit < 0)
- need_sleep = (decrease_reservation(-credit) != 0);
+ state = decrease_reservation(-credit, GFP_BALLOON);
+
+ state = update_schedule(state);
#ifndef CONFIG_PREEMPT
if (need_resched())
schedule();
#endif
- } while ((credit != 0) && !need_sleep);
+ } while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
- if (current_target() != balloon_stats.current_pages)
- mod_timer(&balloon_timer, jiffies + HZ);
+ if (state == BP_EAGAIN)
+ schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
mutex_unlock(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
-static void balloon_set_new_target(unsigned long target)
+void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
- schedule_work(&balloon_worker);
+ schedule_delayed_work(&balloon_worker, 0);
}
+EXPORT_SYMBOL_GPL(balloon_set_new_target);
-static struct xenbus_watch target_watch =
-{
- .node = "memory/target"
-};
-
-/* React to a change in the target key */
-static void watch_target(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+/**
+ * alloc_xenballooned_pages - get pages that have been ballooned out
+ * @nr_pages: Number of pages to get
+ * @pages: pages returned
+ * @return 0 on success, error otherwise
+ */
+int alloc_xenballooned_pages(int nr_pages, struct page** pages)
{
- unsigned long long new_target;
- int err;
-
- err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
- if (err != 1) {
- /* This is ok (for domain0 at least) - so just return */
- return;
+ int pgno = 0;
+ struct page* page;
+ mutex_lock(&balloon_mutex);
+ while (pgno < nr_pages) {
+ page = balloon_retrieve(true);
+ if (page) {
+ pages[pgno++] = page;
+ } else {
+ enum bp_state st;
+ st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
+ if (st != BP_DONE)
+ goto out_undo;
+ }
}
-
- /* The given memory/target value is in KiB, so it needs converting to
- * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
- */
- balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+ mutex_unlock(&balloon_mutex);
+ return 0;
+ out_undo:
+ while (pgno)
+ balloon_append(pages[--pgno]);
+ /* Free the memory back to the kernel soon */
+ schedule_delayed_work(&balloon_worker, 0);
+ mutex_unlock(&balloon_mutex);
+ return -ENOMEM;
}
+EXPORT_SYMBOL(alloc_xenballooned_pages);
-static int balloon_init_watcher(struct notifier_block *notifier,
- unsigned long event,
- void *data)
+/**
+ * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void free_xenballooned_pages(int nr_pages, struct page** pages)
{
- int err;
+ int i;
- err = register_xenbus_watch(&target_watch);
- if (err)
- printk(KERN_ERR "Failed to set balloon watcher\n");
+ mutex_lock(&balloon_mutex);
- return NOTIFY_DONE;
-}
+ for (i = 0; i < nr_pages; i++) {
+ if (pages[i])
+ balloon_append(pages[i]);
+ }
+
+ /* The balloon may be too large now. Shrink it if needed. */
+ if (current_target() != balloon_stats.current_pages)
+ schedule_delayed_work(&balloon_worker, 0);
-static struct notifier_block xenstore_notifier;
+ mutex_unlock(&balloon_mutex);
+}
+EXPORT_SYMBOL(free_xenballooned_pages);
static int __init balloon_init(void)
{
@@ -398,7 +435,7 @@ static int __init balloon_init(void)
if (!xen_domain())
return -ENODEV;
- pr_info("xen_balloon: Initialising balloon driver.\n");
+ pr_info("xen/balloon: Initialising balloon driver.\n");
if (xen_pv_domain())
nr_pages = xen_start_info->nr_pages;
@@ -408,13 +445,11 @@ static int __init balloon_init(void)
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
- balloon_stats.driver_pages = 0UL;
-
- init_timer(&balloon_timer);
- balloon_timer.data = 0;
- balloon_timer.function = balloon_alarm;
- register_balloon(&balloon_sysdev);
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.max_schedule_delay = 32;
+ balloon_stats.retry_count = 1;
+ balloon_stats.max_retry_count = RETRY_UNLIMITED;
/*
* Initialise the balloon with excess memory space. We need
@@ -436,153 +471,9 @@ static int __init balloon_init(void)
__balloon_append(page);
}
- target_watch.callback = watch_target;
- xenstore_notifier.notifier_call = balloon_init_watcher;
-
- register_xenstore_notifier(&xenstore_notifier);
-
return 0;
}
subsys_initcall(balloon_init);
-static void balloon_exit(void)
-{
- /* XXX - release balloon here */
- return;
-}
-
-module_exit(balloon_exit);
-
-#define BALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
- char *buf) \
- { \
- return sprintf(buf, format, ##args); \
- } \
- static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
-
-BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
-BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
-BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
-BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
-
-static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
-}
-
-static ssize_t store_target_kb(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf,
- size_t count)
-{
- char *endchar;
- unsigned long long target_bytes;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
-
- balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-
- return count;
-}
-
-static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
- show_target_kb, store_target_kb);
-
-
-static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%llu\n",
- (unsigned long long)balloon_stats.target_pages
- << PAGE_SHIFT);
-}
-
-static ssize_t store_target(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf,
- size_t count)
-{
- char *endchar;
- unsigned long long target_bytes;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- target_bytes = memparse(buf, &endchar);
-
- balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-
- return count;
-}
-
-static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
- show_target, store_target);
-
-
-static struct sysdev_attribute *balloon_attrs[] = {
- &attr_target_kb,
- &attr_target,
-};
-
-static struct attribute *balloon_info_attrs[] = {
- &attr_current_kb.attr,
- &attr_low_kb.attr,
- &attr_high_kb.attr,
- &attr_driver_kb.attr,
- NULL
-};
-
-static struct attribute_group balloon_info_group = {
- .name = "info",
- .attrs = balloon_info_attrs,
-};
-
-static struct sysdev_class balloon_sysdev_class = {
- .name = BALLOON_CLASS_NAME,
-};
-
-static int register_balloon(struct sys_device *sysdev)
-{
- int i, error;
-
- error = sysdev_class_register(&balloon_sysdev_class);
- if (error)
- return error;
-
- sysdev->id = 0;
- sysdev->cls = &balloon_sysdev_class;
-
- error = sysdev_register(sysdev);
- if (error) {
- sysdev_class_unregister(&balloon_sysdev_class);
- return error;
- }
-
- for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
- error = sysdev_create_file(sysdev, balloon_attrs[i]);
- if (error)
- goto fail;
- }
-
- error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
- if (error)
- goto fail;
-
- return 0;
-
- fail:
- while (--i >= 0)
- sysdev_remove_file(sysdev, balloon_attrs[i]);
- sysdev_unregister(sysdev);
- sysdev_class_unregister(&balloon_sysdev_class);
- return error;
-}
-
MODULE_LICENSE("GPL");
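
With balloon_stats and the alloc/free pair exported above, other drivers can borrow ballooned-out pages instead of allocating populated RAM. A sketch of a hypothetical in-kernel consumer, using only the two signatures introduced by this patch (the function names and page count here are invented for illustration):

#include <linux/mm.h>
#include <xen/balloon.h>

#define NR_SHARED 4	/* illustrative page count */

static struct page *shared_pages[NR_SHARED];

static int example_setup(void)	/* hypothetical caller */
{
	/* Pull pages out of the balloon; this may shrink the
	 * reservation further via decrease_reservation(..., GFP_HIGHUSER). */
	int rc = alloc_xenballooned_pages(NR_SHARED, shared_pages);
	if (rc)
		return rc;	/* -ENOMEM if the balloon can't supply them */

	/* ... grant the pages to a foreign domain here ... */
	return 0;
}

static void example_teardown(void)
{
	/* Return the pages; the delayed worker rebalances the target. */
	free_xenballooned_pages(NR_SHARED, shared_pages);
}
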
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 65f5068afd84..02b5a9c05cfa 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -56,6 +56,8 @@
*/
static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static LIST_HEAD(xen_irq_list_head);
+
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
@@ -85,7 +87,9 @@ enum xen_irq_type {
*/
struct irq_info
{
+ struct list_head list;
enum xen_irq_type type; /* type */
+ unsigned irq;
unsigned short evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
@@ -103,23 +107,10 @@ struct irq_info
#define PIRQ_NEEDS_EOI (1 << 0)
#define PIRQ_SHAREABLE (1 << 1)
-static struct irq_info *irq_info;
-static int *pirq_to_irq;
-
static int *evtchn_to_irq;
-struct cpu_evtchn_s {
- unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
-};
-static __initdata struct cpu_evtchn_s init_evtchn_mask = {
- .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
-};
-static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
-
-static inline unsigned long *cpu_evtchn_mask(int cpu)
-{
- return cpu_evtchn_mask_p[cpu].bits;
-}
+static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+ cpu_evtchn_mask);
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
@@ -128,46 +119,86 @@ static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
-/* Constructor for packed IRQ information. */
-static struct irq_info mk_unbound_info(void)
+/* Get info for IRQ */
+static struct irq_info *info_for_irq(unsigned irq)
+{
+ return get_irq_data(irq);
+}
+
+/* Constructors for packed IRQ information. */
+static void xen_irq_info_common_init(struct irq_info *info,
+ unsigned irq,
+ enum xen_irq_type type,
+ unsigned short evtchn,
+ unsigned short cpu)
{
- return (struct irq_info) { .type = IRQT_UNBOUND };
+
+ BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+ info->type = type;
+ info->irq = irq;
+ info->evtchn = evtchn;
+ info->cpu = cpu;
+
+ evtchn_to_irq[evtchn] = irq;
}
-static struct irq_info mk_evtchn_info(unsigned short evtchn)
+static void xen_irq_info_evtchn_init(unsigned irq,
+ unsigned short evtchn)
{
- return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
- .cpu = 0 };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}
-static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+static void xen_irq_info_ipi_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ enum ipi_vector ipi)
{
- return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
- .cpu = 0, .u.ipi = ipi };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
+
+ info->u.ipi = ipi;
+
+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}
-static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+static void xen_irq_info_virq_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ unsigned short virq)
{
- return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
- .cpu = 0, .u.virq = virq };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
+
+ info->u.virq = virq;
+
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
}
-static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
- unsigned short gsi, unsigned short vector)
+static void xen_irq_info_pirq_init(unsigned irq,
+ unsigned short evtchn,
+ unsigned short pirq,
+ unsigned short gsi,
+ unsigned short vector,
+ unsigned char flags)
{
- return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
- .cpu = 0,
- .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
+
+ info->u.pirq.pirq = pirq;
+ info->u.pirq.gsi = gsi;
+ info->u.pirq.vector = vector;
+ info->u.pirq.flags = flags;
}
/*
* Accessors for packed IRQ information.
*/
-static struct irq_info *info_for_irq(unsigned irq)
-{
- return &irq_info[irq];
-}
-
static unsigned int evtchn_from_irq(unsigned irq)
{
if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
@@ -212,26 +243,6 @@ static unsigned pirq_from_irq(unsigned irq)
return info->u.pirq.pirq;
}
-static unsigned gsi_from_irq(unsigned irq)
-{
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(info == NULL);
- BUG_ON(info->type != IRQT_PIRQ);
-
- return info->u.pirq.gsi;
-}
-
-static unsigned vector_from_irq(unsigned irq)
-{
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(info == NULL);
- BUG_ON(info->type != IRQT_PIRQ);
-
- return info->u.pirq.vector;
-}
-
static enum xen_irq_type type_from_irq(unsigned irq)
{
return info_for_irq(irq)->type;
@@ -267,7 +278,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
unsigned int idx)
{
return (sh->evtchn_pending[idx] &
- cpu_evtchn_mask(cpu)[idx] &
+ per_cpu(cpu_evtchn_mask, cpu)[idx] &
~sh->evtchn_mask[idx]);
}
@@ -280,28 +291,28 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
- clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
- set_bit(chn, cpu_evtchn_mask(cpu));
+ clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
+ set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
- irq_info[irq].cpu = cpu;
+ info_for_irq(irq)->cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
int i;
#ifdef CONFIG_SMP
- struct irq_desc *desc;
+ struct irq_info *info;
/* By default all event channels notify CPU#0. */
- for_each_irq_desc(i, desc) {
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ struct irq_desc *desc = irq_to_desc(info->irq);
cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
}
#endif
for_each_possible_cpu(i)
- memset(cpu_evtchn_mask(i),
- (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
-
+ memset(per_cpu(cpu_evtchn_mask, i),
+ (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}
static inline void clear_evtchn(int port)
@@ -376,7 +387,28 @@ static void unmask_evtchn(int port)
put_cpu();
}
-static int xen_allocate_irq_dynamic(void)
+static void xen_irq_init(unsigned irq)
+{
+ struct irq_info *info;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+#ifdef CONFIG_SMP
+ /* By default all event channels notify CPU#0. */
+ cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+#endif
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ panic("Unable to allocate metadata for IRQ%d\n", irq);
+
+ info->type = IRQT_UNBOUND;
+
+ set_irq_data(irq, info);
+
+ list_add_tail(&info->list, &xen_irq_list_head);
+}
+
+static int __must_check xen_allocate_irq_dynamic(void)
{
int first = 0;
int irq;
@@ -393,22 +425,14 @@ static int xen_allocate_irq_dynamic(void)
first = get_nr_irqs_gsi();
#endif
-retry:
irq = irq_alloc_desc_from(first, -1);
- if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
- printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
- first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
- goto retry;
- }
-
- if (irq < 0)
- panic("No available IRQ to bind to: increase nr_irqs!\n");
+ xen_irq_init(irq);
return irq;
}
-static int xen_allocate_irq_gsi(unsigned gsi)
+static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
int irq;
@@ -423,17 +447,25 @@ static int xen_allocate_irq_gsi(unsigned gsi)
/* Legacy IRQ descriptors are already allocated by the arch. */
if (gsi < NR_IRQS_LEGACY)
- return gsi;
+ irq = gsi;
+ else
+ irq = irq_alloc_desc_at(gsi, -1);
- irq = irq_alloc_desc_at(gsi, -1);
- if (irq < 0)
- panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
+ xen_irq_init(irq);
return irq;
}
static void xen_free_irq(unsigned irq)
{
+ struct irq_info *info = get_irq_data(irq);
+
+ list_del(&info->list);
+
+ set_irq_data(irq, NULL);
+
+ kfree(info);
+
/* Legacy IRQ descriptors are managed by the arch. */
if (irq < NR_IRQS_LEGACY)
return;
@@ -563,51 +595,39 @@ static void ack_pirq(struct irq_data *data)
static int find_irq_by_gsi(unsigned gsi)
{
- int irq;
+ struct irq_info *info;
- for (irq = 0; irq < nr_irqs; irq++) {
- struct irq_info *info = info_for_irq(irq);
-
- if (info == NULL || info->type != IRQT_PIRQ)
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info->type != IRQT_PIRQ)
continue;
- if (gsi_from_irq(irq) == gsi)
- return irq;
+ if (info->u.pirq.gsi == gsi)
+ return info->irq;
}
return -1;
}
-int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
+int xen_allocate_pirq_gsi(unsigned gsi)
{
- return xen_map_pirq_gsi(gsi, gsi, shareable, name);
+ return gsi;
}
-/* xen_map_pirq_gsi might allocate irqs from the top down, as a
- * consequence don't assume that the irq number returned has a low value
- * or can be used as a pirq number unless you know otherwise.
- *
- * One notable exception is when xen_map_pirq_gsi is called passing an
- * hardware gsi as argument, in that case the irq number returned
- * matches the gsi number passed as second argument.
+/*
+ * Do not make any assumptions regarding the relationship between the
+ * IRQ number returned here and the Xen pirq argument.
*
* Note: We don't assign an event channel until the irq actually started
* up. Return an existing irq if we've already got one for the gsi.
*/
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ unsigned pirq, int shareable, char *name)
{
- int irq = 0;
+ int irq = -1;
struct physdev_irq irq_op;
spin_lock(&irq_mapping_update_lock);
- if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
- printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
- pirq > nr_irqs ? "pirq" :"",
- gsi > nr_irqs ? "gsi" : "");
- goto out;
- }
-
irq = find_irq_by_gsi(gsi);
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
@@ -616,6 +636,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
}
irq = xen_allocate_irq_gsi(gsi);
+ if (irq < 0)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
@@ -633,9 +655,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
goto out;
}
- irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
- irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
- pirq_to_irq[pirq] = irq;
+ xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+ shareable ? PIRQ_SHAREABLE : 0);
out:
spin_unlock(&irq_mapping_update_lock);
@@ -672,8 +693,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
- irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
- pirq_to_irq[pirq] = irq;
+ xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -709,9 +729,6 @@ int xen_destroy_irq(int irq)
goto out;
}
}
- pirq_to_irq[info->u.pirq.pirq] = -1;
-
- irq_info[irq] = mk_unbound_info();
xen_free_irq(irq);
@@ -720,19 +737,26 @@ out:
return rc;
}
-int xen_vector_from_irq(unsigned irq)
+int xen_irq_from_pirq(unsigned pirq)
{
- return vector_from_irq(irq);
-}
+ int irq;
-int xen_gsi_from_irq(unsigned irq)
-{
- return gsi_from_irq(irq);
-}
+ struct irq_info *info;
-int xen_irq_from_pirq(unsigned pirq)
-{
- return pirq_to_irq[pirq];
+ spin_lock(&irq_mapping_update_lock);
+
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info == NULL || info->type != IRQT_PIRQ)
+ continue;
+ irq = info->irq;
+ if (info->u.pirq.pirq == pirq)
+ goto out;
+ }
+ irq = -1;
+out:
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
}
int bind_evtchn_to_irq(unsigned int evtchn)
@@ -745,14 +769,16 @@ int bind_evtchn_to_irq(unsigned int evtchn)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
+ if (irq == -1)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_fasteoi_irq, "event");
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_evtchn_info(evtchn);
+ xen_irq_info_evtchn_init(irq, evtchn);
}
+out:
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -782,9 +808,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
BUG();
evtchn = bind_ipi.port;
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_ipi_info(evtchn, ipi);
- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+ xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
@@ -821,6 +845,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
+ if (irq == -1)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
@@ -832,14 +858,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
BUG();
evtchn = bind_virq.port;
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_virq_info(evtchn, virq);
-
- per_cpu(virq_to_irq, cpu)[virq] = irq;
+ xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
+out:
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -876,11 +900,9 @@ static void unbind_from_irq(unsigned int irq)
evtchn_to_irq[evtchn] = -1;
}
- if (irq_info[irq].type != IRQT_UNBOUND) {
- irq_info[irq] = mk_unbound_info();
+ BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
- xen_free_irq(irq);
- }
+ xen_free_irq(irq);
spin_unlock(&irq_mapping_update_lock);
}
@@ -894,6 +916,8 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
int retval;
irq = bind_evtchn_to_irq(evtchn);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -935,6 +959,8 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
int retval;
irq = bind_virq_to_irq(virq, cpu);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -986,7 +1012,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
struct shared_info *sh = HYPERVISOR_shared_info;
int cpu = smp_processor_id();
- unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
+ unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
int i;
unsigned long flags;
static DEFINE_SPINLOCK(debug_lock);
@@ -1064,6 +1090,13 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) (w & ((~0UL) << i))
/*
* Search the CPUs pending events bitmasks. For each one found, map
@@ -1076,6 +1109,9 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count);
*/
static void __xen_evtchn_do_upcall(void)
{
+ int start_word_idx, start_bit_idx;
+ int word_idx, bit_idx;
+ int i;
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1094,17 +1130,57 @@ static void __xen_evtchn_do_upcall(void)
wmb();
#endif
pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
- while (pending_words != 0) {
+
+ start_word_idx = __this_cpu_read(current_word_idx);
+ start_bit_idx = __this_cpu_read(current_bit_idx);
+
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
unsigned long pending_bits;
- int word_idx = __ffs(pending_words);
- pending_words &= ~(1UL << word_idx);
+ unsigned long words;
- while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
- int bit_idx = __ffs(pending_bits);
- int port = (word_idx * BITS_PER_LONG) + bit_idx;
- int irq = evtchn_to_irq[port];
+ words = MASK_LSBS(pending_words, word_idx);
+
+ /*
+ * If we masked out all events, wrap to beginning.
+ */
+ if (words == 0) {
+ word_idx = 0;
+ bit_idx = 0;
+ continue;
+ }
+ word_idx = __ffs(words);
+
+ pending_bits = active_evtchns(cpu, s, word_idx);
+ bit_idx = 0; /* usually scan entire word from start */
+ if (word_idx == start_word_idx) {
+ /* We scan the starting word in two parts */
+ if (i == 0)
+ /* 1st time: start in the middle */
+ bit_idx = start_bit_idx;
+ else
+ /* 2nd time: mask bits done already */
+ bit_idx &= (1UL << start_bit_idx) - 1;
+ }
+
+ do {
+ unsigned long bits;
+ int port, irq;
struct irq_desc *desc;
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+ /* If we masked out all events, move on. */
+ if (bits == 0)
+ break;
+
+ bit_idx = __ffs(bits);
+
+ /* Process port. */
+ port = (word_idx * BITS_PER_LONG) + bit_idx;
+ irq = evtchn_to_irq[port];
+
mask_evtchn(port);
clear_evtchn(port);
@@ -1113,7 +1189,21 @@ static void __xen_evtchn_do_upcall(void)
if (desc)
generic_handle_irq_desc(irq, desc);
}
- }
+
+ bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+
+ /* Next caller starts at last processed + 1 */
+ __this_cpu_write(current_word_idx,
+ bit_idx ? word_idx :
+ (word_idx+1) % BITS_PER_LONG);
+ __this_cpu_write(current_bit_idx, bit_idx);
+ } while (bit_idx != 0);
+
+ /* Scan the starting word (start_word_idx) twice; all others once. */
+ if ((word_idx != start_word_idx) || (i != 0))
+ pending_words &= ~(1UL << word_idx);
+
+ word_idx = (word_idx + 1) % BITS_PER_LONG;
}
BUG_ON(!irqs_disabled());
@@ -1163,8 +1253,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_evtchn_info(evtchn);
+ xen_irq_info_evtchn_init(irq, evtchn);
spin_unlock(&irq_mapping_update_lock);
@@ -1181,10 +1270,14 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
- /* events delivered via platform PCI interrupts are always
- * routed to vcpu 0 */
- if (!VALID_EVTCHN(evtchn) ||
- (xen_hvm_domain() && !xen_have_vector_callback))
+ if (!VALID_EVTCHN(evtchn))
+ return -1;
+
+ /*
+ * Events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 and hence cannot be rebound.
+ */
+ if (xen_hvm_domain() && !xen_have_vector_callback)
return -1;
/* Send future instances of this interrupt to other vcpu. */
@@ -1271,19 +1364,22 @@ static int retrigger_dynirq(struct irq_data *data)
return ret;
}
-static void restore_cpu_pirqs(void)
+static void restore_pirqs(void)
{
int pirq, rc, irq, gsi;
struct physdev_map_pirq map_irq;
+ struct irq_info *info;
- for (pirq = 0; pirq < nr_irqs; pirq++) {
- irq = pirq_to_irq[pirq];
- if (irq == -1)
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info->type != IRQT_PIRQ)
continue;
+ pirq = info->u.pirq.pirq;
+ gsi = info->u.pirq.gsi;
+ irq = info->irq;
+
/* save/restore of PT devices doesn't work, so at this point the
* only devices present are GSI based emulated devices */
- gsi = gsi_from_irq(irq);
if (!gsi)
continue;
@@ -1296,8 +1392,7 @@ static void restore_cpu_pirqs(void)
if (rc) {
printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
gsi, irq, pirq, rc);
- irq_info[irq] = mk_unbound_info();
- pirq_to_irq[pirq] = -1;
+ xen_free_irq(irq);
continue;
}
@@ -1327,8 +1422,7 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn = bind_virq.port;
/* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_virq_info(evtchn, virq);
+ xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1352,8 +1446,7 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn = bind_ipi.port;
/* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_ipi_info(evtchn, ipi);
+ xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1413,7 +1506,8 @@ void xen_poll_irq(int irq)
void xen_irq_resume(void)
{
- unsigned int cpu, irq, evtchn;
+ unsigned int cpu, evtchn;
+ struct irq_info *info;
init_evtchn_cpu_bindings();
@@ -1422,8 +1516,8 @@ void xen_irq_resume(void)
mask_evtchn(evtchn);
/* No IRQ <-> event-channel mappings. */
- for (irq = 0; irq < nr_irqs; irq++)
- irq_info[irq].evtchn = 0; /* zap event-channel binding */
+ list_for_each_entry(info, &xen_irq_list_head, list)
+ info->evtchn = 0; /* zap event-channel binding */
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
evtchn_to_irq[evtchn] = -1;
@@ -1433,7 +1527,7 @@ void xen_irq_resume(void)
restore_cpu_ipis(cpu);
}
- restore_cpu_pirqs();
+ restore_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
@@ -1519,17 +1613,6 @@ void __init xen_init_IRQ(void)
{
int i;
- cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
- GFP_KERNEL);
- irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
-
- /* We are using nr_irqs as the maximum number of pirq available but
- * that number is actually chosen by Xen and we don't know exactly
- * what it is. Be careful choosing high pirq numbers. */
- pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
- for (i = 0; i < nr_irqs; i++)
- pirq_to_irq[i] = -1;
-
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
GFP_KERNEL);
for (i = 0; i < NR_EVENT_CHANNELS; i++)
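
The upcall loop above uses MASK_LSBS to resume scanning just past the last event it serviced, so a busy low-numbered port can no longer starve higher ones. A stand-alone illustration of the masking step with made-up bit values (user-space code using a GCC builtin, not driver code):

#include <stdio.h>

#define MASK_LSBS(w, i) ((w) & ((~0UL) << (i)))

int main(void)
{
	unsigned long pending = (1UL << 0) | (1UL << 5) | (1UL << 15);
	int start_bit = 3;	/* pretend bit 2 was serviced last time */
	unsigned long w = MASK_LSBS(pending, start_bit);

	/* Bits below 'start_bit' are hidden, so bit 5 is found first;
	 * the real loop wraps to zero to pick up bit 0 afterwards. */
	while (w) {
		int bit = __builtin_ctzl(w);	/* like __ffs() */
		printf("servicing bit %d\n", bit);
		w &= ~(1UL << bit);
	}
	return 0;
}
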
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
new file mode 100644
index 000000000000..a7ffdfe19fc9
--- /dev/null
+++ b/drivers/xen/gntalloc.c
@@ -0,0 +1,545 @@
+/******************************************************************************
+ * gntalloc.c
+ *
+ * Device for creating grant references (in user-space) that may be shared
+ * with other domains.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This driver exists to allow userspace programs in Linux to allocate kernel
+ * memory that will later be shared with another domain. Without this device,
+ * Linux userspace programs cannot create grant references.
+ *
+ * How this stuff works:
+ * X -> granting a page to Y
+ * Y -> mapping the grant from X
+ *
+ * 1. X uses the gntalloc device to allocate a page of kernel memory, P.
+ * 2. X creates an entry in the grant table that says domid(Y) can access P.
+ * This is done without a hypercall unless the grant table needs expansion.
+ * 3. X gives the grant reference identifier, GREF, to Y.
+ * 4. Y maps the page, either directly into kernel memory for use in a backend
+ * driver, or via the gntdev device to map into the address space of an
+ * application running in Y. This is the first point at which Xen does any
+ * tracking of the page.
+ * 5. A program in X mmap()s a segment of the gntalloc device that corresponds
+ * to the shared page, and can now communicate with Y over the shared page.
+ *
+ *
+ * NOTE TO USERSPACE LIBRARIES:
+ * The grant allocation and mmap()ing are, naturally, two separate operations.
+ * You set up the sharing by calling the create ioctl() and then the mmap().
+ * Teardown requires munmap() and either close() or ioctl().
+ *
+ * WARNING: Since Xen does not allow a guest to forcibly end the use of a grant
+ * reference, this device can be used to consume kernel memory by leaving grant
+ * references mapped by another domain when an application exits. Therefore,
+ * there is a global limit on the number of pages that can be allocated. When
+ * all references to the page are unmapped, it will be freed during the next
+ * grant operation.
+ */
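+
+/*
+ * A minimal sketch of the userspace side of steps 1, 3 and 5 above, using
+ * the ioctl and flag defined in xen/gntalloc.h. It assumes the device node
+ * shows up as /dev/xen/gntalloc and 4 KiB pages; remote_domid and
+ * send_to_y() are placeholders, and error handling is omitted:
+ *
+ *	int fd = open("/dev/xen/gntalloc", O_RDWR);
+ *	struct ioctl_gntalloc_alloc_gref op = {
+ *		.domid = remote_domid,
+ *		.flags = GNTALLOC_FLAG_WRITABLE,
+ *		.count = 1,
+ *	};
+ *	ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &op);
+ *	send_to_y(op.gref_ids[0]);
+ *	void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+ *			    MAP_SHARED, fd, op.index);
+ */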
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+#include <xen/gntalloc.h>
+#include <xen/events.h>
+
+static int limit = 1024;
+module_param(limit, int, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
+ "the gntalloc device");
+
+static LIST_HEAD(gref_list);
+static DEFINE_SPINLOCK(gref_lock);
+static int gref_size;
+
+struct notify_info {
+ uint16_t pgoff:12; /* Bits 0-11: Offset of the byte to clear */
+ uint16_t flags:2; /* Bits 12-13: Unmap notification flags */
+ int event; /* Port (event channel) to notify */
+};
+
+/* Metadata on a grant reference. */
+struct gntalloc_gref {
+ struct list_head next_gref; /* list entry gref_list */
+ struct list_head next_file; /* list entry file->list, if open */
+ struct page *page; /* The shared page */
+ uint64_t file_index; /* File offset for mmap() */
+ unsigned int users; /* Use count - when zero, waiting on Xen */
+ grant_ref_t gref_id; /* The grant reference number */
+ struct notify_info notify; /* Unmap notification */
+};
+
+struct gntalloc_file_private_data {
+ struct list_head list;
+ uint64_t index;
+};
+
+static void __del_gref(struct gntalloc_gref *gref);
+
+static void do_cleanup(void)
+{
+ struct gntalloc_gref *gref, *n;
+ list_for_each_entry_safe(gref, n, &gref_list, next_gref) {
+ if (!gref->users)
+ __del_gref(gref);
+ }
+}
+
+static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
+ uint32_t *gref_ids, struct gntalloc_file_private_data *priv)
+{
+ int i, rc, readonly;
+ LIST_HEAD(queue_gref);
+ LIST_HEAD(queue_file);
+ struct gntalloc_gref *gref;
+
+ readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
+ rc = -ENOMEM;
+ for (i = 0; i < op->count; i++) {
+ gref = kzalloc(sizeof(*gref), GFP_KERNEL);
+ if (!gref)
+ goto undo;
+ list_add_tail(&gref->next_gref, &queue_gref);
+ list_add_tail(&gref->next_file, &queue_file);
+ gref->users = 1;
+ gref->file_index = op->index + i * PAGE_SIZE;
+ gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!gref->page)
+ goto undo;
+
+ /* Grant foreign access to the page. The call returns an int that
+ * is negative on error and otherwise holds the new grant reference;
+ * don't test gref_id itself, since grant_ref_t is unsigned. */
+ rc = gnttab_grant_foreign_access(op->domid,
+ pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+ if (rc < 0)
+ goto undo;
+ gref->gref_id = rc;
+ gref_ids[i] = gref->gref_id;
+ /* Restore the default error for the allocations above. */
+ rc = -ENOMEM;
+ }
+
+ /* Add to gref lists. */
+ spin_lock(&gref_lock);
+ list_splice_tail(&queue_gref, &gref_list);
+ list_splice_tail(&queue_file, &priv->list);
+ spin_unlock(&gref_lock);
+
+ return 0;
+
+undo:
+ spin_lock(&gref_lock);
+ gref_size -= (op->count - i);
+
+ list_for_each_entry(gref, &queue_file, next_file) {
+ /* __del_gref does not remove from queue_file */
+ __del_gref(gref);
+ }
+
+ /* It's possible for the target domain to map the just-allocated grant
+ * references by blindly guessing their IDs; if this is done, then
+ * __del_gref will leave them in the queue_gref list. They need to be
+ * added to the global list so that we can free them when they are no
+ * longer referenced.
+ */
+ if (unlikely(!list_empty(&queue_gref)))
+ list_splice_tail(&queue_gref, &gref_list);
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static void __del_gref(struct gntalloc_gref *gref)
+{
+ if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ uint8_t *tmp = kmap(gref->page);
+ tmp[gref->notify.pgoff] = 0;
+ kunmap(gref->page);
+ }
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ notify_remote_via_evtchn(gref->notify.event);
+
+ gref->notify.flags = 0;
+
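+ /* If the grant is still mapped by the remote domain, leave the gref
+ * on its lists; a later do_cleanup() pass frees it once the foreign
+ * mapping has gone away. */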
+ if (gref->gref_id > 0) {
+ if (gnttab_query_foreign_access(gref->gref_id))
+ return;
+
+ if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
+ return;
+ }
+
+ gref_size--;
+ list_del(&gref->next_gref);
+
+ if (gref->page)
+ __free_page(gref->page);
+
+ kfree(gref);
+}
+
+/* finds contiguous grant references in a file, returns the first */
+static struct gntalloc_gref *find_grefs(struct gntalloc_file_private_data *priv,
+ uint64_t index, uint32_t count)
+{
+ struct gntalloc_gref *rv = NULL, *gref;
+ list_for_each_entry(gref, &priv->list, next_file) {
+ if (gref->file_index == index && !rv)
+ rv = gref;
+ if (rv) {
+ if (gref->file_index != index)
+ return NULL;
+ index += PAGE_SIZE;
+ count--;
+ if (count == 0)
+ return rv;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * -------------------------------------
+ * File operations.
+ * -------------------------------------
+ */
+static int gntalloc_open(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_nomem;
+ INIT_LIST_HEAD(&priv->list);
+
+ filp->private_data = priv;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ return 0;
+
+out_nomem:
+ return -ENOMEM;
+}
+
+static int gntalloc_release(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ spin_lock(&gref_lock);
+ while (!list_empty(&priv->list)) {
+ gref = list_entry(priv->list.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ }
+ kfree(priv);
+ spin_unlock(&gref_lock);
+
+ return 0;
+}
+
+static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
+ struct ioctl_gntalloc_alloc_gref __user *arg)
+{
+ int rc = 0;
+ struct ioctl_gntalloc_alloc_gref op;
+ uint32_t *gref_ids;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* op.count is user-controlled, so let kcalloc guard the multiply. */
+ gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
+ if (!gref_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock(&gref_lock);
+ /* Clean up pages that had dropped to zero (local) users but were
+ * still mapped by remote domains when they were released. Since
+ * those pages count towards the limit that we are about to enforce,
+ * removing them here is a good idea.
+ */
+ do_cleanup();
+ if (gref_size + op.count > limit) {
+ spin_unlock(&gref_lock);
+ rc = -ENOSPC;
+ goto out_free;
+ }
+ gref_size += op.count;
+ op.index = priv->index;
+ priv->index += op.count * PAGE_SIZE;
+ spin_unlock(&gref_lock);
+
+ rc = add_grefs(&op, gref_ids, priv);
+ if (rc < 0)
+ goto out_free;
+
+ /* Once we finish add_grefs, it is unsafe to touch the new reference,
+ * since it is possible for a concurrent ioctl to remove it (by guessing
+ * its index). If the userspace application doesn't provide valid memory
+ * to write the IDs to, it will have to close the file to release the
+ * grants - which will happen anyway, since it will segfault (and so
+ * close the file) as soon as it tries to access the IDs to close them.
+ */
+ if (copy_to_user(arg, &op, sizeof(op))) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+ if (copy_to_user(arg->gref_ids, gref_ids,
+ sizeof(gref_ids[0]) * op.count)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+
+out_free:
+ kfree(gref_ids);
+out:
+ return rc;
+}
+
+static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ int i, rc = 0;
+ struct ioctl_gntalloc_dealloc_gref op;
+ struct gntalloc_gref *gref, *n;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto dealloc_grant_out;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, op.index, op.count);
+ if (gref) {
+ /* Remove from the file list only, and decrease reference count.
+ * The later call to do_cleanup() will remove from gref_list and
+ * free the memory if the pages aren't mapped anywhere.
+ */
+ for (i = 0; i < op.count; i++) {
+ n = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ gref = n;
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ do_cleanup();
+
+ spin_unlock(&gref_lock);
+dealloc_grant_out:
+ return rc;
+}
+
+static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ struct ioctl_gntalloc_unmap_notify op;
+ struct gntalloc_gref *gref;
+ uint64_t index;
+ int pgoff;
+ int rc;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ index = op.index & ~(PAGE_SIZE - 1);
+ pgoff = op.index & (PAGE_SIZE - 1);
+
+ spin_lock(&gref_lock);
+
+ gref = find_grefs(priv, index, 1);
+ if (!gref) {
+ rc = -ENOENT;
+ goto unlock_out;
+ }
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ gref->notify.flags = op.action;
+ gref->notify.pgoff = pgoff;
+ gref->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+
+ switch (cmd) {
+ case IOCTL_GNTALLOC_ALLOC_GREF:
+ return gntalloc_ioctl_alloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_DEALLOC_GREF:
+ return gntalloc_ioctl_dealloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_SET_UNMAP_NOTIFY:
+ return gntalloc_ioctl_unmap_notify(priv, (void __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+
+static void gntalloc_vma_close(struct vm_area_struct *vma)
+{
+ struct gntalloc_gref *gref = vma->vm_private_data;
+ if (!gref)
+ return;
+
+ spin_lock(&gref_lock);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ spin_unlock(&gref_lock);
+}
+
+static struct vm_operations_struct gntalloc_vmops = {
+ .close = gntalloc_vma_close,
+};
+
+static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+ int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int rv, i;
+
+ pr_debug("%s: priv %p, page %lu+%d\n", __func__,
+ priv, vma->vm_pgoff, count);
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
+ if (gref == NULL) {
+ rv = -ENOENT;
+ pr_debug("%s: Could not find grant reference",
+ __func__);
+ goto out_unlock;
+ }
+
+ vma->vm_private_data = gref;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP;
+
+ vma->vm_ops = &gntalloc_vmops;
+
+ for (i = 0; i < count; i++) {
+ gref->users++;
+ rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ gref->page);
+ if (rv)
+ goto out_unlock;
+
+ gref = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ }
+ rv = 0;
+
+out_unlock:
+ spin_unlock(&gref_lock);
+ return rv;
+}
+
+static const struct file_operations gntalloc_fops = {
+ .owner = THIS_MODULE,
+ .open = gntalloc_open,
+ .release = gntalloc_release,
+ .unlocked_ioctl = gntalloc_ioctl,
+ .mmap = gntalloc_mmap
+};
+
+/*
+ * -------------------------------------
+ * Module creation/destruction.
+ * -------------------------------------
+ */
+static struct miscdevice gntalloc_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/gntalloc",
+ .fops = &gntalloc_fops,
+};
+
+static int __init gntalloc_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&gntalloc_miscdev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register misc gntalloc device\n");
+ return err;
+ }
+
+ pr_debug("Created grant allocation device at %d,%d\n",
+ MISC_MAJOR, gntalloc_miscdev.minor);
+
+ return 0;
+}
+
+static void __exit gntalloc_exit(void)
+{
+ misc_deregister(&gntalloc_miscdev);
+}
+
+module_init(gntalloc_init);
+module_exit(gntalloc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Carter Weatherly <carter.weatherly@jhuapl.edu>, "
+ "Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("User-space grant reference allocator driver");
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 1e31cdcdae1e..017ce600fbc6 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -32,10 +32,13 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
+#include <xen/balloon.h>
#include <xen/gntdev.h>
+#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
@@ -45,35 +48,46 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
-static int limit = 1024;
+static int limit = 1024*1024;
module_param(limit, int, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
- "once by a gntdev instance");
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
+ "the gntdev device");
+
+static atomic_t pages_mapped = ATOMIC_INIT(0);
+
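+/* Set once at init: on PV domains grants are mapped by rewriting the ptes
+ * of the target VMA (GNTMAP_contains_pte); on HVM (auto-translated) domains
+ * they are mapped into ballooned pages and inserted with vm_insert_page(). */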
+static int use_ptemod;
struct gntdev_priv {
struct list_head maps;
- uint32_t used;
- uint32_t limit;
/* lock protects maps from concurrent changes */
spinlock_t lock;
struct mm_struct *mm;
struct mmu_notifier mn;
};
+struct unmap_notify {
+ int flags;
+ /* Address relative to the start of the grant_map */
+ int addr;
+ int event;
+};
+
struct grant_map {
struct list_head next;
- struct gntdev_priv *priv;
struct vm_area_struct *vma;
int index;
int count;
int flags;
- int is_mapped;
+ atomic_t users;
+ struct unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
struct page **pages;
};
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+
/* ------------------------------------------------------------------ */
static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -82,9 +96,7 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#ifdef DEBUG
struct grant_map *map;
- pr_debug("maps list (priv %p, usage %d/%d)\n",
- priv, priv->used, priv->limit);
-
+ pr_debug("%s: maps list (priv %p)\n", __func__, priv);
list_for_each_entry(map, &priv->maps, next)
pr_debug(" index %2d, count %2d %s\n",
map->index, map->count,
@@ -111,27 +123,21 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
NULL == add->pages)
goto err;
+ if (alloc_xenballooned_pages(count, add->pages))
+ goto err;
+
for (i = 0; i < count; i++) {
- add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (add->pages[i] == NULL)
- goto err;
+ add->map_ops[i].handle = -1;
+ add->unmap_ops[i].handle = -1;
}
add->index = 0;
add->count = count;
- add->priv = priv;
-
- if (add->count + priv->used > priv->limit)
- goto err;
+ atomic_set(&add->users, 1);
return add;
err:
- if (add->pages)
- for (i = 0; i < count; i++) {
- if (add->pages[i])
- __free_page(add->pages[i]);
- }
kfree(add->pages);
kfree(add->grants);
kfree(add->map_ops);
@@ -154,7 +160,6 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
list_add_tail(&add->next, &priv->maps);
done:
- priv->used += add->count;
gntdev_print_maps(priv, "[new]", add->index);
}
@@ -166,57 +171,33 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
list_for_each_entry(map, &priv->maps, next) {
if (map->index != index)
continue;
- if (map->count != count)
+ if (count && map->count != count)
continue;
return map;
}
return NULL;
}
-static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
- unsigned long vaddr)
+static void gntdev_put_map(struct grant_map *map)
{
- struct grant_map *map;
-
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (vaddr < map->vma->vm_start)
- continue;
- if (vaddr >= map->vma->vm_end)
- continue;
- return map;
- }
- return NULL;
-}
-
-static int gntdev_del_map(struct grant_map *map)
-{
- int i;
+ if (!map)
+ return;
- if (map->vma)
- return -EBUSY;
- for (i = 0; i < map->count; i++)
- if (map->unmap_ops[i].handle)
- return -EBUSY;
+ if (!atomic_dec_and_test(&map->users))
+ return;
- map->priv->used -= map->count;
- list_del(&map->next);
- return 0;
-}
+ atomic_sub(map->count, &pages_mapped);
-static void gntdev_free_map(struct grant_map *map)
-{
- int i;
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
+ notify_remote_via_evtchn(map->notify.event);
+ }
- if (!map)
- return;
+ if (map->pages) {
+ if (!use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
- if (map->pages)
- for (i = 0; i < map->count; i++) {
- if (map->pages[i])
- __free_page(map->pages[i]);
- }
+ free_xenballooned_pages(map->count, map->pages);
+ }
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
@@ -231,18 +212,17 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
{
struct grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
u64 pte_maddr;
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
- gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
+ gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
- gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
- 0 /* handle */);
+ gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
+ -1 /* handle */);
return 0;
}
@@ -250,6 +230,21 @@ static int map_grant_pages(struct grant_map *map)
{
int i, err = 0;
+ if (!use_ptemod) {
+ /* Note: it could already be mapped */
+ if (map->map_ops[0].handle != -1)
+ return 0;
+ for (i = 0; i < map->count; i++) {
+ unsigned long addr = (unsigned long)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ gnttab_set_unmap_op(&map->unmap_ops[i], addr,
+ map->flags, -1 /* handle */);
+ }
+ }
+
pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs(map->map_ops, map->pages, map->count);
if (err)
@@ -258,28 +253,81 @@ static int map_grant_pages(struct grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status)
err = -EINVAL;
- map->unmap_ops[i].handle = map->map_ops[i].handle;
+ else {
+ BUG_ON(map->map_ops[i].handle == -1);
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ pr_debug("map handle=%d\n", map->map_ops[i].handle);
+ }
}
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
int i, err = 0;
- pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ int pgno = (map->notify.addr >> PAGE_SHIFT);
+ if (pgno >= offset && pgno < offset + pages && use_ptemod) {
+ void __user *tmp = (void __user *)
+ map->vma->vm_start + map->notify.addr;
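+ /* err is known to be 0 at this point, so copying its low
+ * byte is a convenient way to clear the notify byte; the
+ * use_ptemod case goes through the user mapping, the other
+ * branch below uses kmap() on the page directly. */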
+ err = copy_to_user(tmp, &err, 1);
+ if (err)
+ return err;
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ } else if (pgno >= offset && pgno < offset + pages) {
+ uint8_t *tmp = kmap(map->pages[pgno]);
+ tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
+ kunmap(map->pages[pgno]);
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ }
+ }
+
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
if (err)
return err;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
err = -EINVAL;
- map->unmap_ops[offset+i].handle = 0;
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = -1;
}
return err;
}
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+{
+ int range, err = 0;
+
+ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
+
+ /* It is possible the requested range will have a "hole" where we
+ * already unmapped some of the grants. Only unmap valid ranges.
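+ * For example, unmapping pages 0-9 when 3-4 are already gone issues
+ * one call for pages 0-2 and another for pages 5-9.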
+ */
+ while (pages && !err) {
+ while (pages && map->unmap_ops[offset].handle == -1) {
+ offset++;
+ pages--;
+ }
+ range = 0;
+ while (range < pages) {
+ /* range counts only the valid entries, so stop at the
+ * first hole without stepping back over it. */
+ if (map->unmap_ops[offset+range].handle == -1)
+ break;
+ range++;
+ }
+ err = __unmap_grant_pages(map, offset, range);
+ offset += range;
+ pages -= range;
+ }
+
+ return err;
+}
+
/* ------------------------------------------------------------------ */
static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -287,22 +335,13 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
struct grant_map *map = vma->vm_private_data;
pr_debug("close %p\n", vma);
- map->is_mapped = 0;
map->vma = NULL;
vma->vm_private_data = NULL;
-}
-
-static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
- vmf->virtual_address, vmf->pgoff);
- vmf->flags = VM_FAULT_ERROR;
- return 0;
+ gntdev_put_map(map);
}
static struct vm_operations_struct gntdev_vmops = {
.close = gntdev_vma_close,
- .fault = gntdev_vma_fault,
};
/* ------------------------------------------------------------------ */
@@ -320,8 +359,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
list_for_each_entry(map, &priv->maps, next) {
if (!map->vma)
continue;
- if (!map->is_mapped)
- continue;
if (map->vma->vm_start >= end)
continue;
if (map->vma->vm_end <= start)
@@ -386,16 +423,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
spin_lock_init(&priv->lock);
- priv->limit = limit;
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
+ if (use_ptemod) {
+ priv->mm = get_task_mm(current);
+ if (!priv->mm) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+ priv->mn.ops = &gntdev_mmu_ops;
+ ret = mmu_notifier_register(&priv->mn, priv->mm);
+ mmput(priv->mm);
}
- priv->mn.ops = &gntdev_mmu_ops;
- ret = mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
if (ret) {
kfree(priv);
@@ -412,21 +450,19 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct grant_map *map;
- int err;
pr_debug("priv %p\n", priv);
spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
- err = gntdev_del_map(map);
- if (WARN_ON(err))
- gntdev_free_map(map);
-
+ list_del(&map->next);
+ gntdev_put_map(map);
}
spin_unlock(&priv->lock);
- mmu_notifier_unregister(&priv->mn, priv->mm);
+ if (use_ptemod)
+ mmu_notifier_unregister(&priv->mn, priv->mm);
kfree(priv);
return 0;
}
@@ -443,16 +479,21 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
pr_debug("priv %p, add %d\n", priv, op.count);
if (unlikely(op.count <= 0))
return -EINVAL;
- if (unlikely(op.count > priv->limit))
- return -EINVAL;
err = -ENOMEM;
map = gntdev_alloc_map(priv, op.count);
if (!map)
return err;
+
+ if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
+ pr_debug("can't map: over limit\n");
+ gntdev_put_map(map);
+ return err;
+ }
+
if (copy_from_user(map->grants, &u->refs,
sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_free_map(map);
+ gntdev_put_map(map);
return err;
}
@@ -461,13 +502,9 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
op.index = map->index << PAGE_SHIFT;
spin_unlock(&priv->lock);
- if (copy_to_user(u, &op, sizeof(op)) != 0) {
- spin_lock(&priv->lock);
- gntdev_del_map(map);
- spin_unlock(&priv->lock);
- gntdev_free_map(map);
- return err;
- }
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ return -EFAULT;
+
return 0;
}
@@ -484,11 +521,12 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
spin_lock(&priv->lock);
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
- if (map)
- err = gntdev_del_map(map);
+ if (map) {
+ list_del(&map->next);
+ gntdev_put_map(map);
+ err = 0;
+ }
spin_unlock(&priv->lock);
- if (!err)
- gntdev_free_map(map);
return err;
}
@@ -496,43 +534,66 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
struct ioctl_gntdev_get_offset_for_vaddr op;
+ struct vm_area_struct *vma;
struct grant_map *map;
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
- spin_lock(&priv->lock);
- map = gntdev_find_map_vaddr(priv, op.vaddr);
- if (map == NULL ||
- map->vma->vm_start != op.vaddr) {
- spin_unlock(&priv->lock);
+ vma = find_vma(current->mm, op.vaddr);
+ if (!vma || vma->vm_ops != &gntdev_vmops)
return -EINVAL;
- }
+
+ map = vma->vm_private_data;
+ if (!map)
+ return -EINVAL;
+
op.offset = map->index << PAGE_SHIFT;
op.count = map->count;
- spin_unlock(&priv->lock);
if (copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
return 0;
}
-static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
- struct ioctl_gntdev_set_max_grants __user *u)
+static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
- struct ioctl_gntdev_set_max_grants op;
+ struct ioctl_gntdev_unmap_notify op;
+ struct grant_map *map;
+ int rc;
- if (copy_from_user(&op, u, sizeof(op)) != 0)
+ if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
- pr_debug("priv %p, limit %d\n", priv, op.count);
- if (op.count > limit)
- return -E2BIG;
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
+ return -EINVAL;
spin_lock(&priv->lock);
- priv->limit = op.count;
+
+ list_for_each_entry(map, &priv->maps, next) {
+ uint64_t begin = map->index << PAGE_SHIFT;
+ uint64_t end = (map->index + map->count) << PAGE_SHIFT;
+ if (op.index >= begin && op.index < end)
+ goto found;
+ }
+ rc = -ENOENT;
+ goto unlock_out;
+
+ found:
+ if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
+ (map->flags & GNTMAP_readonly)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ map->notify.flags = op.action;
+ map->notify.addr = op.index - (map->index << PAGE_SHIFT);
+ map->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
spin_unlock(&priv->lock);
- return 0;
+ return rc;
}
static long gntdev_ioctl(struct file *flip,
@@ -551,8 +612,8 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
- case IOCTL_GNTDEV_SET_MAX_GRANTS:
- return gntdev_ioctl_set_max_grants(priv, ptr);
+ case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
+ return gntdev_ioctl_notify(priv, ptr);
default:
pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
@@ -568,7 +629,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
struct grant_map *map;
- int err = -EINVAL;
+ int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -580,47 +641,70 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (map->vma)
+ if (use_ptemod && map->vma)
goto unlock_out;
- if (priv->mm != vma->vm_mm) {
+ if (use_ptemod && priv->mm != vma->vm_mm) {
printk(KERN_WARNING "Huh? Other mm?\n");
goto unlock_out;
}
+ atomic_inc(&map->users);
+
vma->vm_ops = &gntdev_vmops;
vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
vma->vm_private_data = map;
- map->vma = vma;
- map->flags = GNTMAP_host_map | GNTMAP_application_map;
- if (!(vma->vm_flags & VM_WRITE))
- map->flags |= GNTMAP_readonly;
+ if (use_ptemod)
+ map->vma = vma;
+
+ if (map->flags) {
+ if ((vma->vm_flags & VM_WRITE) &&
+ (map->flags & GNTMAP_readonly)) {
+ err = -EINVAL;
+ /* don't leak priv->lock or the reference taken above */
+ spin_unlock(&priv->lock);
+ goto out_put_map;
+ }
+ } else {
+ map->flags = GNTMAP_host_map;
+ if (!(vma->vm_flags & VM_WRITE))
+ map->flags |= GNTMAP_readonly;
+ }
spin_unlock(&priv->lock);
- err = apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- find_grant_ptes, map);
- if (err) {
- printk(KERN_WARNING "find_grant_ptes() failure.\n");
- return err;
+ if (use_ptemod) {
+ err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ find_grant_ptes, map);
+ if (err) {
+ printk(KERN_WARNING "find_grant_ptes() failure.\n");
+ goto out_put_map;
+ }
}
err = map_grant_pages(map);
- if (err) {
- printk(KERN_WARNING "map_grant_pages() failure.\n");
- return err;
- }
+ if (err)
+ goto out_put_map;
- map->is_mapped = 1;
+ if (!use_ptemod) {
+ for (i = 0; i < count; i++) {
+ err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
+ map->pages[i]);
+ if (err)
+ goto out_put_map;
+ }
+ }
return 0;
unlock_out:
spin_unlock(&priv->lock);
return err;
+
+out_put_map:
+ if (use_ptemod)
+ map->vma = NULL;
+ gntdev_put_map(map);
+ return err;
}
static const struct file_operations gntdev_fops = {
@@ -646,6 +730,8 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
+ use_ptemod = xen_pv_domain();
+
err = misc_register(&gntdev_miscdev);
if (err != 0) {
printk(KERN_ERR "Could not register gntdev device\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 9ef54ebc1194..3745a318defc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -458,7 +458,14 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (ret)
return ret;
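+ /* Auto-translated guests keep no m2p table of their own, so the
+ * m2p override handling below applies only to PV guests. */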
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
+ /* Do not add to override if the map failed. */
+ if (map_ops[i].status)
+ continue;
+
/* m2p override only supported for GNTMAP_contains_pte mappings */
if (!(map_ops[i].flags & GNTMAP_contains_pte))
continue;
@@ -483,6 +490,9 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (ret)
return ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i]);
if (ret)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index ebb292859b59..95143dd6904d 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled)
xen_mm_unpin_all();
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_HIBERNATION
static int xen_suspend(void *data)
{
struct suspend_info *si = data;
@@ -69,7 +69,7 @@ static int xen_suspend(void *data)
BUG_ON(!irqs_disabled());
- err = sysdev_suspend(PMSG_SUSPEND);
+ err = sysdev_suspend(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
err);
@@ -118,7 +118,7 @@ static void do_suspend(void)
}
#endif
- err = dpm_suspend_start(PMSG_SUSPEND);
+ err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
goto out_thaw;
@@ -127,7 +127,7 @@ static void do_suspend(void)
printk(KERN_DEBUG "suspending xenstore...\n");
xs_suspend();
- err = dpm_suspend_noirq(PMSG_SUSPEND);
+ err = dpm_suspend_noirq(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
goto out_resume;
@@ -147,7 +147,7 @@ static void do_suspend(void)
err = stop_machine(xen_suspend, &si, cpumask_of(0));
- dpm_resume_noirq(PMSG_RESUME);
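+ /* A cancelled suspend never left this instance, so thaw; otherwise
+ * we are coming back from a save/restore, so restore. */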
+ dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
if (err) {
printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
@@ -161,7 +161,7 @@ out_resume:
} else
xs_suspend_cancel();
- dpm_resume_end(PMSG_RESUME);
+ dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
/* Make sure timer events get retriggered on all CPUs */
clock_was_set();
@@ -173,7 +173,7 @@ out:
#endif
shutting_down = SHUTDOWN_INVALID;
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_HIBERNATION */
struct shutdown_handler {
const char *command;
@@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
{ "poweroff", do_poweroff },
{ "halt", do_poweroff },
{ "reboot", do_reboot },
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_HIBERNATION
{ "suspend", do_suspend },
#endif
{NULL, NULL},
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
new file mode 100644
index 000000000000..a4ff225ee868
--- /dev/null
+++ b/drivers/xen/xen-balloon.c
@@ -0,0 +1,256 @@
+/******************************************************************************
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/interface/xen.h>
+#include <xen/balloon.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+#include <xen/page.h>
+
+#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
+
+#define BALLOON_CLASS_NAME "xen_memory"
+
+static struct sys_device balloon_sysdev;
+
+static int register_balloon(struct sys_device *sysdev);
+
+static struct xenbus_watch target_watch =
+{
+ .node = "memory/target"
+};
+
+/* React to a change in the target key */
+static void watch_target(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ unsigned long long new_target;
+ int err;
+
+ err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
+ if (err != 1) {
+ /* This is ok (for domain0 at least) - so just return */
+ return;
+ }
+
+ /* The given memory/target value is in KiB, so it needs converting to
+ * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
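+ * For example, with 4 KiB pages (PAGE_SHIFT == 12) a target of
+ * 1048576 KiB (1 GiB) becomes 1048576 >> 2 == 262144 pages.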
+ */
+ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+}
+
+static int balloon_init_watcher(struct notifier_block *notifier,
+ unsigned long event,
+ void *data)
+{
+ int err;
+
+ err = register_xenbus_watch(&target_watch);
+ if (err)
+ printk(KERN_ERR "Failed to set balloon watcher\n");
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block xenstore_notifier;
+
+static int __init balloon_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ pr_info("xen-balloon: Initialising balloon driver.\n");
+
+ register_balloon(&balloon_sysdev);
+
+ target_watch.callback = watch_target;
+ xenstore_notifier.notifier_call = balloon_init_watcher;
+
+ register_xenstore_notifier(&xenstore_notifier);
+
+ return 0;
+}
+subsys_initcall(balloon_init);
+
+static void balloon_exit(void)
+{
+ /* XXX - release balloon here */
+ return;
+}
+
+module_exit(balloon_exit);
+
+#define BALLOON_SHOW(name, format, args...) \
+ static ssize_t show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
+ { \
+ return sprintf(buf, format, ##args); \
+ } \
+ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+
+BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
+BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
+BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
+
+static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
+static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
+static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
+static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+
+static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
+}
+
+static ssize_t store_target_kb(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char *endchar;
+ unsigned long long target_bytes;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
+
+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+ return count;
+}
+
+static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+ show_target_kb, store_target_kb);
+
+
+static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)balloon_stats.target_pages
+ << PAGE_SHIFT);
+}
+
+static ssize_t store_target(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char *endchar;
+ unsigned long long target_bytes;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ target_bytes = memparse(buf, &endchar);
+
+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+ return count;
+}
+
+static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
+ show_target, store_target);
+
+
+static struct sysdev_attribute *balloon_attrs[] = {
+ &attr_target_kb,
+ &attr_target,
+ &attr_schedule_delay.attr,
+ &attr_max_schedule_delay.attr,
+ &attr_retry_count.attr,
+ &attr_max_retry_count.attr
+};
+
+static struct attribute *balloon_info_attrs[] = {
+ &attr_current_kb.attr,
+ &attr_low_kb.attr,
+ &attr_high_kb.attr,
+ NULL
+};
+
+static struct attribute_group balloon_info_group = {
+ .name = "info",
+ .attrs = balloon_info_attrs
+};
+
+static struct sysdev_class balloon_sysdev_class = {
+ .name = BALLOON_CLASS_NAME
+};
+
+static int register_balloon(struct sys_device *sysdev)
+{
+ int i, error;
+
+ error = sysdev_class_register(&balloon_sysdev_class);
+ if (error)
+ return error;
+
+ sysdev->id = 0;
+ sysdev->cls = &balloon_sysdev_class;
+
+ error = sysdev_register(sysdev);
+ if (error) {
+ sysdev_class_unregister(&balloon_sysdev_class);
+ return error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
+ error = sysdev_create_file(sysdev, balloon_attrs[i]);
+ if (error)
+ goto fail;
+ }
+
+ error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
+ if (error)
+ goto fail;
+
+ return 0;
+
+ fail:
+ while (--i >= 0)
+ sysdev_remove_file(sysdev, balloon_attrs[i]);
+ sysdev_unregister(sysdev);
+ sysdev_class_unregister(&balloon_sysdev_class);
+ return error;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index baa65e7fbbc7..739769551e33 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -577,7 +577,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);
-int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev)
{
int err = 0;
struct xenbus_driver *drv;
@@ -590,7 +590,7 @@ int xenbus_dev_suspend(struct device *dev, pm_message_t state)
return 0;
drv = to_xenbus_driver(dev->driver);
if (drv->suspend)
- err = drv->suspend(xdev, state);
+ err = drv->suspend(xdev);
if (err)
printk(KERN_WARNING
"xenbus: suspend %s failed: %i\n", dev_name(dev), err);
@@ -642,6 +642,14 @@ int xenbus_dev_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);
+int xenbus_dev_cancel(struct device *dev)
+{
+ /* Do nothing */
+ DPRINTK("cancel");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
+
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 24665812316a..888b9900ca08 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -64,8 +64,9 @@ extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
extern void xenbus_dev_shutdown(struct device *_dev);
-extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_suspend(struct device *dev);
extern int xenbus_dev_resume(struct device *dev);
+extern int xenbus_dev_cancel(struct device *dev);
extern void xenbus_otherend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 5bcc2d6cf129..b6a2690c9d49 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -85,6 +85,14 @@ static struct device_attribute xenbus_frontend_dev_attrs[] = {
__ATTR_NULL
};
+static const struct dev_pm_ops xenbus_pm_ops = {
+ .suspend = xenbus_dev_suspend,
+ .resume = xenbus_dev_resume,
+ .freeze = xenbus_dev_suspend,
+ .thaw = xenbus_dev_cancel,
+ .restore = xenbus_dev_resume,
+};
+
static struct xen_bus_type xenbus_frontend = {
.root = "device",
.levels = 2, /* device/type/<id> */
@@ -100,8 +108,7 @@ static struct xen_bus_type xenbus_frontend = {
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_frontend_dev_attrs,
- .suspend = xenbus_dev_suspend,
- .resume = xenbus_dev_resume,
+ .pm = &xenbus_pm_ops,
},
};
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index 045995c8ce5a..153242187fce 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -1991,6 +1991,7 @@ ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
spin_unlock(sb_bgl_lock(sbi, group));
percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
+ free_blocks -= next - start;
/* Do not issue a TRIM on extents smaller than minblocks */
if ((next - start) < minblocks)
goto free_extent;
@@ -2040,7 +2041,7 @@ free_extent:
cond_resched();
/* No more suitable extents */
- if ((free_blocks - count) < minblocks)
+ if (free_blocks < minblocks)
break;
}
@@ -2090,7 +2091,8 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
int ret = 0;
- start = range->start >> sb->s_blocksize_bits;
+ start = (range->start >> sb->s_blocksize_bits) +
+ le32_to_cpu(es->s_first_data_block);
len = range->len >> sb->s_blocksize_bits;
minlen = range->minlen >> sb->s_blocksize_bits;
trimmed = 0;
@@ -2099,10 +2101,6 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
return -EINVAL;
if (start >= max_blks)
goto out;
- if (start < le32_to_cpu(es->s_first_data_block)) {
- len -= le32_to_cpu(es->s_first_data_block) - start;
- start = le32_to_cpu(es->s_first_data_block);
- }
if (start + len > max_blks)
len = max_blks - start;
@@ -2129,10 +2127,15 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
if (free_blocks < minlen)
continue;
- if (len >= EXT3_BLOCKS_PER_GROUP(sb))
- len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block);
- else
+ /*
+ * For all groups except the last one, the last block is
+ * always EXT3_BLOCKS_PER_GROUP(sb), so we only need to
+ * change it for the last group, in which case first_block +
+ * len < EXT3_BLOCKS_PER_GROUP(sb).
+ */
+ if (first_block + len < EXT3_BLOCKS_PER_GROUP(sb))
last_block = first_block + len;
+ len -= last_block - first_block;
ret = ext3_trim_all_free(sb, group, first_block,
last_block, minlen);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 0521a007ae6d..32f3b8695859 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1540,8 +1540,8 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
goto cleanup;
node2 = (struct dx_node *)(bh2->b_data);
entries2 = node2->entries;
+ memset(&node2->fake, 0, sizeof(struct fake_dirent));
node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
- node2->fake.inode = 0;
BUFFER_TRACE(frame->bh, "get_write_access");
err = ext3_journal_get_write_access(handle, frame->bh);
if (err)
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 9cc19a1dea8e..071689f86e18 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1464,6 +1464,13 @@ static void ext3_orphan_cleanup (struct super_block * sb,
return;
}
+ /* Check if feature set allows readwrite operations */
+ if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) {
+ ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
+ "unknown ROCOMPAT features");
+ return;
+ }
+
if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) {
if (es->s_last_orphan)
jbd_debug(1, "Errors on filesystem, "
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index da1b5e4ffce1..eb11601f2e00 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -839,7 +839,7 @@ journal_t * journal_init_inode (struct inode *inode)
err = journal_bmap(journal, 0, &blocknr);
/* If that failed, give up */
if (err) {
- printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
+ printk(KERN_ERR "%s: Cannot locate journal superblock\n",
__func__);
goto out_err;
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 97e73469b2c4..90407b8fece7 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -991,7 +991,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
err = jbd2_journal_bmap(journal, 0, &blocknr);
/* If that failed, give up */
if (err) {
- printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
+ printk(KERN_ERR "%s: Cannot locate journal superblock\n",
__func__);
goto out_err;
}
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 89587573fe50..2f41dccea18e 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -188,10 +188,10 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
rv = NFS4ERR_DELAY;
list_del_init(&lo->plh_bulk_recall);
spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&free_me_list);
put_layout_hdr(lo);
iput(ino);
}
- pnfs_free_lseg_list(&free_me_list);
return rv;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index bd3ca32879e7..139be9647d80 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -82,6 +82,11 @@ retry:
#endif /* CONFIG_NFS_V4 */
/*
+ * Turn off NFSv4 uid/gid mapping when using AUTH_SYS
+ */
+static int nfs4_disable_idmapping = 0;
+
+/*
* RPC cruft for NFS
*/
static struct rpc_version *nfs_version[5] = {
@@ -481,7 +486,12 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
* Look up a client by IP address and protocol version
* - creates a new record if one doesn't yet exist
*/
-static struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
+static struct nfs_client *
+nfs_get_client(const struct nfs_client_initdata *cl_init,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int noresvport)
{
struct nfs_client *clp, *new = NULL;
int error;
@@ -512,6 +522,13 @@ install_client:
clp = new;
list_add(&clp->cl_share_link, &nfs_client_list);
spin_unlock(&nfs_client_lock);
+
+ error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
+ authflavour, noresvport);
+ if (error < 0) {
+ nfs_put_client(clp);
+ return ERR_PTR(error);
+ }
dprintk("--> nfs_get_client() = %p [new]\n", clp);
return clp;
@@ -767,9 +784,9 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
/*
* Initialise an NFS2 or NFS3 client
*/
-static int nfs_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const struct nfs_parsed_mount_data *data)
+int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
+ const char *ip_addr, rpc_authflavor_t authflavour,
+ int noresvport)
{
int error;
@@ -784,7 +801,7 @@ static int nfs_init_client(struct nfs_client *clp,
* - RFC 2623, sec 2.3.2
*/
error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
- 0, data->flags & NFS_MOUNT_NORESVPORT);
+ 0, noresvport);
if (error < 0)
goto error;
nfs_mark_client_ready(clp, NFS_CS_READY);
@@ -820,19 +837,17 @@ static int nfs_init_server(struct nfs_server *server,
cl_init.rpc_ops = &nfs_v3_clientops;
#endif
+ nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
+ data->timeo, data->retrans);
+
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init);
+ clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX,
+ data->flags & NFS_MOUNT_NORESVPORT);
if (IS_ERR(clp)) {
dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
return PTR_ERR(clp);
}
- nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
- data->timeo, data->retrans);
- error = nfs_init_client(clp, &timeparms, data);
- if (error < 0)
- goto error;
-
server->nfs_client = clp;
/* Initialise the client representation from the mount data */
@@ -1009,14 +1024,19 @@ static void nfs_server_insert_lists(struct nfs_server *server)
spin_lock(&nfs_client_lock);
list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
list_add_tail(&server->master_link, &nfs_volume_list);
+ clear_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
spin_unlock(&nfs_client_lock);
}
static void nfs_server_remove_lists(struct nfs_server *server)
{
+ struct nfs_client *clp = server->nfs_client;
+
spin_lock(&nfs_client_lock);
list_del_rcu(&server->client_link);
+ if (clp && list_empty(&clp->cl_superblocks))
+ set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
list_del(&server->master_link);
spin_unlock(&nfs_client_lock);
@@ -1307,11 +1327,11 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
/*
* Initialise an NFS4 client record
*/
-static int nfs4_init_client(struct nfs_client *clp,
- const struct rpc_timeout *timeparms,
- const char *ip_addr,
- rpc_authflavor_t authflavour,
- int flags)
+int nfs4_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int noresvport)
{
int error;
@@ -1325,7 +1345,7 @@ static int nfs4_init_client(struct nfs_client *clp,
clp->rpc_ops = &nfs_v4_clientops;
error = nfs_create_rpc_client(clp, timeparms, authflavour,
- 1, flags & NFS_MOUNT_NORESVPORT);
+ 1, noresvport);
if (error < 0)
goto error;
strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
@@ -1378,27 +1398,71 @@ static int nfs4_set_client(struct nfs_server *server,
dprintk("--> nfs4_set_client()\n");
/* Allocate or find a client reference we can use */
- clp = nfs_get_client(&cl_init);
+ clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour,
+ server->flags & NFS_MOUNT_NORESVPORT);
if (IS_ERR(clp)) {
error = PTR_ERR(clp);
goto error;
}
- error = nfs4_init_client(clp, timeparms, ip_addr, authflavour,
- server->flags);
- if (error < 0)
- goto error_put;
+
+ /*
+ * Query for the lease time on clientid setup or renewal
+ *
+ * Note that this will be set on nfs_clients that were created
+ * only for the DS role and did not set this bit, but now will
+ * serve a dual role.
+ */
+ set_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state);
server->nfs_client = clp;
dprintk("<-- nfs4_set_client() = 0 [new %p]\n", clp);
return 0;
-
-error_put:
- nfs_put_client(clp);
error:
dprintk("<-- nfs4_set_client() = xerror %d\n", error);
return error;
}
+/*
+ * Set up a pNFS Data Server client.
+ *
+ * Return any existing nfs_client that matches the server address, port,
+ * version and minorversion.
+ *
+ * For a new nfs_client, use a soft mount (default), a low retrans and a
+ * low timeout interval so that if a connection is lost, we retry through
+ * the MDS.
+ */
+struct nfs_client *nfs4_set_ds_client(struct nfs_client *mds_clp,
+ const struct sockaddr *ds_addr,
+ int ds_addrlen, int ds_proto)
+{
+ struct nfs_client_initdata cl_init = {
+ .addr = ds_addr,
+ .addrlen = ds_addrlen,
+ .rpc_ops = &nfs_v4_clientops,
+ .proto = ds_proto,
+ .minorversion = mds_clp->cl_minorversion,
+ };
+ struct rpc_timeout ds_timeout = {
+ .to_initval = 15 * HZ,
+ .to_maxval = 15 * HZ,
+ .to_retries = 1,
+ .to_exponential = 1,
+ };
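+ /* With to_initval == to_maxval and a single retry, a lost DS
+ * connection fails the RPC after roughly 30 seconds instead of
+ * hanging, so the caller can redirect the I/O through the MDS. */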
+ struct nfs_client *clp;
+
+ /*
+ * Set an authflavor equal to the MDS value. Use the MDS nfs_client
+ * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+ * (section 13.1 RFC 5661).
+ */
+ clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
+ mds_clp->cl_rpcclient->cl_auth->au_flavor, 0);
+
+ dprintk("<-- %s %p\n", __func__, clp);
+ return clp;
+}
+EXPORT_SYMBOL(nfs4_set_ds_client);
/*
* Session has been established, and the client marked ready.
@@ -1435,6 +1499,10 @@ static int nfs4_server_common_setup(struct nfs_server *server,
BUG_ON(!server->nfs_client->rpc_ops);
BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+ /* data servers support only a subset of NFSv4.1 */
+ if (is_ds_only_client(server->nfs_client))
+ return -EPROTONOSUPPORT;
+
fattr = nfs_alloc_fattr();
if (fattr == NULL)
return -ENOMEM;
@@ -1504,6 +1572,13 @@ static int nfs4_init_server(struct nfs_server *server,
if (error < 0)
goto error;
+ /*
+ * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
+ * authentication.
+ */
+ if (nfs4_disable_idmapping && data->auth_flavors[0] == RPC_AUTH_UNIX)
+ server->caps |= NFS_CAP_UIDGID_NOMAP;
+
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
if (data->wsize)
@@ -1921,3 +1996,7 @@ void nfs_fs_proc_exit(void)
}
#endif /* CONFIG_PROC_FS */
+
+module_param(nfs4_disable_idmapping, bool, 0644);
+MODULE_PARM_DESC(nfs4_disable_idmapping,
+ "Turn off NFSv4 idmapping when using 'sec=sys'");
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 9943a75bb6d1..8eea25366717 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -45,6 +45,7 @@
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
@@ -649,8 +650,7 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
- if (nfs_writeback_done(task, data) != 0)
- return;
+ nfs_writeback_done(task, data);
}
/*
@@ -938,6 +938,8 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
if (retval)
goto out;
+ task_io_account_read(count);
+
retval = nfs_direct_read(iocb, iov, nr_segs, pos);
if (retval > 0)
iocb->ki_pos = pos + retval;
@@ -999,6 +1001,8 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
if (retval)
goto out;
+ task_io_account_write(count);
+
retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
if (retval > 0)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 7bf029ef4084..d85a534b15cd 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -387,10 +387,6 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
file->f_path.dentry->d_name.name,
mapping->host->i_ino, len, (long long) pos);
- pnfs_update_layout(mapping->host,
- nfs_file_open_context(file),
- IOMODE_RW);
-
start:
/*
* Prevent starvation issues if someone is doing a consistency
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 18696882f1c6..79664a1025af 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -33,16 +33,41 @@
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
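+/* Treat a name that contains no '@' and parses as a number (base 0, so
+ * decimal, octal or hex) as a raw uid/gid. Returns 1 and fills *res on
+ * success, 0 if the caller should fall through to a real idmap lookup. */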
+static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
+{
+ unsigned long val;
+ char buf[16];
+
+ if (memchr(name, '@', namelen) != NULL || namelen >= sizeof(buf))
+ return 0;
+ memcpy(buf, name, namelen);
+ buf[namelen] = '\0';
+ if (strict_strtoul(buf, 0, &val) != 0)
+ return 0;
+ *res = val;
+ return 1;
+}
+
+static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen)
+{
+ return snprintf(buf, buflen, "%u", id);
+}
#ifdef CONFIG_NFS_USE_NEW_IDMAPPER
#include <linux/slab.h>
#include <linux/cred.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs_sb.h>
#include <linux/nfs_idmap.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <linux/rcupdate.h>
-#include <linux/kernel.h>
#include <linux/err.h>
#include <keys/user-type.h>
@@ -219,23 +244,39 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen,
return ret;
}
-int nfs_map_name_to_uid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *uid)
+int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
{
+ if (nfs_map_string_to_numeric(name, namelen, uid))
+ return 0;
return nfs_idmap_lookup_id(name, namelen, "uid", uid);
}
-int nfs_map_group_to_gid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *gid)
+int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *gid)
{
+ if (nfs_map_string_to_numeric(name, namelen, gid))
+ return 0;
return nfs_idmap_lookup_id(name, namelen, "gid", gid);
}
-int nfs_map_uid_to_name(struct nfs_client *clp, __u32 uid, char *buf, size_t buflen)
+int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen)
{
- return nfs_idmap_lookup_name(uid, "user", buf, buflen);
+ int ret = -EINVAL;
+
+ if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
+ ret = nfs_idmap_lookup_name(uid, "user", buf, buflen);
+ if (ret < 0)
+ ret = nfs_map_numeric_to_string(uid, buf, buflen);
+ return ret;
}
-int nfs_map_gid_to_group(struct nfs_client *clp, __u32 gid, char *buf, size_t buflen)
+int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, size_t buflen)
{
- return nfs_idmap_lookup_name(gid, "group", buf, buflen);
+ int ret = -EINVAL;
+
+ if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
+ ret = nfs_idmap_lookup_name(gid, "group", buf, buflen);
+ if (ret < 0)
+ ret = nfs_map_numeric_to_string(gid, buf, buflen);
+ return ret;
}
#else /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */
@@ -243,7 +284,6 @@ int nfs_map_gid_to_group(struct nfs_client *clp, __u32 gid, char *buf, size_t bu
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
-#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/in.h>
@@ -695,31 +735,45 @@ static unsigned int fnvhash32(const void *buf, size_t buflen)
return hash;
}
-int nfs_map_name_to_uid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *uid)
+int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
{
- struct idmap *idmap = clp->cl_idmap;
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+ if (nfs_map_string_to_numeric(name, namelen, uid))
+ return 0;
return nfs_idmap_id(idmap, &idmap->idmap_user_hash, name, namelen, uid);
}
-int nfs_map_group_to_gid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *uid)
+int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
{
- struct idmap *idmap = clp->cl_idmap;
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+ if (nfs_map_string_to_numeric(name, namelen, uid))
+ return 0;
return nfs_idmap_id(idmap, &idmap->idmap_group_hash, name, namelen, uid);
}
-int nfs_map_uid_to_name(struct nfs_client *clp, __u32 uid, char *buf, size_t buflen)
+int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen)
{
- struct idmap *idmap = clp->cl_idmap;
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+ int ret = -EINVAL;
- return nfs_idmap_name(idmap, &idmap->idmap_user_hash, uid, buf);
+ if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
+ ret = nfs_idmap_name(idmap, &idmap->idmap_user_hash, uid, buf);
+ if (ret < 0)
+ ret = nfs_map_numeric_to_string(uid, buf, buflen);
+ return ret;
}
-int nfs_map_gid_to_group(struct nfs_client *clp, __u32 uid, char *buf, size_t buflen)
+int nfs_map_gid_to_group(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen)
{
- struct idmap *idmap = clp->cl_idmap;
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+ int ret = -EINVAL;
- return nfs_idmap_name(idmap, &idmap->idmap_group_hash, uid, buf);
+ if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
+ ret = nfs_idmap_name(idmap, &idmap->idmap_group_hash, uid, buf);
+ if (ret < 0)
+ ret = nfs_map_numeric_to_string(uid, buf, buflen);
+ return ret;
}
#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */
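
The new common helpers implement the raw-ID fast path: an owner string maps straight to a uid/gid only when it contains no '@' and parses cleanly as an unsigned number. A userspace sketch of the same logic, with strtoul standing in for the kernel's strict_strtoul:

    /*
     * Userspace sketch of nfs_map_string_to_numeric(): reject names
     * containing '@' or too long to be numeric, then require a
     * clean unsigned parse.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int map_string_to_numeric(const char *name, size_t namelen,
                                     unsigned int *res)
    {
        char buf[16], *end;
        unsigned long val;

        if (memchr(name, '@', namelen) != NULL || namelen >= sizeof(buf))
            return 0;
        memcpy(buf, name, namelen);
        buf[namelen] = '\0';
        val = strtoul(buf, &end, 0);
        if (*end != '\0')
            return 0;
        *res = val;
        return 1;
    }

    int main(void)
    {
        unsigned int id = 0;

        printf("\"1000\"  -> %d (id %u)\n",
               map_string_to_numeric("1000", 4, &id), id);
        printf("\"bob@x\" -> %d\n",
               map_string_to_numeric("bob@x", 5, &id));
        return 0;
    }
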
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e94ad22da5d2..72e0bddf7a2f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -148,6 +148,9 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fattr *);
extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
extern int nfs4_check_client_ready(struct nfs_client *clp);
+extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
+ const struct sockaddr *ds_addr,
+ int ds_addrlen, int ds_proto);
#ifdef CONFIG_PROC_FS
extern int __init nfs_fs_proc_init(void);
extern void nfs_fs_proc_exit(void);
@@ -213,8 +216,14 @@ extern const u32 nfs41_maxwrite_overhead;
extern struct rpc_procinfo nfs4_procedures[];
#endif
+extern int nfs4_init_ds_session(struct nfs_client *clp);
+
/* proc.c */
void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
+extern int nfs_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr, rpc_authflavor_t authflavour,
+ int noresvport);
/* dir.c */
extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -262,9 +271,15 @@ extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
#endif
/* read.c */
+extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops);
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
/* write.c */
+extern int nfs_initiate_write(struct nfs_write_data *data,
+ struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how);
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
@@ -274,6 +289,13 @@ extern int nfs_migrate_page(struct address_space *,
#endif
/* nfs4proc.c */
+extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
+extern int nfs4_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int noresvport);
+extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
extern int _nfs4_call_sync(struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index ce939c062a52..d0c80d8b3f96 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,4 +885,5 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
.lock = nfs3_proc_lock,
.clear_acl_cache = nfs3_forget_cached_acls,
.close_context = nfs_close_context,
+ .init_client = nfs_init_client,
};
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 1be36cf65bfc..c64be1cff080 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -252,6 +252,9 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *ser
extern int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
int cache_reply, struct rpc_task *task);
+extern int nfs41_setup_sequence(struct nfs4_session *session,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ int cache_reply, struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
extern int nfs4_proc_create_session(struct nfs_client *);
@@ -259,6 +262,19 @@ extern int nfs4_proc_destroy_session(struct nfs4_session *);
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
struct nfs_fsinfo *fsinfo);
+
+static inline bool
+is_ds_only_client(struct nfs_client *clp)
+{
+ return (clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) ==
+ EXCHGID4_FLAG_USE_PNFS_DS;
+}
+
+static inline bool
+is_ds_client(struct nfs_client *clp)
+{
+ return clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_DS;
+}
#else /* CONFIG_NFS_v4_1 */
static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
{
@@ -276,6 +292,18 @@ static inline int nfs4_init_session(struct nfs_server *server)
{
return 0;
}
+
+static inline bool
+is_ds_only_client(struct nfs_client *clp)
+{
+ return false;
+}
+
+static inline bool
+is_ds_client(struct nfs_client *clp)
+{
+ return false;
+}
#endif /* CONFIG_NFS_V4_1 */
extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
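
The two role tests key off the EXCHANGE_ID flag word negotiated with the server: a client is "DS only" when the DS bit is the sole pNFS role bit set. A standalone sketch with the RFC 5661 flag values written out (the constants mirror include/linux/nfs4.h as of this patch; verify against your tree):

    /* Sketch of is_ds_only_client()/is_ds_client() on a bare flag word. */
    #include <stdbool.h>
    #include <stdio.h>

    #define EXCHGID4_FLAG_USE_NON_PNFS  0x00010000
    #define EXCHGID4_FLAG_USE_PNFS_MDS  0x00020000
    #define EXCHGID4_FLAG_USE_PNFS_DS   0x00040000
    #define EXCHGID4_FLAG_MASK_PNFS     0x00070000

    static bool is_ds_only(unsigned int flags)
    {
        /* DS bit set and no other pNFS role bit */
        return (flags & EXCHGID4_FLAG_MASK_PNFS) ==
                EXCHGID4_FLAG_USE_PNFS_DS;
    }

    static bool is_ds(unsigned int flags)
    {
        return flags & EXCHGID4_FLAG_USE_PNFS_DS;
    }

    int main(void)
    {
        unsigned int ds_only = EXCHGID4_FLAG_USE_PNFS_DS;
        unsigned int mds_and_ds = EXCHGID4_FLAG_USE_PNFS_MDS |
                                  EXCHGID4_FLAG_USE_PNFS_DS;

        printf("DS only: ds_only=%d ds=%d\n",
               is_ds_only(ds_only), is_ds(ds_only));
        printf("MDS+DS:  ds_only=%d ds=%d\n",
               is_ds_only(mds_and_ds), is_ds(mds_and_ds));
        return 0;
    }
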
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 23f930caf1e2..428558464817 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -40,32 +40,309 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
MODULE_DESCRIPTION("The NFSv4 file layout driver");
-static int
-filelayout_set_layoutdriver(struct nfs_server *nfss)
+#define FILELAYOUT_POLL_RETRY_MAX (15*HZ)
+
+static loff_t
+filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
+ loff_t offset)
{
- int status = pnfs_alloc_init_deviceid_cache(nfss->nfs_client,
- nfs4_fl_free_deviceid_callback);
- if (status) {
- printk(KERN_WARNING "%s: deviceid cache could not be "
- "initialized\n", __func__);
- return status;
+ u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
+ u64 tmp;
+
+ offset -= flseg->pattern_offset;
+ tmp = offset;
+ do_div(tmp, stripe_width);
+
+ return tmp * flseg->stripe_unit + do_div(offset, flseg->stripe_unit);
+}
+
+/* This function is used by the layout driver to calculate the
+ * offset of the file on the dserver, based on whether the
+ * layout type is STRIPE_DENSE or STRIPE_SPARSE.
+ */
+static loff_t
+filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ switch (flseg->stripe_type) {
+ case STRIPE_SPARSE:
+ return offset;
+
+ case STRIPE_DENSE:
+ return filelayout_get_dense_offset(flseg, offset);
}
- dprintk("%s: deviceid cache has been initialized successfully\n",
- __func__);
+
+ BUG();
+}
+
+/* For data server errors we don't recover from */
+static void
+filelayout_set_lo_fail(struct pnfs_layout_segment *lseg)
+{
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ } else {
+ dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ }
+}
+
+static int filelayout_async_handle_error(struct rpc_task *task,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ int *reset)
+{
+ if (task->tk_status >= 0)
+ return 0;
+
+ *reset = 0;
+
+ switch (task->tk_status) {
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session. Exchangeid "
+ "flags 0x%x\n", __func__, task->tk_status,
+ clp->cl_exchange_flags);
+ nfs4_schedule_session_recovery(clp->cl_session);
+ break;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ case -EKEYEXPIRED:
+ rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
+ break;
+ default:
+ dprintk("%s DS error. Retry through MDS %d\n", __func__,
+ task->tk_status);
+ *reset = 1;
+ break;
+ }
+ task->tk_status = 0;
+ return -EAGAIN;
+}
+
+/* NFS_PROTO call done callback routines */
+
+static int filelayout_read_done_cb(struct rpc_task *task,
+ struct nfs_read_data *data)
+{
+ struct nfs_client *clp = data->ds_clp;
+ int reset = 0;
+
+ dprintk("%s DS read\n", __func__);
+
+ if (filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, &reset) == -EAGAIN) {
+ dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
+ __func__, data->ds_clp, data->ds_clp->cl_session);
+ if (reset) {
+ filelayout_set_lo_fail(data->lseg);
+ nfs4_reset_read(task, data);
+ clp = NFS_SERVER(data->inode)->nfs_client;
+ }
+ nfs_restart_rpc(task, clp);
+ return -EAGAIN;
+ }
+
return 0;
}
-/* Clear out the layout by destroying its device list */
-static int
-filelayout_clear_layoutdriver(struct nfs_server *nfss)
+/*
+ * Call ops for the async read/write cases
+ * In the case of dense layouts, the offset needs to be reset to its
+ * original value.
+ */
+static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
- dprintk("--> %s\n", __func__);
+ struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+
+ rdata->read_done_cb = filelayout_read_done_cb;
+
+ if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
+ &rdata->args.seq_args, &rdata->res.seq_res,
+ 0, task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void filelayout_read_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+
+ dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
+
+ /* Note this may cause RPC to be resent */
+ rdata->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_read_release(void *data)
+{
+ struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+
+ rdata->mds_ops->rpc_release(data);
+}
+
+static int filelayout_write_done_cb(struct rpc_task *task,
+ struct nfs_write_data *data)
+{
+ int reset = 0;
+
+ if (filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, &reset) == -EAGAIN) {
+ struct nfs_client *clp;
+
+ dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
+ __func__, data->ds_clp, data->ds_clp->cl_session);
+ if (reset) {
+ filelayout_set_lo_fail(data->lseg);
+ nfs4_reset_write(task, data);
+ clp = NFS_SERVER(data->inode)->nfs_client;
+ } else
+ clp = data->ds_clp;
+ nfs_restart_rpc(task, clp);
+ return -EAGAIN;
+ }
- if (nfss->nfs_client->cl_devid_cache)
- pnfs_put_deviceid_cache(nfss->nfs_client);
return 0;
}
+static void filelayout_write_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
+ &wdata->args.seq_args, &wdata->res.seq_res,
+ 0, task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void filelayout_write_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ /* Note this may cause RPC to be resent */
+ wdata->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_write_release(void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ wdata->mds_ops->rpc_release(data);
+}
+
+struct rpc_call_ops filelayout_read_call_ops = {
+ .rpc_call_prepare = filelayout_read_prepare,
+ .rpc_call_done = filelayout_read_call_done,
+ .rpc_release = filelayout_read_release,
+};
+
+struct rpc_call_ops filelayout_write_call_ops = {
+ .rpc_call_prepare = filelayout_write_prepare,
+ .rpc_call_done = filelayout_write_call_done,
+ .rpc_release = filelayout_write_release,
+};
+
+static enum pnfs_try_status
+filelayout_read_pagelist(struct nfs_read_data *data)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ loff_t offset = data->args.offset;
+ u32 j, idx;
+ struct nfs_fh *fh;
+ int status;
+
+ dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
+ __func__, data->inode->i_ino,
+ data->args.pgbase, (size_t)data->args.count, offset);
+
+ /* Retrieve the correct rpc_client for the byte range */
+ j = nfs4_fl_calc_j_index(lseg, offset);
+ idx = nfs4_fl_calc_ds_index(lseg, j);
+ ds = nfs4_fl_prepare_ds(lseg, idx);
+ if (!ds) {
+ /* Either layout fh index faulty, or ds connect failed */
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ return PNFS_NOT_ATTEMPTED;
+ }
+ dprintk("%s USE DS:ip %x %hu\n", __func__,
+ ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
+
+ /* No multipath support. Use first DS */
+ data->ds_clp = ds->ds_clp;
+ fh = nfs4_fl_select_ds_fh(lseg, j);
+ if (fh)
+ data->args.fh = fh;
+
+ data->args.offset = filelayout_get_dserver_offset(lseg, offset);
+ data->mds_offset = offset;
+
+ /* Perform an asynchronous read to ds */
+ status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient,
+ &filelayout_read_call_ops);
+ BUG_ON(status != 0);
+ return PNFS_ATTEMPTED;
+}
+
+/* Perform async writes. */
+static enum pnfs_try_status
+filelayout_write_pagelist(struct nfs_write_data *data, int sync)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ loff_t offset = data->args.offset;
+ u32 j, idx;
+ struct nfs_fh *fh;
+ int status;
+
+ /* Retrieve the correct rpc_client for the byte range */
+ j = nfs4_fl_calc_j_index(lseg, offset);
+ idx = nfs4_fl_calc_ds_index(lseg, j);
+ ds = nfs4_fl_prepare_ds(lseg, idx);
+ if (!ds) {
+ printk(KERN_ERR "%s: prepare_ds failed, use MDS\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ return PNFS_NOT_ATTEMPTED;
+ }
+ dprintk("%s ino %lu sync %d req %Zu@%llu DS:%x:%hu\n", __func__,
+ data->inode->i_ino, sync, (size_t) data->args.count, offset,
+ ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
+
+ /* We can't handle commit to ds yet */
+ if (!FILELAYOUT_LSEG(lseg)->commit_through_mds)
+ data->args.stable = NFS_FILE_SYNC;
+
+ data->write_done_cb = filelayout_write_done_cb;
+ data->ds_clp = ds->ds_clp;
+ fh = nfs4_fl_select_ds_fh(lseg, j);
+ if (fh)
+ data->args.fh = fh;
+ /*
+ * Get the file offset on the dserver. Set the write offset to
+ * this offset and save the original offset.
+ */
+ data->args.offset = filelayout_get_dserver_offset(lseg, offset);
+ data->mds_offset = offset;
+
+ /* Perform an asynchronous write */
+ status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
+ &filelayout_write_call_ops, sync);
+ BUG_ON(status != 0);
+ return PNFS_ATTEMPTED;
+}
+
/*
* filelayout_check_layout()
*
@@ -92,14 +369,14 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
goto out;
}
- if (fl->stripe_unit % PAGE_SIZE) {
- dprintk("%s Stripe unit (%u) not page aligned\n",
+ if (!fl->stripe_unit || fl->stripe_unit % PAGE_SIZE) {
+ dprintk("%s Invalid stripe unit (%u)\n",
__func__, fl->stripe_unit);
goto out;
}
/* find and reference the deviceid */
- dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
+ dsaddr = nfs4_fl_find_get_deviceid(id);
if (dsaddr == NULL) {
dsaddr = get_device_info(lo->plh_inode, id);
if (dsaddr == NULL)
@@ -134,7 +411,7 @@ out:
dprintk("--> %s returns %d\n", __func__, status);
return status;
out_put:
- pnfs_put_deviceid(nfss->nfs_client->cl_devid_cache, &dsaddr->deviceid);
+ nfs4_fl_put_deviceid(dsaddr);
goto out;
}
@@ -243,23 +520,47 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
static void
filelayout_free_lseg(struct pnfs_layout_segment *lseg)
{
- struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->plh_inode);
struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
dprintk("--> %s\n", __func__);
- pnfs_put_deviceid(nfss->nfs_client->cl_devid_cache,
- &fl->dsaddr->deviceid);
+ nfs4_fl_put_deviceid(fl->dsaddr);
_filelayout_free_lseg(fl);
}
+/*
+ * filelayout_pg_test(). Called by nfs_can_coalesce_requests()
+ *
+ * return 1 : coalesce page
+ * return 0 : don't coalesce page
+ */
+int
+filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ u64 p_stripe, r_stripe;
+ u32 stripe_unit;
+
+ if (!pgio->pg_lseg)
+ return 1;
+ p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
+ r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
+ stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
+
+ do_div(p_stripe, stripe_unit);
+ do_div(r_stripe, stripe_unit);
+
+ return (p_stripe == r_stripe);
+}
+
static struct pnfs_layoutdriver_type filelayout_type = {
- .id = LAYOUT_NFSV4_1_FILES,
- .name = "LAYOUT_NFSV4_1_FILES",
- .owner = THIS_MODULE,
- .set_layoutdriver = filelayout_set_layoutdriver,
- .clear_layoutdriver = filelayout_clear_layoutdriver,
- .alloc_lseg = filelayout_alloc_lseg,
- .free_lseg = filelayout_free_lseg,
+ .id = LAYOUT_NFSV4_1_FILES,
+ .name = "LAYOUT_NFSV4_1_FILES",
+ .owner = THIS_MODULE,
+ .alloc_lseg = filelayout_alloc_lseg,
+ .free_lseg = filelayout_free_lseg,
+ .pg_test = filelayout_pg_test,
+ .read_pagelist = filelayout_read_pagelist,
+ .write_pagelist = filelayout_write_pagelist,
};
static int __init nfs4filelayout_init(void)
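
For dense layouts each data server stores only its own stripe units back to back, so every full stripe width of file offset collapses to a single stripe unit on the DS. A worked sketch of filelayout_get_dense_offset() with ordinary 64-bit arithmetic in place of do_div(), assuming pattern_offset is zero; the geometry is made up:

    /* Dense-stripe offset math: 4 data servers, 64KB stripe unit. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t dense_offset(uint64_t offset, uint32_t stripe_unit,
                                 uint32_t stripe_count)
    {
        uint64_t stripe_width = (uint64_t)stripe_unit * stripe_count;

        /* each full stripe below us stores one unit on this DS */
        return (offset / stripe_width) * stripe_unit +
               offset % stripe_unit;
    }

    int main(void)
    {
        uint32_t unit = 64 * 1024, count = 4;
        uint64_t off = 5 * (uint64_t)unit + 100; /* 100 bytes into unit 5 */

        /* unit 5 sits in stripe 1, so one earlier unit lives on this DS */
        printf("file offset %llu -> ds offset %llu\n",
               (unsigned long long)off,
               (unsigned long long)dense_offset(off, unit, count));
        return 0;
    }
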
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index bbf60dd2ab9d..ee0c907742b5 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -55,8 +55,14 @@ struct nfs4_pnfs_ds {
atomic_t ds_count;
};
+/* nfs4_file_layout_dsaddr flags */
+#define NFS4_DEVICE_ID_NEG_ENTRY 0x00000001
+
struct nfs4_file_layout_dsaddr {
- struct pnfs_deviceid_node deviceid;
+ struct hlist_node node;
+ struct nfs4_deviceid deviceid;
+ atomic_t ref;
+ unsigned long flags;
u32 stripe_count;
u8 *stripe_indices;
u32 ds_num;
@@ -83,11 +89,18 @@ FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
generic_hdr);
}
-extern void nfs4_fl_free_deviceid_callback(struct pnfs_deviceid_node *);
+extern struct nfs_fh *
+nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
+
extern void print_ds(struct nfs4_pnfs_ds *ds);
extern void print_deviceid(struct nfs4_deviceid *dev_id);
+u32 nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset);
+u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j);
+struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg,
+ u32 ds_idx);
extern struct nfs4_file_layout_dsaddr *
-nfs4_fl_find_get_deviceid(struct nfs_client *, struct nfs4_deviceid *dev_id);
+nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id);
+extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
struct nfs4_file_layout_dsaddr *
get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id);
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index b73c34375f60..68143c162e3b 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -37,6 +37,30 @@
#define NFSDBG_FACILITY NFSDBG_PNFS_LD
/*
+ * Device ID RCU cache. A device ID is unique per client ID and layout type.
+ */
+#define NFS4_FL_DEVICE_ID_HASH_BITS 5
+#define NFS4_FL_DEVICE_ID_HASH_SIZE (1 << NFS4_FL_DEVICE_ID_HASH_BITS)
+#define NFS4_FL_DEVICE_ID_HASH_MASK (NFS4_FL_DEVICE_ID_HASH_SIZE - 1)
+
+static inline u32
+nfs4_fl_deviceid_hash(struct nfs4_deviceid *id)
+{
+ unsigned char *cptr = (unsigned char *)id->data;
+ unsigned int nbytes = NFS4_DEVICEID4_SIZE;
+ u32 x = 0;
+
+ while (nbytes--) {
+ x *= 37;
+ x += *cptr++;
+ }
+ return x & NFS4_FL_DEVICE_ID_HASH_MASK;
+}
+
+static struct hlist_head filelayout_deviceid_cache[NFS4_FL_DEVICE_ID_HASH_SIZE];
+static DEFINE_SPINLOCK(filelayout_deviceid_lock);
+
+/*
* Data server cache
*
* Data servers can be mapped to different device ids.
@@ -104,6 +128,67 @@ _data_server_lookup_locked(u32 ip_addr, u32 port)
return NULL;
}
+/*
+ * Create an rpc connection to the nfs4_pnfs_ds data server
+ * Currently only support IPv4
+ */
+static int
+nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
+{
+ struct nfs_client *clp;
+ struct sockaddr_in sin;
+ int status = 0;
+
+ dprintk("--> %s ip:port %x:%hu au_flavor %d\n", __func__,
+ ntohl(ds->ds_ip_addr), ntohs(ds->ds_port),
+ mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = ds->ds_ip_addr;
+ sin.sin_port = ds->ds_port;
+
+ clp = nfs4_set_ds_client(mds_srv->nfs_client, (struct sockaddr *)&sin,
+ sizeof(sin), IPPROTO_TCP);
+ if (IS_ERR(clp)) {
+ status = PTR_ERR(clp);
+ goto out;
+ }
+
+ if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
+ if (!is_ds_client(clp)) {
+ status = -ENODEV;
+ goto out_put;
+ }
+ ds->ds_clp = clp;
+ dprintk("%s [existing] ip=%x, port=%hu\n", __func__,
+ ntohl(ds->ds_ip_addr), ntohs(ds->ds_port));
+ goto out;
+ }
+
+ /*
+ * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the DS lease to
+ * be equal to the MDS lease. Renewal is scheduled in create_session.
+ */
+ spin_lock(&mds_srv->nfs_client->cl_lock);
+ clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
+ spin_unlock(&mds_srv->nfs_client->cl_lock);
+ clp->cl_last_renewal = jiffies;
+
+ /* New nfs_client */
+ status = nfs4_init_ds_session(clp);
+ if (status)
+ goto out_put;
+
+ ds->ds_clp = clp;
+ dprintk("%s [new] ip=%x, port=%hu\n", __func__, ntohl(ds->ds_ip_addr),
+ ntohs(ds->ds_port));
+out:
+ return status;
+out_put:
+ nfs_put_client(clp);
+ goto out;
+}
+
static void
destroy_ds(struct nfs4_pnfs_ds *ds)
{
@@ -122,7 +207,7 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
struct nfs4_pnfs_ds *ds;
int i;
- print_deviceid(&dsaddr->deviceid.de_id);
+ print_deviceid(&dsaddr->deviceid);
for (i = 0; i < dsaddr->ds_num; i++) {
ds = dsaddr->ds_list[i];
@@ -139,15 +224,6 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
kfree(dsaddr);
}
-void
-nfs4_fl_free_deviceid_callback(struct pnfs_deviceid_node *device)
-{
- struct nfs4_file_layout_dsaddr *dsaddr =
- container_of(device, struct nfs4_file_layout_dsaddr, deviceid);
-
- nfs4_fl_free_deviceid(dsaddr);
-}
-
static struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port)
{
@@ -300,7 +376,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
dsaddr->stripe_count = cnt;
dsaddr->ds_num = num;
- memcpy(&dsaddr->deviceid.de_id, &pdev->dev_id, sizeof(pdev->dev_id));
+ memcpy(&dsaddr->deviceid, &pdev->dev_id, sizeof(pdev->dev_id));
/* Go back and read stripe indices */
p = indicesp;
@@ -350,28 +426,37 @@ out_err:
}
/*
- * Decode the opaque device specified in 'dev'
- * and add it to the list of available devices.
- * If the deviceid is already cached, nfs4_add_deviceid will return
- * a pointer to the cached struct and throw away the new.
+ * Decode the opaque device specified in 'dev' and add it to the cache of
+ * available devices.
*/
-static struct nfs4_file_layout_dsaddr*
+static struct nfs4_file_layout_dsaddr *
decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
{
- struct nfs4_file_layout_dsaddr *dsaddr;
- struct pnfs_deviceid_node *d;
+ struct nfs4_file_layout_dsaddr *d, *new;
+ long hash;
- dsaddr = decode_device(inode, dev);
- if (!dsaddr) {
+ new = decode_device(inode, dev);
+ if (!new) {
printk(KERN_WARNING "%s: Could not decode or add device\n",
__func__);
return NULL;
}
- d = pnfs_add_deviceid(NFS_SERVER(inode)->nfs_client->cl_devid_cache,
- &dsaddr->deviceid);
+ spin_lock(&filelayout_deviceid_lock);
+ d = nfs4_fl_find_get_deviceid(&new->deviceid);
+ if (d) {
+ spin_unlock(&filelayout_deviceid_lock);
+ nfs4_fl_free_deviceid(new);
+ return d;
+ }
+
+ INIT_HLIST_NODE(&new->node);
+ atomic_set(&new->ref, 1);
+ hash = nfs4_fl_deviceid_hash(&new->deviceid);
+ hlist_add_head_rcu(&new->node, &filelayout_deviceid_cache[hash]);
+ spin_unlock(&filelayout_deviceid_lock);
- return container_of(d, struct nfs4_file_layout_dsaddr, deviceid);
+ return new;
}
/*
@@ -446,12 +531,123 @@ out_free:
return dsaddr;
}
+void
+nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
+{
+ if (atomic_dec_and_lock(&dsaddr->ref, &filelayout_deviceid_lock)) {
+ hlist_del_rcu(&dsaddr->node);
+ spin_unlock(&filelayout_deviceid_lock);
+
+ synchronize_rcu();
+ nfs4_fl_free_deviceid(dsaddr);
+ }
+}
+
struct nfs4_file_layout_dsaddr *
-nfs4_fl_find_get_deviceid(struct nfs_client *clp, struct nfs4_deviceid *id)
+nfs4_fl_find_get_deviceid(struct nfs4_deviceid *id)
+{
+ struct nfs4_file_layout_dsaddr *d;
+ struct hlist_node *n;
+ long hash = nfs4_fl_deviceid_hash(id);
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(d, n, &filelayout_deviceid_cache[hash], node) {
+ if (!memcmp(&d->deviceid, id, sizeof(*id))) {
+ if (!atomic_inc_not_zero(&d->ref))
+ goto fail;
+ rcu_read_unlock();
+ return d;
+ }
+ }
+fail:
+ rcu_read_unlock();
+ return NULL;
+}
+
+/*
+ * Want res = (offset - layout->pattern_offset) / layout->stripe_unit
+ * Then: ((res + fsi) % dsaddr->stripe_count)
+ */
+u32
+nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+ u64 tmp;
+
+ tmp = offset - flseg->pattern_offset;
+ do_div(tmp, flseg->stripe_unit);
+ tmp += flseg->first_stripe_index;
+ return do_div(tmp, flseg->dsaddr->stripe_count);
+}
+
+u32
+nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j)
+{
+ return FILELAYOUT_LSEG(lseg)->dsaddr->stripe_indices[j];
+}
+
+struct nfs_fh *
+nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+ u32 i;
+
+ if (flseg->stripe_type == STRIPE_SPARSE) {
+ if (flseg->num_fh == 1)
+ i = 0;
+ else if (flseg->num_fh == 0)
+ /* Use the MDS OPEN fh set in nfs_read_rpcsetup */
+ return NULL;
+ else
+ i = nfs4_fl_calc_ds_index(lseg, j);
+ } else
+ i = j;
+ return flseg->fh_array[i];
+}
+
+static void
+filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
+ int err, u32 ds_addr)
+{
+ u32 *p = (u32 *)&dsaddr->deviceid;
+
+ printk(KERN_ERR "NFS: data server %x connection error %d."
+ " Deviceid [%x%x%x%x] marked out of use.\n",
+ ds_addr, err, p[0], p[1], p[2], p[3]);
+
+ spin_lock(&filelayout_deviceid_lock);
+ dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
+ spin_unlock(&filelayout_deviceid_lock);
+}
+
+struct nfs4_pnfs_ds *
+nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
{
- struct pnfs_deviceid_node *d;
+ struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
+ struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
- d = pnfs_find_get_deviceid(clp->cl_devid_cache, id);
- return (d == NULL) ? NULL :
- container_of(d, struct nfs4_file_layout_dsaddr, deviceid);
+ if (ds == NULL) {
+ printk(KERN_ERR "%s: No data server for offset index %d\n",
+ __func__, ds_idx);
+ return NULL;
+ }
+
+ if (!ds->ds_clp) {
+ struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
+ int err;
+
+ if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
+ /* Already tried to connect, don't try again */
+ dprintk("%s Deviceid marked out of use\n", __func__);
+ return NULL;
+ }
+ err = nfs4_ds_connect(s, ds);
+ if (err) {
+ filelayout_mark_devid_negative(dsaddr, err,
+ ntohl(ds->ds_ip_addr));
+ return NULL;
+ }
+ }
+ return ds;
}
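
The replacement cache hashes the 16-byte device ID with a simple multiply-by-37 loop into one of 32 buckets. A standalone copy of the hash for experimentation:

    /* The multiply-by-37 device-ID hash used by the new cache. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEVICEID_SIZE 16                      /* NFS4_DEVICEID4_SIZE */
    #define HASH_BITS     5
    #define HASH_MASK     ((1u << HASH_BITS) - 1)

    static uint32_t deviceid_hash(const unsigned char *id)
    {
        uint32_t x = 0;
        int i;

        for (i = 0; i < DEVICEID_SIZE; i++)
            x = x * 37 + id[i];
        return x & HASH_MASK;
    }

    int main(void)
    {
        unsigned char id[DEVICEID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };

        printf("bucket = %u of %u\n", deviceid_hash(id), HASH_MASK + 1);
        return 0;
    }
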
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0a07e353a961..1d84e7088af9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -85,6 +85,9 @@ static int nfs4_map_errors(int err)
switch (err) {
case -NFS4ERR_RESOURCE:
return -EREMOTEIO;
+ case -NFS4ERR_BADOWNER:
+ case -NFS4ERR_BADNAME:
+ return -EINVAL;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -241,7 +244,7 @@ static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
/* This is the error handling routine for processes that are allowed
* to sleep.
*/
-static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state *state = exception->state;
@@ -293,6 +296,19 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
break;
case -NFS4ERR_OLD_STATEID:
exception->retry = 1;
+ break;
+ case -NFS4ERR_BADOWNER:
+ /* The following works around a Linux server bug! */
+ case -NFS4ERR_BADNAME:
+ if (server->caps & NFS_CAP_UIDGID_NOMAP) {
+ server->caps &= ~NFS_CAP_UIDGID_NOMAP;
+ exception->retry = 1;
+ printk(KERN_WARNING "NFS: v4 server %s "
+ "does not accept raw "
+ "uid/gids. "
+ "Reenabling the idmapper.\n",
+ server->nfs_client->cl_hostname);
+ }
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
@@ -505,7 +521,7 @@ out:
return ret_id;
}
-static int nfs41_setup_sequence(struct nfs4_session *session,
+int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
int cache_reply,
@@ -571,6 +587,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
res->sr_status = 1;
return 0;
}
+EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
int nfs4_setup_sequence(const struct nfs_server *server,
struct nfs4_sequence_args *args,
@@ -1573,9 +1590,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
return 0;
}
-static int nfs4_recover_expired_lease(struct nfs_server *server)
+static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
{
- struct nfs_client *clp = server->nfs_client;
unsigned int loop;
int ret;
@@ -1592,6 +1608,11 @@ static int nfs4_recover_expired_lease(struct nfs_server *server)
return ret;
}
+static int nfs4_recover_expired_lease(struct nfs_server *server)
+{
+ return nfs4_client_recover_expired_lease(server->nfs_client);
+}
+
/*
* OPEN_EXPIRED:
* reclaim state on the server after a network partition.
@@ -3069,15 +3090,10 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
return err;
}
-static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
+static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_server *server = NFS_SERVER(data->inode);
- dprintk("--> %s\n", __func__);
-
- if (!nfs4_sequence_done(task, &data->res.seq_res))
- return -EAGAIN;
-
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
nfs_restart_rpc(task, server->nfs_client);
return -EAGAIN;
@@ -3089,19 +3105,44 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
return 0;
}
+static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
+{
+
+ dprintk("--> %s\n", __func__);
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+
+ return data->read_done_cb(task, data);
+}
+
static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
data->timestamp = jiffies;
+ data->read_done_cb = nfs4_read_done_cb;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
}
-static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
+/* Reset the nfs_read_data to send the read to the MDS. */
+void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
+{
+ dprintk("%s Reset task for i/o through\n", __func__);
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ /* offsets will differ in the dense stripe case */
+ data->args.offset = data->mds_offset;
+ data->ds_clp = NULL;
+ data->args.fh = NFS_FH(data->inode);
+ data->read_done_cb = nfs4_read_done_cb;
+ task->tk_ops = data->mds_ops;
+ rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+}
+EXPORT_SYMBOL_GPL(nfs4_reset_read);
+
+static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
- if (!nfs4_sequence_done(task, &data->res.seq_res))
- return -EAGAIN;
-
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
nfs_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
@@ -3113,11 +3154,41 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
+static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+ return data->write_done_cb(task, data);
+}
+
+/* Reset the nfs_write_data to send the write to the MDS. */
+void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
+{
+ dprintk("%s Reset task for i/o through\n", __func__);
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ data->ds_clp = NULL;
+ data->write_done_cb = nfs4_write_done_cb;
+ data->args.fh = NFS_FH(data->inode);
+ data->args.bitmask = data->res.server->cache_consistency_bitmask;
+ data->args.offset = data->mds_offset;
+ data->res.fattr = &data->fattr;
+ task->tk_ops = data->mds_ops;
+ rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+}
+EXPORT_SYMBOL_GPL(nfs4_reset_write);
+
static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct nfs_server *server = NFS_SERVER(data->inode);
- data->args.bitmask = server->cache_consistency_bitmask;
+ if (data->lseg) {
+ data->args.bitmask = NULL;
+ data->res.fattr = NULL;
+ } else
+ data->args.bitmask = server->cache_consistency_bitmask;
+ if (!data->write_done_cb)
+ data->write_done_cb = nfs4_write_done_cb;
data->res.server = server;
data->timestamp = jiffies;
@@ -5118,6 +5189,27 @@ int nfs4_init_session(struct nfs_server *server)
return ret;
}
+int nfs4_init_ds_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session = clp->cl_session;
+ int ret;
+
+ if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
+ return 0;
+
+ ret = nfs4_client_recover_expired_lease(clp);
+ if (!ret)
+ /* Test for the DS role */
+ if (!is_ds_client(clp))
+ ret = -ENODEV;
+ if (!ret)
+ ret = nfs4_check_client_ready(clp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
+
/*
* Renew the cl_session lease.
*/
@@ -5648,6 +5740,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.clear_acl_cache = nfs4_zap_acl_attr,
.close_context = nfs4_close_context,
.open_context = nfs4_atomic_open,
+ .init_client = nfs4_init_client,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
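
The BADOWNER/BADNAME handling above turns a server's one-time rejection of raw uid/gid strings into a silent fallback: clear NFS_CAP_UIDGID_NOMAP and retry the operation with the idmapper re-enabled. A reduced sketch of that retry shape; do_setattr() and the capability bit are hypothetical stand-ins, and 10039 is the RFC 3530 value for NFS4ERR_BADOWNER:

    /* Retry loop: drop raw-ID mode on the first BADOWNER. */
    #include <stdio.h>

    #define NFS4ERR_BADOWNER 10039
    #define CAP_UIDGID_NOMAP (1u << 0)

    /* Pretend RPC: the server rejects raw uid/gid strings. */
    static int do_setattr(unsigned int caps)
    {
        return (caps & CAP_UIDGID_NOMAP) ? -NFS4ERR_BADOWNER : 0;
    }

    int main(void)
    {
        unsigned int caps = CAP_UIDGID_NOMAP;
        int err;

    retry:
        err = do_setattr(caps);
        if (err == -NFS4ERR_BADOWNER && (caps & CAP_UIDGID_NOMAP)) {
            /* server wants names: re-enable the idmapper and retry */
            caps &= ~CAP_UIDGID_NOMAP;
            fprintf(stderr, "re-enabling the idmapper\n");
            goto retry;
        }
        printf("setattr -> %d\n", err);
        return 0;
    }
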
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 402143d75fc5..df8e7f3ca56d 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -64,12 +64,8 @@ nfs4_renew_state(struct work_struct *work)
ops = clp->cl_mvops->state_renewal_ops;
dprintk("%s: start\n", __func__);
- rcu_read_lock();
- if (list_empty(&clp->cl_superblocks)) {
- rcu_read_unlock();
+ if (test_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state))
goto out;
- }
- rcu_read_unlock();
spin_lock(&clp->cl_lock);
lease = clp->cl_lease_time;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 0592288f9f06..ab1bf5bb021f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -153,6 +153,11 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
int status;
struct nfs_fsinfo fsinfo;
+ if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
+ nfs4_schedule_state_renewal(clp);
+ return 0;
+ }
+
status = nfs4_proc_get_lease_time(clp, &fsinfo);
if (status == 0) {
/* Update lease time and schedule renewal */
@@ -1448,6 +1453,7 @@ void nfs4_schedule_session_recovery(struct nfs4_session *session)
{
nfs4_schedule_lease_recovery(session->clp);
}
+EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
void nfs41_handle_recall_slot(struct nfs_client *clp)
{
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 94d50e86a124..0cf560f77884 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -844,7 +844,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
if (iap->ia_valid & ATTR_MODE)
len += 4;
if (iap->ia_valid & ATTR_UID) {
- owner_namelen = nfs_map_uid_to_name(server->nfs_client, iap->ia_uid, owner_name, IDMAP_NAMESZ);
+ owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);
if (owner_namelen < 0) {
dprintk("nfs: couldn't resolve uid %d to string\n",
iap->ia_uid);
@@ -856,7 +856,7 @@ static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const
len += 4 + (XDR_QUADLEN(owner_namelen) << 2);
}
if (iap->ia_valid & ATTR_GID) {
- owner_grouplen = nfs_map_gid_to_group(server->nfs_client, iap->ia_gid, owner_group, IDMAP_NAMESZ);
+ owner_grouplen = nfs_map_gid_to_group(server, iap->ia_gid, owner_group, IDMAP_NAMESZ);
if (owner_grouplen < 0) {
dprintk("nfs: couldn't resolve gid %d to string\n",
iap->ia_gid);
@@ -1384,7 +1384,7 @@ static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
hdr->replen += decode_putrootfh_maxsz;
}
-static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx)
+static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx, const struct nfs_lock_context *l_ctx, int zero_seqid)
{
nfs4_stateid stateid;
__be32 *p;
@@ -1392,6 +1392,8 @@ static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context
p = reserve_space(xdr, NFS4_STATEID_SIZE);
if (ctx->state != NULL) {
nfs4_copy_stateid(&stateid, ctx->state, l_ctx->lockowner, l_ctx->pid);
+ if (zero_seqid)
+ stateid.stateid.seqid = 0;
xdr_encode_opaque_fixed(p, stateid.data, NFS4_STATEID_SIZE);
} else
xdr_encode_opaque_fixed(p, zero_stateid.data, NFS4_STATEID_SIZE);
@@ -1404,7 +1406,8 @@ static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args,
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_READ);
- encode_stateid(xdr, args->context, args->lock_context);
+ encode_stateid(xdr, args->context, args->lock_context,
+ hdr->minorversion);
p = reserve_space(xdr, 12);
p = xdr_encode_hyper(p, args->offset);
@@ -1592,7 +1595,8 @@ static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *arg
p = reserve_space(xdr, 4);
*p = cpu_to_be32(OP_WRITE);
- encode_stateid(xdr, args->context, args->lock_context);
+ encode_stateid(xdr, args->context, args->lock_context,
+ hdr->minorversion);
p = reserve_space(xdr, 16);
p = xdr_encode_hyper(p, args->offset);
@@ -2271,7 +2275,8 @@ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_putfh(xdr, args->fh, &hdr);
encode_write(xdr, args, &hdr);
req->rq_snd_buf.flags |= XDRBUF_WRITE;
- encode_getfattr(xdr, args->bitmask, &hdr);
+ if (args->bitmask)
+ encode_getfattr(xdr, args->bitmask, &hdr);
encode_nops(&hdr);
}
@@ -3382,7 +3387,7 @@ out_overflow:
}
static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
- struct nfs_client *clp, uint32_t *uid, int may_sleep)
+ const struct nfs_server *server, uint32_t *uid, int may_sleep)
{
uint32_t len;
__be32 *p;
@@ -3402,7 +3407,7 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
if (!may_sleep) {
/* do nothing */
} else if (len < XDR_MAX_NETOBJ) {
- if (nfs_map_name_to_uid(clp, (char *)p, len, uid) == 0)
+ if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0)
ret = NFS_ATTR_FATTR_OWNER;
else
dprintk("%s: nfs_map_name_to_uid failed!\n",
@@ -3420,7 +3425,7 @@ out_overflow:
}
static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
- struct nfs_client *clp, uint32_t *gid, int may_sleep)
+ const struct nfs_server *server, uint32_t *gid, int may_sleep)
{
uint32_t len;
__be32 *p;
@@ -3440,7 +3445,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
if (!may_sleep) {
/* do nothing */
} else if (len < XDR_MAX_NETOBJ) {
- if (nfs_map_group_to_gid(clp, (char *)p, len, gid) == 0)
+ if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0)
ret = NFS_ATTR_FATTR_GROUP;
else
dprintk("%s: nfs_map_group_to_gid failed!\n",
@@ -3939,14 +3944,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_owner(xdr, bitmap, server->nfs_client,
- &fattr->uid, may_sleep);
+ status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
- status = decode_attr_group(xdr, bitmap, server->nfs_client,
- &fattr->gid, may_sleep);
+ status = decode_attr_group(xdr, bitmap, server, &fattr->gid, may_sleep);
if (status < 0)
goto xdr_error;
fattr->valid |= status;
@@ -5690,8 +5693,9 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_write(xdr, res);
if (status)
goto out;
- decode_getfattr(xdr, res->fattr, res->server,
- !RPC_IS_ASYNC(rqstp->rq_task));
+ if (res->fattr)
+ decode_getfattr(xdr, res->fattr, res->server,
+ !RPC_IS_ASYNC(rqstp->rq_task));
if (!status)
status = res->count;
out:
@@ -6167,8 +6171,6 @@ static struct {
{ NFS4ERR_DQUOT, -EDQUOT },
{ NFS4ERR_STALE, -ESTALE },
{ NFS4ERR_BADHANDLE, -EBADHANDLE },
- { NFS4ERR_BADOWNER, -EINVAL },
- { NFS4ERR_BADNAME, -EINVAL },
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
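
With zero_seqid set for minorversion 1, READ and WRITE stateids go out with seqid 0, which NFSv4.1 (RFC 5661) interprets as "the current stateid", sidestepping server-side seqid checks. A sketch of the transformation on a simplified stateid struct:

    /* Zero the seqid on the wire when the caller asks for it. */
    #include <stdint.h>
    #include <stdio.h>

    struct stateid {
        uint32_t seqid;
        char other[12];
    };

    static void encode_stateid(struct stateid *wire,
                               const struct stateid *in, int zero_seqid)
    {
        *wire = *in;
        if (zero_seqid)
            wire->seqid = 0;
    }

    int main(void)
    {
        struct stateid s = { .seqid = 7 }, wire;

        encode_stateid(&wire, &s, 1 /* minorversion */);
        printf("wire seqid = %u\n", wire.seqid);
        return 0;
    }
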
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e1164e3f9e69..23e794410669 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -20,6 +20,7 @@
#include <linux/nfs_mount.h>
#include "internal.h"
+#include "pnfs.h"
static struct kmem_cache *nfs_page_cachep;
@@ -213,7 +214,7 @@ nfs_wait_on_request(struct nfs_page *req)
*/
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
- int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
+ int (*doio)(struct nfs_pageio_descriptor *),
size_t bsize,
int io_flags)
{
@@ -226,6 +227,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_doio = doio;
desc->pg_ioflags = io_flags;
desc->pg_error = 0;
+ desc->pg_lseg = NULL;
}
/**
@@ -240,7 +242,8 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
* Return 'true' if this is the case, else return 'false'.
*/
static int nfs_can_coalesce_requests(struct nfs_page *prev,
- struct nfs_page *req)
+ struct nfs_page *req,
+ struct nfs_pageio_descriptor *pgio)
{
if (req->wb_context->cred != prev->wb_context->cred)
return 0;
@@ -254,6 +257,12 @@ static int nfs_can_coalesce_requests(struct nfs_page *prev,
return 0;
if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
return 0;
+ /*
+ * Non-whole file layouts need to check that req is inside of
+ * pgio->pg_lseg.
+ */
+ if (pgio->pg_test && !pgio->pg_test(pgio, prev, req))
+ return 0;
return 1;
}
@@ -286,7 +295,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
if (newlen > desc->pg_bsize)
return 0;
prev = nfs_list_entry(desc->pg_list.prev);
- if (!nfs_can_coalesce_requests(prev, req))
+ if (!nfs_can_coalesce_requests(prev, req, desc))
return 0;
} else
desc->pg_base = req->wb_pgbase;
@@ -302,12 +311,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
if (!list_empty(&desc->pg_list)) {
- int error = desc->pg_doio(desc->pg_inode,
- &desc->pg_list,
- nfs_page_array_len(desc->pg_base,
- desc->pg_count),
- desc->pg_count,
- desc->pg_ioflags);
+ int error = desc->pg_doio(desc);
if (error < 0)
desc->pg_error = error;
else
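
nfs_can_coalesce_requests() now also consults the descriptor's pg_test hook, and the file layout's hook (filelayout_pg_test above) refuses to coalesce two pages that fall in different stripe units. A sketch of that test with example geometry:

    /* Coalesce only pages that land in the same stripe unit. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static int same_stripe_unit(uint64_t prev_index, uint64_t req_index,
                                uint32_t stripe_unit)
    {
        uint64_t p = (prev_index * PAGE_SIZE) / stripe_unit;
        uint64_t r = (req_index * PAGE_SIZE) / stripe_unit;

        return p == r;
    }

    int main(void)
    {
        uint32_t unit = 64 * 1024;      /* 16 pages per stripe unit */

        printf("pages 14,15 -> %d\n", same_stripe_unit(14, 15, unit));
        printf("pages 15,16 -> %d\n", same_stripe_unit(15, 16, unit));
        return 0;
    }
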
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 1b1bc1a0fb0a..f38813a0a295 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -30,6 +30,7 @@
#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
+#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_PNFS
@@ -74,10 +75,8 @@ find_pnfs_driver(u32 id)
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
- if (nfss->pnfs_curr_ld) {
- nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+ if (nfss->pnfs_curr_ld)
module_put(nfss->pnfs_curr_ld->owner);
- }
nfss->pnfs_curr_ld = NULL;
}
@@ -115,13 +114,7 @@ set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
goto out_no_driver;
}
server->pnfs_curr_ld = ld_type;
- if (ld_type->set_layoutdriver(server)) {
- printk(KERN_ERR
- "%s: Error initializing mount point for layout driver %u.\n",
- __func__, id);
- module_put(ld_type->owner);
- goto out_no_driver;
- }
+
dprintk("%s: pNFS module for %u set\n", __func__, id);
return;
@@ -230,37 +223,41 @@ static void free_lseg(struct pnfs_layout_segment *lseg)
put_layout_hdr(NFS_I(ino)->layout);
}
-/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
- * could sleep, so must be called outside of the lock.
- * Returns 1 if object was removed, otherwise return 0.
- */
-static int
-put_lseg_locked(struct pnfs_layout_segment *lseg,
- struct list_head *tmp_list)
+static void
+put_lseg_common(struct pnfs_layout_segment *lseg)
+{
+ struct inode *inode = lseg->pls_layout->plh_inode;
+
+ BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+ list_del_init(&lseg->pls_list);
+ if (list_empty(&lseg->pls_layout->plh_segs)) {
+ set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
+ /* Matched by initial refcount set in alloc_init_layout_hdr */
+ put_layout_hdr_locked(lseg->pls_layout);
+ }
+ rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
+}
+
+void
+put_lseg(struct pnfs_layout_segment *lseg)
{
+ struct inode *inode;
+
+ if (!lseg)
+ return;
+
dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount),
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
- if (atomic_dec_and_test(&lseg->pls_refcount)) {
- struct inode *ino = lseg->pls_layout->plh_inode;
+ inode = lseg->pls_layout->plh_inode;
+ if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
+ LIST_HEAD(free_me);
- BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
- list_del(&lseg->pls_list);
- if (list_empty(&lseg->pls_layout->plh_segs)) {
- struct nfs_client *clp;
-
- clp = NFS_SERVER(ino)->nfs_client;
- spin_lock(&clp->cl_lock);
- /* List does not take a reference, so no need for put here */
- list_del_init(&lseg->pls_layout->plh_layouts);
- spin_unlock(&clp->cl_lock);
- clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags);
- }
- rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
- list_add(&lseg->pls_list, tmp_list);
- return 1;
+ put_lseg_common(lseg);
+ list_add(&lseg->pls_list, &free_me);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&free_me);
}
- return 0;
}
static bool
@@ -281,7 +278,13 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
* list. It will now be removed when all
* outstanding io is finished.
*/
- rv = put_lseg_locked(lseg, tmp_list);
+ dprintk("%s: lseg %p ref %d\n", __func__, lseg,
+ atomic_read(&lseg->pls_refcount));
+ if (atomic_dec_and_test(&lseg->pls_refcount)) {
+ put_lseg_common(lseg);
+ list_add(&lseg->pls_list, tmp_list);
+ rv = 1;
+ }
}
return rv;
}
@@ -299,6 +302,11 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
dprintk("%s:Begin lo %p\n", __func__, lo);
+ if (list_empty(&lo->plh_segs)) {
+ if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
+ put_layout_hdr_locked(lo);
+ return 0;
+ }
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
dprintk("%s: freeing lseg %p iomode %d "
@@ -312,11 +320,27 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
return invalid - removed;
}
+/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
struct pnfs_layout_segment *lseg, *tmp;
+ struct pnfs_layout_hdr *lo;
+
+ if (list_empty(free_me))
+ return;
+ lo = list_first_entry(free_me, struct pnfs_layout_segment,
+ pls_list)->pls_layout;
+
+ if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
+ struct nfs_client *clp;
+
+ clp = NFS_SERVER(lo->plh_inode)->nfs_client;
+ spin_lock(&clp->cl_lock);
+ list_del_init(&lo->plh_layouts);
+ spin_unlock(&clp->cl_lock);
+ }
list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
list_del(&lseg->pls_list);
free_lseg(lseg);
@@ -332,10 +356,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
spin_lock(&nfsi->vfs_inode.i_lock);
lo = nfsi->layout;
if (lo) {
- set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
+ lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
- /* Matched by refcount set to 1 in alloc_init_layout_hdr */
- put_layout_hdr_locked(lo);
}
spin_unlock(&nfsi->vfs_inode.i_lock);
pnfs_free_lseg_list(&tmp_list);
@@ -403,6 +425,7 @@ pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
(int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
return true;
return lo->plh_block_lgets ||
+ test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
(list_empty(&lo->plh_segs) &&
(atomic_read(&lo->plh_outstanding) > lget));
@@ -674,7 +697,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
is_matching_lseg(lseg, iomode)) {
- ret = lseg;
+ ret = get_lseg(lseg);
break;
}
if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
@@ -699,6 +722,7 @@ pnfs_update_layout(struct inode *ino,
struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg = NULL;
+ bool first = false;
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
return NULL;
@@ -715,21 +739,25 @@ pnfs_update_layout(struct inode *ino,
dprintk("%s matches recall, use MDS\n", __func__);
goto out_unlock;
}
- /* Check to see if the layout for the given range already exists */
- lseg = pnfs_find_lseg(lo, iomode);
- if (lseg)
- goto out_unlock;
/* if LAYOUTGET already failed once we don't try again */
if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
goto out_unlock;
+ /* Check to see if the layout for the given range already exists */
+ lseg = pnfs_find_lseg(lo, iomode);
+ if (lseg)
+ goto out_unlock;
+
if (pnfs_layoutgets_blocked(lo, NULL, 0))
goto out_unlock;
atomic_inc(&lo->plh_outstanding);
get_layout_hdr(lo);
- if (list_empty(&lo->plh_segs)) {
+ if (list_empty(&lo->plh_segs))
+ first = true;
+ spin_unlock(&ino->i_lock);
+ if (first) {
/* The lo must be on the clp list if there is any
* chance of a CB_LAYOUTRECALL(FILE) coming in.
*/
@@ -738,24 +766,18 @@ pnfs_update_layout(struct inode *ino,
list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
spin_unlock(&clp->cl_lock);
}
- spin_unlock(&ino->i_lock);
lseg = send_layoutget(lo, ctx, iomode);
- if (!lseg) {
- spin_lock(&ino->i_lock);
- if (list_empty(&lo->plh_segs)) {
- spin_lock(&clp->cl_lock);
- list_del_init(&lo->plh_layouts);
- spin_unlock(&clp->cl_lock);
- clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
- }
- spin_unlock(&ino->i_lock);
+ if (!lseg && first) {
+ spin_lock(&clp->cl_lock);
+ list_del_init(&lo->plh_layouts);
+ spin_unlock(&clp->cl_lock);
}
atomic_dec(&lo->plh_outstanding);
put_layout_hdr(lo);
out:
dprintk("%s end, state 0x%lx lseg %p\n", __func__,
- nfsi->layout->plh_flags, lseg);
+ nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
return lseg;
out_unlock:
spin_unlock(&ino->i_lock);
@@ -808,7 +830,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
}
init_lseg(lo, lseg);
lseg->pls_range = res->range;
- *lgp->lsegpp = lseg;
+ *lgp->lsegpp = get_lseg(lseg);
pnfs_insert_layout(lo, lseg);
if (res->return_on_close) {
@@ -829,137 +851,97 @@ out_forget_reply:
goto out;
}
-/*
- * Device ID cache. Currently supports one layout type per struct nfs_client.
- * Add layout type to the lookup key to expand to support multiple types.
- */
-int
-pnfs_alloc_init_deviceid_cache(struct nfs_client *clp,
- void (*free_callback)(struct pnfs_deviceid_node *))
+static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *prev,
+ struct nfs_page *req)
{
- struct pnfs_deviceid_cache *c;
-
- c = kzalloc(sizeof(struct pnfs_deviceid_cache), GFP_KERNEL);
- if (!c)
- return -ENOMEM;
- spin_lock(&clp->cl_lock);
- if (clp->cl_devid_cache != NULL) {
- atomic_inc(&clp->cl_devid_cache->dc_ref);
- dprintk("%s [kref [%d]]\n", __func__,
- atomic_read(&clp->cl_devid_cache->dc_ref));
- kfree(c);
- } else {
- /* kzalloc initializes hlists */
- spin_lock_init(&c->dc_lock);
- atomic_set(&c->dc_ref, 1);
- c->dc_free_callback = free_callback;
- clp->cl_devid_cache = c;
- dprintk("%s [new]\n", __func__);
+ if (pgio->pg_count == prev->wb_bytes) {
+ /* This is the first coalesce call for a series of nfs_pages */
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ prev->wb_context,
+ IOMODE_READ);
}
- spin_unlock(&clp->cl_lock);
- return 0;
+ return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}
-EXPORT_SYMBOL_GPL(pnfs_alloc_init_deviceid_cache);
-/*
- * Called from pnfs_layoutdriver_type->free_lseg
- * last layout segment reference frees deviceid
- */
void
-pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
- struct pnfs_deviceid_node *devid)
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
- struct nfs4_deviceid *id = &devid->de_id;
- struct pnfs_deviceid_node *d;
- struct hlist_node *n;
- long h = nfs4_deviceid_hash(id);
+ struct pnfs_layoutdriver_type *ld;
- dprintk("%s [%d]\n", __func__, atomic_read(&devid->de_ref));
- if (!atomic_dec_and_lock(&devid->de_ref, &c->dc_lock))
- return;
+ ld = NFS_SERVER(inode)->pnfs_curr_ld;
+ pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
+}
- hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[h], de_node)
- if (!memcmp(&d->de_id, id, sizeof(*id))) {
- hlist_del_rcu(&d->de_node);
- spin_unlock(&c->dc_lock);
- synchronize_rcu();
- c->dc_free_callback(devid);
- return;
- }
- spin_unlock(&c->dc_lock);
- /* Why wasn't it found in the list? */
- BUG();
-}
-EXPORT_SYMBOL_GPL(pnfs_put_deviceid);
-
-/* Find and reference a deviceid */
-struct pnfs_deviceid_node *
-pnfs_find_get_deviceid(struct pnfs_deviceid_cache *c, struct nfs4_deviceid *id)
-{
- struct pnfs_deviceid_node *d;
- struct hlist_node *n;
- long hash = nfs4_deviceid_hash(id);
-
- dprintk("--> %s hash %ld\n", __func__, hash);
- rcu_read_lock();
- hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[hash], de_node) {
- if (!memcmp(&d->de_id, id, sizeof(*id))) {
- if (!atomic_inc_not_zero(&d->de_ref)) {
- goto fail;
- } else {
- rcu_read_unlock();
- return d;
- }
- }
+static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ if (pgio->pg_count == prev->wb_bytes) {
+ /* This is the first coalesce call for a series of nfs_pages */
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ prev->wb_context,
+ IOMODE_RW);
}
-fail:
- rcu_read_unlock();
- return NULL;
+ return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
+}
+
+void
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+{
+ struct pnfs_layoutdriver_type *ld;
+
+ ld = NFS_SERVER(inode)->pnfs_curr_ld;
+ pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
+}
+
+enum pnfs_try_status
+pnfs_try_to_write_data(struct nfs_write_data *wdata,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ struct inode *inode = wdata->inode;
+ enum pnfs_try_status trypnfs;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ wdata->mds_ops = call_ops;
+
+ dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
+ inode->i_ino, wdata->args.count, wdata->args.offset, how);
+
+ trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
+ if (trypnfs == PNFS_NOT_ATTEMPTED) {
+ put_lseg(wdata->lseg);
+ wdata->lseg = NULL;
+ } else
+ nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
+
+ dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
+ return trypnfs;
}
-EXPORT_SYMBOL_GPL(pnfs_find_get_deviceid);
/*
- * Add a deviceid to the cache.
- * GETDEVICEINFOs for same deviceid can race. If deviceid is found, discard new
+ * Call the appropriate parallel I/O subsystem read function.
*/
-struct pnfs_deviceid_node *
-pnfs_add_deviceid(struct pnfs_deviceid_cache *c, struct pnfs_deviceid_node *new)
-{
- struct pnfs_deviceid_node *d;
- long hash = nfs4_deviceid_hash(&new->de_id);
-
- dprintk("--> %s hash %ld\n", __func__, hash);
- spin_lock(&c->dc_lock);
- d = pnfs_find_get_deviceid(c, &new->de_id);
- if (d) {
- spin_unlock(&c->dc_lock);
- dprintk("%s [discard]\n", __func__);
- c->dc_free_callback(new);
- return d;
- }
- INIT_HLIST_NODE(&new->de_node);
- atomic_set(&new->de_ref, 1);
- hlist_add_head_rcu(&new->de_node, &c->dc_deviceids[hash]);
- spin_unlock(&c->dc_lock);
- dprintk("%s [new]\n", __func__);
- return new;
-}
-EXPORT_SYMBOL_GPL(pnfs_add_deviceid);
-
-void
-pnfs_put_deviceid_cache(struct nfs_client *clp)
+enum pnfs_try_status
+pnfs_try_to_read_data(struct nfs_read_data *rdata,
+ const struct rpc_call_ops *call_ops)
{
- struct pnfs_deviceid_cache *local = clp->cl_devid_cache;
+ struct inode *inode = rdata->inode;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+ enum pnfs_try_status trypnfs;
- dprintk("--> %s ({%d})\n", __func__, atomic_read(&local->dc_ref));
- if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
- int i;
- /* Verify cache is empty */
- for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++)
- BUG_ON(!hlist_empty(&local->dc_deviceids[i]));
- clp->cl_devid_cache = NULL;
- spin_unlock(&clp->cl_lock);
- kfree(local);
+ rdata->mds_ops = call_ops;
+
+ dprintk("%s: Reading ino:%lu %u@%llu\n",
+ __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
+
+ trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
+ if (trypnfs == PNFS_NOT_ATTEMPTED) {
+ put_lseg(rdata->lseg);
+ rdata->lseg = NULL;
+ } else {
+ nfs_inc_stats(inode, NFSIOS_PNFS_READ);
}
+ dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
+ return trypnfs;
}
-EXPORT_SYMBOL_GPL(pnfs_put_deviceid_cache);
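
The two pnfs_try_to_* helpers above implement a try-then-fall-back contract: the layout driver either takes ownership of the I/O (PNFS_ATTEMPTED) or declines (PNFS_NOT_ATTEMPTED), in which case the lseg reference is dropped and the caller issues an ordinary RPC through the MDS. A minimal sketch of a caller honoring that contract (do_mds_read() is a placeholder, not a real kernel function):

	/* Try pNFS first; fall back to the regular MDS path on refusal. */
	static int issue_read(struct nfs_read_data *data,
			      const struct rpc_call_ops *ops)
	{
		if (data->lseg &&
		    pnfs_try_to_read_data(data, ops) == PNFS_ATTEMPTED)
			return 0;		/* layout driver owns the I/O now */
		return do_mds_read(data);	/* ordinary NFS read */
	}

This mirrors what nfs_read_rpcsetup() does in fs/nfs/read.c further down in this patch.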
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index e2612ea0cbed..6380b9405bcd 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,6 +30,8 @@
#ifndef FS_NFS_PNFS_H
#define FS_NFS_PNFS_H
+#include <linux/nfs_page.h>
+
enum {
NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */
NFS_LSEG_ROC, /* roc bit received from server */
@@ -43,6 +45,11 @@ struct pnfs_layout_segment {
struct pnfs_layout_hdr *pls_layout;
};
+enum pnfs_try_status {
+ PNFS_ATTEMPTED = 0,
+ PNFS_NOT_ATTEMPTED = 1,
+};
+
#ifdef CONFIG_NFS_V4_1
#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
@@ -61,10 +68,18 @@ struct pnfs_layoutdriver_type {
const u32 id;
const char *name;
struct module *owner;
- int (*set_layoutdriver) (struct nfs_server *);
- int (*clear_layoutdriver) (struct nfs_server *);
struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr);
void (*free_lseg) (struct pnfs_layout_segment *lseg);
+
+ /* test for nfs page cache coalescing */
+ int (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
+
+ /*
+ * Return PNFS_ATTEMPTED to indicate the layout code has attempted
+ * I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS
+ */
+ enum pnfs_try_status (*read_pagelist) (struct nfs_read_data *nfs_data);
+ enum pnfs_try_status (*write_pagelist) (struct nfs_write_data *nfs_data, int how);
};
struct pnfs_layout_hdr {
@@ -90,52 +105,6 @@ struct pnfs_device {
unsigned int pglen;
};
-/*
- * Device ID RCU cache. A device ID is unique per client ID and layout type.
- */
-#define NFS4_DEVICE_ID_HASH_BITS 5
-#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
-#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
-
-static inline u32
-nfs4_deviceid_hash(struct nfs4_deviceid *id)
-{
- unsigned char *cptr = (unsigned char *)id->data;
- unsigned int nbytes = NFS4_DEVICEID4_SIZE;
- u32 x = 0;
-
- while (nbytes--) {
- x *= 37;
- x += *cptr++;
- }
- return x & NFS4_DEVICE_ID_HASH_MASK;
-}
-
-struct pnfs_deviceid_node {
- struct hlist_node de_node;
- struct nfs4_deviceid de_id;
- atomic_t de_ref;
-};
-
-struct pnfs_deviceid_cache {
- spinlock_t dc_lock;
- atomic_t dc_ref;
- void (*dc_free_callback)(struct pnfs_deviceid_node *);
- struct hlist_head dc_deviceids[NFS4_DEVICE_ID_HASH_SIZE];
-};
-
-extern int pnfs_alloc_init_deviceid_cache(struct nfs_client *,
- void (*free_callback)(struct pnfs_deviceid_node *));
-extern void pnfs_put_deviceid_cache(struct nfs_client *);
-extern struct pnfs_deviceid_node *pnfs_find_get_deviceid(
- struct pnfs_deviceid_cache *,
- struct nfs4_deviceid *);
-extern struct pnfs_deviceid_node *pnfs_add_deviceid(
- struct pnfs_deviceid_cache *,
- struct pnfs_deviceid_node *);
-extern void pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
- struct pnfs_deviceid_node *devid);
-
extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *);
extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
@@ -146,11 +115,18 @@ extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
/* pnfs.c */
void get_layout_hdr(struct pnfs_layout_hdr *lo);
+void put_lseg(struct pnfs_layout_segment *lseg);
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
enum pnfs_iomode access_type);
void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
void unset_pnfs_layoutdriver(struct nfs_server *);
+enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
+ const struct rpc_call_ops *, int);
+enum pnfs_try_status pnfs_try_to_read_data(struct nfs_read_data *,
+ const struct rpc_call_ops *);
+void pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
+void pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *);
int pnfs_layout_process(struct nfs4_layoutget *lgp);
void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
@@ -177,6 +153,16 @@ static inline int lo_fail_bit(u32 iomode)
NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}
+static inline struct pnfs_layout_segment *
+get_lseg(struct pnfs_layout_segment *lseg)
+{
+ if (lseg) {
+ atomic_inc(&lseg->pls_refcount);
+ smp_mb__after_atomic_inc();
+ }
+ return lseg;
+}
+
/* Return true if a layout driver is being used for this mountpoint */
static inline int pnfs_enabled_sb(struct nfs_server *nfss)
{
@@ -194,12 +180,36 @@ static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
}
static inline struct pnfs_layout_segment *
+get_lseg(struct pnfs_layout_segment *lseg)
+{
+ return NULL;
+}
+
+static inline void put_lseg(struct pnfs_layout_segment *lseg)
+{
+}
+
+static inline struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
enum pnfs_iomode access_type)
{
return NULL;
}
+static inline enum pnfs_try_status
+pnfs_try_to_read_data(struct nfs_read_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ return PNFS_NOT_ATTEMPTED;
+}
+
+static inline enum pnfs_try_status
+pnfs_try_to_write_data(struct nfs_write_data *data,
+ const struct rpc_call_ops *call_ops, int how)
+{
+ return PNFS_NOT_ATTEMPTED;
+}
+
static inline bool
pnfs_roc(struct inode *ino)
{
@@ -230,6 +240,18 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
{
}
+static inline void
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *ino)
+{
+ pgio->pg_test = NULL;
+}
+
+static inline void
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *ino)
+{
+ pgio->pg_test = NULL;
+}
+
#endif /* CONFIG_NFS_V4_1 */
#endif /* FS_NFS_PNFS_H */
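
The get_lseg()/put_lseg() pair added here follows the usual get/put refcount rule: every stored copy of an lseg pointer (data->lseg, *lgp->lsegpp, desc->pg_lseg) owns one reference. A hedged sketch of the pairing, assuming a segment obtained from pnfs_update_layout():

	struct pnfs_layout_segment *lseg;

	lseg = pnfs_update_layout(inode, ctx, IOMODE_READ); /* ref held, or NULL */
	data->lseg = get_lseg(lseg);	/* second ref, owned by the request */
	put_lseg(lseg);			/* drop the lookup reference */
	...
	put_lseg(data->lseg);		/* dropped in nfs_readdata_release() */

Both helpers tolerate a NULL segment, which keeps the !CONFIG_NFS_V4_1 stubs and the error paths simple.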
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 77d5e21c4ad6..b8ec170f2a0f 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -741,4 +741,5 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
.lock = nfs_proc_lock,
.lock_check_bounds = nfs_lock_check_bounds,
.close_context = nfs_close_context,
+ .init_client = nfs_init_client,
};
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index aedcaa7f291f..7cded2b12a05 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -18,19 +18,20 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
+#include <linux/module.h>
#include <asm/system.h>
+#include "pnfs.h"
#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
-#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
-static int nfs_pagein_multi(struct inode *, struct list_head *, unsigned int, size_t, int);
-static int nfs_pagein_one(struct inode *, struct list_head *, unsigned int, size_t, int);
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc);
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;
@@ -69,6 +70,7 @@ void nfs_readdata_free(struct nfs_read_data *p)
static void nfs_readdata_release(struct nfs_read_data *rdata)
{
+ put_lseg(rdata->lseg);
put_nfs_open_context(rdata->args.context);
nfs_readdata_free(rdata);
}
@@ -114,14 +116,13 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
struct page *page)
{
- LIST_HEAD(one_request);
struct nfs_page *new;
unsigned int len;
+ struct nfs_pageio_descriptor pgio;
len = nfs_page_length(page);
if (len == 0)
return nfs_return_empty_page(page);
- pnfs_update_layout(inode, ctx, IOMODE_READ);
new = nfs_create_request(ctx, inode, page, 0, len);
if (IS_ERR(new)) {
unlock_page(page);
@@ -130,11 +131,14 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
if (len < PAGE_CACHE_SIZE)
zero_user_segment(page, len, PAGE_CACHE_SIZE);
- nfs_list_add_request(new, &one_request);
+ nfs_pageio_init(&pgio, inode, NULL, 0, 0);
+ nfs_list_add_request(new, &pgio.pg_list);
+ pgio.pg_count = len;
+
if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
- nfs_pagein_multi(inode, &one_request, 1, len, 0);
+ nfs_pagein_multi(&pgio);
else
- nfs_pagein_one(inode, &one_request, 1, len, 0);
+ nfs_pagein_one(&pgio);
return 0;
}
@@ -155,24 +159,20 @@ static void nfs_readpage_release(struct nfs_page *req)
nfs_release_request(req);
}
-/*
- * Set up the NFS read request struct
- */
-static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
- const struct rpc_call_ops *call_ops,
- unsigned int count, unsigned int offset)
+int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops)
{
- struct inode *inode = req->wb_context->path.dentry->d_inode;
+ struct inode *inode = data->inode;
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = req->wb_context->cred,
+ .rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
- .rpc_client = NFS_CLIENT(inode),
+ .rpc_client = clnt,
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
@@ -180,9 +180,39 @@ static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
.flags = RPC_TASK_ASYNC | swap_flags,
};
+ /* Set up the initial task struct. */
+ NFS_PROTO(inode)->read_setup(data, &msg);
+
+ dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
+ "offset %llu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_read);
+
+/*
+ * Set up the NFS read request struct
+ */
+static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+ const struct rpc_call_ops *call_ops,
+ unsigned int count, unsigned int offset,
+ struct pnfs_layout_segment *lseg)
+{
+ struct inode *inode = req->wb_context->path.dentry->d_inode;
+
data->req = req;
data->inode = inode;
- data->cred = msg.rpc_cred;
+ data->cred = req->wb_context->cred;
+ data->lseg = get_lseg(lseg);
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + offset;
@@ -197,21 +227,11 @@ static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
data->res.eof = 0;
nfs_fattr_init(&data->fattr);
- /* Set up the initial task struct. */
- NFS_PROTO(inode)->read_setup(data, &msg);
-
- dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- count,
- (unsigned long long)data->args.offset);
+ if (data->lseg &&
+ (pnfs_try_to_read_data(data, call_ops) == PNFS_ATTEMPTED))
+ return 0;
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task))
- return PTR_ERR(task);
- rpc_put_task(task);
- return 0;
+ return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
}
static void
@@ -240,20 +260,21 @@ nfs_async_read_error(struct list_head *head)
* won't see the new data until our attribute cache is updated. This is more
* or less conventional NFS client behavior.
*/
-static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc)
{
- struct nfs_page *req = nfs_list_entry(head->next);
+ struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
struct page *page = req->wb_page;
struct nfs_read_data *data;
- size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
+ size_t rsize = NFS_SERVER(desc->pg_inode)->rsize, nbytes;
unsigned int offset;
int requests = 0;
int ret = 0;
+ struct pnfs_layout_segment *lseg;
LIST_HEAD(list);
nfs_list_remove_request(req);
- nbytes = count;
+ nbytes = desc->pg_count;
do {
size_t len = min(nbytes,rsize);
@@ -266,9 +287,11 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
} while(nbytes != 0);
atomic_set(&req->wb_complete, requests);
+ BUG_ON(desc->pg_lseg != NULL);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
ClearPageError(page);
offset = 0;
- nbytes = count;
+ nbytes = desc->pg_count;
do {
int ret2;
@@ -280,12 +303,14 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
if (nbytes < rsize)
rsize = nbytes;
ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
- rsize, offset);
+ rsize, offset, lseg);
if (ret == 0)
ret = ret2;
offset += rsize;
nbytes -= rsize;
} while (nbytes != 0);
+ put_lseg(lseg);
+ desc->pg_lseg = NULL;
return ret;
@@ -300,16 +325,21 @@ out_bad:
return -ENOMEM;
}
-static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int flags)
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
{
struct nfs_page *req;
struct page **pages;
struct nfs_read_data *data;
+ struct list_head *head = &desc->pg_list;
+ struct pnfs_layout_segment *lseg = desc->pg_lseg;
int ret = -ENOMEM;
- data = nfs_readdata_alloc(npages);
- if (!data)
- goto out_bad;
+ data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
+ if (!data) {
+ nfs_async_read_error(head);
+ goto out;
+ }
pages = data->pagevec;
while (!list_empty(head)) {
@@ -320,10 +350,14 @@ static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned
*pages++ = req->wb_page;
}
req = nfs_list_entry(data->pages.next);
+ if ((!lseg) && list_is_singular(&data->pages))
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
- return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
-out_bad:
- nfs_async_read_error(head);
+ ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count,
+ 0, lseg);
+out:
+ put_lseg(lseg);
+ desc->pg_lseg = NULL;
return ret;
}
@@ -366,6 +400,7 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
return;
/* Yes, so retry the read at the end of the data */
+ data->mds_offset += resp->count;
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
@@ -625,7 +660,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
- pnfs_update_layout(inode, desc.ctx, IOMODE_READ);
+ pnfs_pageio_init_read(&pgio, inode);
if (rsize < PAGE_CACHE_SIZE)
nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
else
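
The new pg_test hook gives a layout driver a veto over coalescing two nfs_pages into one RPC. A hypothetical driver callback that refuses to merge requests crossing a stripe boundary might look like this (STRIPE_SIZE is an invented constant, not part of this patch):

	/* Hypothetical ->pg_test: coalesce only within one stripe unit. */
	static int demo_pg_test(struct nfs_pageio_descriptor *pgio,
				struct nfs_page *prev, struct nfs_page *req)
	{
		u64 p_stripe = req_offset(prev);
		u64 r_stripe = req_offset(req);

		/* do_div() divides in place and is safe on 32-bit */
		do_div(p_stripe, STRIPE_SIZE);
		do_div(r_stripe, STRIPE_SIZE);
		return p_stripe == r_stripe;	/* nonzero: OK to coalesce */
	}

pnfs_read_pg_test()/pnfs_write_pg_test() wrap such a callback so that the layout is looked up only once, on the first coalesce call for a series.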
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index d3286583009a..2b8e9a5e366a 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1008,6 +1008,27 @@ static int nfs_parse_security_flavors(char *value,
return 1;
}
+static int nfs_get_option_str(substring_t args[], char **option)
+{
+ kfree(*option);
+ *option = match_strdup(args);
+ return !*option;
+}
+
+static int nfs_get_option_ul(substring_t args[], unsigned long *option)
+{
+ int rc;
+ char *string;
+
+ string = match_strdup(args);
+ if (string == NULL)
+ return -ENOMEM;
+ rc = strict_strtoul(string, 10, option);
+ kfree(string);
+
+ return rc;
+}
+
/*
* Error-check and convert a string of mount options from user space into
* a data structure. The whole mount string is processed; bad options are
@@ -1156,155 +1177,82 @@ static int nfs_parse_mount_options(char *raw,
* options that take numeric values
*/
case Opt_port:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0 || option > USHRT_MAX)
+ if (nfs_get_option_ul(args, &option) ||
+ option > USHRT_MAX)
goto out_invalid_value;
mnt->nfs_server.port = option;
break;
case Opt_rsize:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->rsize = option;
break;
case Opt_wsize:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->wsize = option;
break;
case Opt_bsize:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->bsize = option;
break;
case Opt_timeo:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0 || option == 0)
+ if (nfs_get_option_ul(args, &option) || option == 0)
goto out_invalid_value;
mnt->timeo = option;
break;
case Opt_retrans:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0 || option == 0)
+ if (nfs_get_option_ul(args, &option) || option == 0)
goto out_invalid_value;
mnt->retrans = option;
break;
case Opt_acregmin:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->acregmin = option;
break;
case Opt_acregmax:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->acregmax = option;
break;
case Opt_acdirmin:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->acdirmin = option;
break;
case Opt_acdirmax:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->acdirmax = option;
break;
case Opt_actimeo:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->acregmin = mnt->acregmax =
mnt->acdirmin = mnt->acdirmax = option;
break;
case Opt_namelen:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
mnt->namlen = option;
break;
case Opt_mountport:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0 || option > USHRT_MAX)
+ if (nfs_get_option_ul(args, &option) ||
+ option > USHRT_MAX)
goto out_invalid_value;
mnt->mount_server.port = option;
break;
case Opt_mountvers:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0 ||
+ if (nfs_get_option_ul(args, &option) ||
option < NFS_MNT_VERSION ||
option > NFS_MNT3_VERSION)
goto out_invalid_value;
mnt->mount_server.version = option;
break;
case Opt_nfsvers:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
switch (option) {
case NFS2_VERSION:
@@ -1324,12 +1272,7 @@ static int nfs_parse_mount_options(char *raw,
}
break;
case Opt_minorversion:
- string = match_strdup(args);
- if (string == NULL)
- goto out_nomem;
- rc = strict_strtoul(string, 10, &option);
- kfree(string);
- if (rc != 0)
+ if (nfs_get_option_ul(args, &option))
goto out_invalid_value;
if (option > NFS4_MAX_MINOR_VERSION)
goto out_invalid_value;
@@ -1365,21 +1308,18 @@ static int nfs_parse_mount_options(char *raw,
case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
- kfree(string);
break;
case Opt_xprt_tcp6:
protofamily = AF_INET6;
case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
- kfree(string);
break;
case Opt_xprt_rdma:
/* vector side protocols to TCP */
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
xprt_load_transport(string);
- kfree(string);
break;
default:
dfprintk(MOUNT, "NFS: unrecognized "
@@ -1387,6 +1327,7 @@ static int nfs_parse_mount_options(char *raw,
kfree(string);
return 0;
}
+ kfree(string);
break;
case Opt_mountproto:
string = match_strdup(args);
@@ -1429,18 +1370,13 @@ static int nfs_parse_mount_options(char *raw,
goto out_invalid_address;
break;
case Opt_clientaddr:
- string = match_strdup(args);
- if (string == NULL)
+ if (nfs_get_option_str(args, &mnt->client_address))
goto out_nomem;
- kfree(mnt->client_address);
- mnt->client_address = string;
break;
case Opt_mounthost:
- string = match_strdup(args);
- if (string == NULL)
+ if (nfs_get_option_str(args,
+ &mnt->mount_server.hostname))
goto out_nomem;
- kfree(mnt->mount_server.hostname);
- mnt->mount_server.hostname = string;
break;
case Opt_mountaddr:
string = match_strdup(args);
@@ -1480,11 +1416,8 @@ static int nfs_parse_mount_options(char *raw,
};
break;
case Opt_fscache_uniq:
- string = match_strdup(args);
- if (string == NULL)
+ if (nfs_get_option_str(args, &mnt->fscache_uniq))
goto out_nomem;
- kfree(mnt->fscache_uniq);
- mnt->fscache_uniq = string;
mnt->options |= NFS_OPTION_FSCACHE;
break;
case Opt_local_lock:
@@ -1694,99 +1627,59 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
return nfs_walk_authlist(args, &request);
}
-static int nfs_parse_simple_hostname(const char *dev_name,
- char **hostname, size_t maxnamlen,
- char **export_path, size_t maxpathlen)
+/*
+ * Split "dev_name" into "hostname:export_path".
+ *
+ * The leftmost colon demarks the split between the server's hostname
+ * and the export path. If the hostname starts with a left square
+ * bracket, then it may contain colons.
+ *
+ * Note: caller frees hostname and export path, even on error.
+ */
+static int nfs_parse_devname(const char *dev_name,
+ char **hostname, size_t maxnamlen,
+ char **export_path, size_t maxpathlen)
{
size_t len;
- char *colon, *comma;
+ char *end;
- colon = strchr(dev_name, ':');
- if (colon == NULL)
- goto out_bad_devname;
-
- len = colon - dev_name;
- if (len > maxnamlen)
- goto out_hostname;
-
- /* N.B. caller will free nfs_server.hostname in all cases */
- *hostname = kstrndup(dev_name, len, GFP_KERNEL);
- if (!*hostname)
- goto out_nomem;
-
- /* kill possible hostname list: not supported */
- comma = strchr(*hostname, ',');
- if (comma != NULL) {
- if (comma == *hostname)
+ /* Is the host name protected with square brackets? */
+ if (*dev_name == '[') {
+ end = strchr(++dev_name, ']');
+ if (end == NULL || end[1] != ':')
goto out_bad_devname;
- *comma = '\0';
- }
-
- colon++;
- len = strlen(colon);
- if (len > maxpathlen)
- goto out_path;
- *export_path = kstrndup(colon, len, GFP_KERNEL);
- if (!*export_path)
- goto out_nomem;
-
- dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *export_path);
- return 0;
-
-out_bad_devname:
- dfprintk(MOUNT, "NFS: device name not in host:path format\n");
- return -EINVAL;
-out_nomem:
- dfprintk(MOUNT, "NFS: not enough memory to parse device name\n");
- return -ENOMEM;
-
-out_hostname:
- dfprintk(MOUNT, "NFS: server hostname too long\n");
- return -ENAMETOOLONG;
-
-out_path:
- dfprintk(MOUNT, "NFS: export pathname too long\n");
- return -ENAMETOOLONG;
-}
-
-/*
- * Hostname has square brackets around it because it contains one or
- * more colons. We look for the first closing square bracket, and a
- * colon must follow it.
- */
-static int nfs_parse_protected_hostname(const char *dev_name,
- char **hostname, size_t maxnamlen,
- char **export_path, size_t maxpathlen)
-{
- size_t len;
- char *start, *end;
+ len = end - dev_name;
+ end++;
+ } else {
+ char *comma;
- start = (char *)(dev_name + 1);
+ end = strchr(dev_name, ':');
+ if (end == NULL)
+ goto out_bad_devname;
+ len = end - dev_name;
- end = strchr(start, ']');
- if (end == NULL)
- goto out_bad_devname;
- if (*(end + 1) != ':')
- goto out_bad_devname;
+ /* kill possible hostname list: not supported */
+ comma = strchr(dev_name, ',');
+ if (comma != NULL && comma < end)
+ *comma = 0;
+ }
- len = end - start;
if (len > maxnamlen)
goto out_hostname;
/* N.B. caller will free nfs_server.hostname in all cases */
- *hostname = kstrndup(start, len, GFP_KERNEL);
+ *hostname = kstrndup(dev_name, len, GFP_KERNEL);
if (*hostname == NULL)
goto out_nomem;
-
- end += 2;
- len = strlen(end);
+ len = strlen(++end);
if (len > maxpathlen)
goto out_path;
*export_path = kstrndup(end, len, GFP_KERNEL);
if (!*export_path)
goto out_nomem;
+ dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *export_path);
return 0;
out_bad_devname:
@@ -1807,29 +1700,6 @@ out_path:
}
/*
- * Split "dev_name" into "hostname:export_path".
- *
- * The leftmost colon demarks the split between the server's hostname
- * and the export path. If the hostname starts with a left square
- * bracket, then it may contain colons.
- *
- * Note: caller frees hostname and export path, even on error.
- */
-static int nfs_parse_devname(const char *dev_name,
- char **hostname, size_t maxnamlen,
- char **export_path, size_t maxpathlen)
-{
- if (*dev_name == '[')
- return nfs_parse_protected_hostname(dev_name,
- hostname, maxnamlen,
- export_path, maxpathlen);
-
- return nfs_parse_simple_hostname(dev_name,
- hostname, maxnamlen,
- export_path, maxpathlen);
-}
-
-/*
* Validate the NFS2/NFS3 mount data
* - fills in the mount root filehandle
*
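
The merged nfs_parse_devname() above handles both device-name forms in a single pass. Worked examples (the names are illustrative):

	server.example.com:/srv/nfs   -> hostname "server.example.com", export "/srv/nfs"
	[fe80::1]:/srv/nfs            -> hostname "fe80::1",            export "/srv/nfs"
	server1,server2:/srv/nfs      -> hostname "server1" (replica list cut at the comma)

The square brackets are stripped, and a colon inside them no longer terminates the hostname, which is what previously forced the IPv6 form into its own parser.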
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 42b92d7a9cc4..47a3ad63e0d5 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -28,6 +28,7 @@
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
+#include "pnfs.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -96,6 +97,7 @@ void nfs_writedata_free(struct nfs_write_data *p)
static void nfs_writedata_release(struct nfs_write_data *wdata)
{
+ put_lseg(wdata->lseg);
put_nfs_open_context(wdata->args.context);
nfs_writedata_free(wdata);
}
@@ -781,25 +783,21 @@ static int flush_task_priority(int how)
return RPC_PRIORITY_NORMAL;
}
-/*
- * Set up the argument/result storage required for the RPC call.
- */
-static int nfs_write_rpcsetup(struct nfs_page *req,
- struct nfs_write_data *data,
- const struct rpc_call_ops *call_ops,
- unsigned int count, unsigned int offset,
- int how)
+int nfs_initiate_write(struct nfs_write_data *data,
+ struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how)
{
- struct inode *inode = req->wb_context->path.dentry->d_inode;
+ struct inode *inode = data->inode;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
- .rpc_cred = req->wb_context->cred,
+ .rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
- .rpc_client = NFS_CLIENT(inode),
+ .rpc_client = clnt,
.task = &data->task,
.rpc_message = &msg,
.callback_ops = call_ops,
@@ -810,12 +808,52 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
};
int ret = 0;
+ /* Set up the initial task struct. */
+ NFS_PROTO(inode)->write_setup(data, &msg);
+
+ dprintk("NFS: %5u initiated write call "
+ "(req %s/%lld, %u bytes @ offset %llu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+ }
+ if (how & FLUSH_SYNC) {
+ ret = rpc_wait_for_completion_task(task);
+ if (ret == 0)
+ ret = task->tk_status;
+ }
+ rpc_put_task(task);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_write);
+
+/*
+ * Set up the argument/result storage required for the RPC call.
+ */
+static int nfs_write_rpcsetup(struct nfs_page *req,
+ struct nfs_write_data *data,
+ const struct rpc_call_ops *call_ops,
+ unsigned int count, unsigned int offset,
+ struct pnfs_layout_segment *lseg,
+ int how)
+{
+ struct inode *inode = req->wb_context->path.dentry->d_inode;
+
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
data->req = req;
data->inode = inode = req->wb_context->path.dentry->d_inode;
- data->cred = msg.rpc_cred;
+ data->cred = req->wb_context->cred;
+ data->lseg = get_lseg(lseg);
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + offset;
@@ -836,30 +874,11 @@ static int nfs_write_rpcsetup(struct nfs_page *req,
data->res.verf = &data->verf;
nfs_fattr_init(&data->fattr);
- /* Set up the initial task struct. */
- NFS_PROTO(inode)->write_setup(data, &msg);
-
- dprintk("NFS: %5u initiated write call "
- "(req %s/%lld, %u bytes @ offset %llu)\n",
- data->task.tk_pid,
- inode->i_sb->s_id,
- (long long)NFS_FILEID(inode),
- count,
- (unsigned long long)data->args.offset);
+ if (data->lseg &&
+ (pnfs_try_to_write_data(data, call_ops, how) == PNFS_ATTEMPTED))
+ return 0;
- task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- ret = PTR_ERR(task);
- goto out;
- }
- if (how & FLUSH_SYNC) {
- ret = rpc_wait_for_completion_task(task);
- if (ret == 0)
- ret = task->tk_status;
- }
- rpc_put_task(task);
-out:
- return ret;
+ return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
}
/* If a nfs_flush_* function fails, it should remove reqs from @head and
@@ -879,20 +898,21 @@ static void nfs_redirty_request(struct nfs_page *req)
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
*/
-static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
+static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
{
- struct nfs_page *req = nfs_list_entry(head->next);
+ struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
struct page *page = req->wb_page;
struct nfs_write_data *data;
- size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
+ size_t wsize = NFS_SERVER(desc->pg_inode)->wsize, nbytes;
unsigned int offset;
int requests = 0;
int ret = 0;
+ struct pnfs_layout_segment *lseg;
LIST_HEAD(list);
nfs_list_remove_request(req);
- nbytes = count;
+ nbytes = desc->pg_count;
do {
size_t len = min(nbytes, wsize);
@@ -905,9 +925,11 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned
} while (nbytes != 0);
atomic_set(&req->wb_complete, requests);
+ BUG_ON(desc->pg_lseg);
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
ClearPageError(page);
offset = 0;
- nbytes = count;
+ nbytes = desc->pg_count;
do {
int ret2;
@@ -919,13 +941,15 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned
if (nbytes < wsize)
wsize = nbytes;
ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
- wsize, offset, how);
+ wsize, offset, lseg, desc->pg_ioflags);
if (ret == 0)
ret = ret2;
offset += wsize;
nbytes -= wsize;
} while (nbytes != 0);
+ put_lseg(lseg);
+ desc->pg_lseg = NULL;
return ret;
out_bad:
@@ -946,16 +970,26 @@ out_bad:
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
-static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
+static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
{
struct nfs_page *req;
struct page **pages;
struct nfs_write_data *data;
+ struct list_head *head = &desc->pg_list;
+ struct pnfs_layout_segment *lseg = desc->pg_lseg;
+ int ret;
- data = nfs_writedata_alloc(npages);
- if (!data)
- goto out_bad;
-
+ data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
+ if (!data) {
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_redirty_request(req);
+ }
+ ret = -ENOMEM;
+ goto out;
+ }
pages = data->pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
@@ -965,16 +999,15 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
*pages++ = req->wb_page;
}
req = nfs_list_entry(data->pages.next);
+ if ((!lseg) && list_is_singular(&data->pages))
+ lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
/* Set up the argument struct */
- return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
- out_bad:
- while (!list_empty(head)) {
- req = nfs_list_entry(head->next);
- nfs_list_remove_request(req);
- nfs_redirty_request(req);
- }
- return -ENOMEM;
+ ret = nfs_write_rpcsetup(req, data, &nfs_write_full_ops, desc->pg_count, 0, lseg, desc->pg_ioflags);
+out:
+ put_lseg(lseg); /* Cleans any gotten in ->pg_test */
+ desc->pg_lseg = NULL;
+ return ret;
}
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
@@ -982,6 +1015,8 @@ static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
{
size_t wsize = NFS_SERVER(inode)->wsize;
+ pnfs_pageio_init_write(pgio, inode);
+
if (wsize < PAGE_CACHE_SIZE)
nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
else
@@ -1132,7 +1167,7 @@ static const struct rpc_call_ops nfs_write_full_ops = {
/*
* This function is called when the WRITE call is complete.
*/
-int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
+void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
@@ -1151,7 +1186,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
status = NFS_PROTO(data->inode)->write_done(task, data);
if (status != 0)
- return status;
+ return;
nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
@@ -1166,6 +1201,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
static unsigned long complain;
+ /* Note this will print the MDS for a DS write */
if (time_before(complain, jiffies)) {
dprintk("NFS: faulty NFS server %s:"
" (committed = %d) != (stable = %d)\n",
@@ -1186,6 +1222,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
/* Was this an NFSv2 write or an NFSv3 stable write? */
if (resp->verf->committed != NFS_UNSTABLE) {
/* Resend from where the server left off */
+ data->mds_offset += resp->count;
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
@@ -1196,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
argp->stable = NFS_FILE_SYNC;
}
nfs_restart_rpc(task, server->nfs_client);
- return -EAGAIN;
+ return;
}
if (time_before(complain, jiffies)) {
printk(KERN_WARNING
@@ -1207,7 +1244,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
- return 0;
+ return;
}
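
When the server reports a short but stable write, the resend logic above advances both the RPC arguments and the new mds_offset field. A worked example, assuming a 16384-byte request of which the server committed only 4096 bytes:

	/* before retry: args.offset = 0,    args.pgbase = 0,    args.count = 16384
	 * server reply: resp->count = 4096, committed stably
	 * after  retry: args.offset = 4096, args.pgbase = 4096, args.count = 12288
	 * mds_offset advances by the same 4096 so a later resend through the
	 * MDS starts at the correct file offset. */

The read side makes the matching mds_offset adjustment in nfs_readpage_retry().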
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 65444d29406b..f1ab3604db5a 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -112,7 +112,7 @@ static int v2_read_file_info(struct super_block *sb, int type)
if (!info->dqi_priv) {
printk(KERN_WARNING
"Not enough memory for quota information structure.\n");
- return -1;
+ return -ENOMEM;
}
qinfo = info->dqi_priv;
if (version == 0) {
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 306ee39ef2c3..8994dd041660 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -31,7 +31,7 @@
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_next_one_bit(addr, size, offset) \
- ext2_find_next_bit(addr, size, offset)
+ ext2_find_next_bit((unsigned long *)(addr), size, offset)
static int read_block_bitmap(struct super_block *sb,
struct udf_bitmap *bitmap, unsigned int block,
@@ -297,7 +297,7 @@ repeat:
break;
}
} else {
- bit = udf_find_next_one_bit((char *)bh->b_data,
+ bit = udf_find_next_one_bit(bh->b_data,
sb->s_blocksize << 3,
group_start << 3);
if (bit < sb->s_blocksize << 3)
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 89c78486cbbe..f391a2adc699 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -123,8 +123,8 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (inode->i_sb->s_blocksize <
(udf_file_entry_alloc_offset(inode) +
pos + count)) {
- udf_expand_file_adinicb(inode, pos + count, &err);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ err = udf_expand_file_adinicb(inode);
+ if (err) {
udf_debug("udf_expand_adinicb: err=%d\n", err);
up_write(&iinfo->i_data_sem);
return err;
@@ -237,7 +237,7 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr)
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
- error = vmtruncate(inode, attr->ia_size);
+ error = udf_setsize(inode, attr->ia_size);
if (error)
return error;
}
@@ -249,5 +249,4 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr)
const struct inode_operations udf_file_inode_operations = {
.setattr = udf_setattr,
- .truncate = udf_truncate,
};
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c6a2e782b97b..ccc814321414 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -73,14 +73,12 @@ void udf_evict_inode(struct inode *inode)
struct udf_inode_info *iinfo = UDF_I(inode);
int want_delete = 0;
- truncate_inode_pages(&inode->i_data, 0);
-
if (!inode->i_nlink && !is_bad_inode(inode)) {
want_delete = 1;
- inode->i_size = 0;
- udf_truncate(inode);
+ udf_setsize(inode, 0);
udf_update_inode(inode, IS_SYNC(inode));
- }
+ } else
+ truncate_inode_pages(&inode->i_data, 0);
invalidate_inode_buffers(inode);
end_writeback(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
@@ -117,9 +115,18 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
if (unlikely(ret)) {
- loff_t isize = mapping->host->i_size;
- if (pos + len > isize)
- vmtruncate(mapping->host, isize);
+ struct inode *inode = mapping->host;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ loff_t isize = inode->i_size;
+
+ if (pos + len > isize) {
+ truncate_pagecache(inode, pos + len, isize);
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ down_write(&iinfo->i_data_sem);
+ udf_truncate_extents(inode);
+ up_write(&iinfo->i_data_sem);
+ }
+ }
}
return ret;
@@ -139,30 +146,31 @@ const struct address_space_operations udf_aops = {
.bmap = udf_bmap,
};
-void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
+int udf_expand_file_adinicb(struct inode *inode)
{
struct page *page;
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
+ int err;
struct writeback_control udf_wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
};
- /* from now on we have normal address_space methods */
- inode->i_data.a_ops = &udf_aops;
-
if (!iinfo->i_lenAlloc) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
+ /* from now on we have normal address_space methods */
+ inode->i_data.a_ops = &udf_aops;
mark_inode_dirty(inode);
- return;
+ return 0;
}
- page = grab_cache_page(inode->i_mapping, 0);
- BUG_ON(!PageLocked(page));
+ page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
if (!PageUptodate(page)) {
kaddr = kmap(page);
@@ -181,11 +189,24 @@ void udf_expand_file_adinicb(struct inode *inode, int newsize, int *err)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
-
- inode->i_data.a_ops->writepage(page, &udf_wbc);
+ /* from now on we have normal address_space methods */
+ inode->i_data.a_ops = &udf_aops;
+ err = inode->i_data.a_ops->writepage(page, &udf_wbc);
+ if (err) {
+ /* Restore everything back so that we don't lose data... */
+ lock_page(page);
+ kaddr = kmap(page);
+ memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
+ inode->i_size);
+ kunmap(page);
+ unlock_page(page);
+ iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
+ inode->i_data.a_ops = &udf_adinicb_aops;
+ }
page_cache_release(page);
-
mark_inode_dirty(inode);
+
+ return err;
}
struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
@@ -348,8 +369,10 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
}
/* Extend the file by 'blocks' blocks, return the number of extents added */
-int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
- struct kernel_long_ad *last_ext, sector_t blocks)
+static int udf_do_extend_file(struct inode *inode,
+ struct extent_position *last_pos,
+ struct kernel_long_ad *last_ext,
+ sector_t blocks)
{
sector_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
@@ -357,6 +380,7 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
struct kernel_lb_addr prealloc_loc = {};
int prealloc_len = 0;
struct udf_inode_info *iinfo;
+ int err;
/* The previous extent is fake and we should not extend by anything
* - there's nothing to do... */
@@ -422,26 +446,29 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
/* Create enough extents to cover the whole hole */
while (blocks > add) {
blocks -= add;
- if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
- last_ext->extLength, 1) == -1)
- return -1;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+ return err;
count++;
}
if (blocks) {
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(blocks << sb->s_blocksize_bits);
- if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
- last_ext->extLength, 1) == -1)
- return -1;
+ err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+ if (err)
+ return err;
count++;
}
out:
/* Do we have some preallocated blocks saved? */
if (prealloc_len) {
- if (udf_add_aext(inode, last_pos, &prealloc_loc,
- prealloc_len, 1) == -1)
- return -1;
+ err = udf_add_aext(inode, last_pos, &prealloc_loc,
+ prealloc_len, 1);
+ if (err)
+ return err;
last_ext->extLocation = prealloc_loc;
last_ext->extLength = prealloc_len;
count++;
@@ -453,11 +480,68 @@ out:
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
last_pos->offset -= sizeof(struct long_ad);
else
- return -1;
+ return -EIO;
return count;
}
+static int udf_extend_file(struct inode *inode, loff_t newsize)
+{
+ struct extent_position epos;
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
+ int8_t etype;
+ struct super_block *sb = inode->i_sb;
+ sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ struct kernel_long_ad extent;
+ int err;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+ else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+ adsize = sizeof(struct long_ad);
+ else
+ BUG();
+
+ etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+
+ /* File has extent covering the new size (could happen when extending
+ * inside a block)? */
+ if (etype != -1)
+ return 0;
+ if (newsize & (sb->s_blocksize - 1))
+ offset++;
+ /* Extended file just to the boundary of the last file block? */
+ if (offset == 0)
+ return 0;
+
+ /* Truncate is extending the file by 'offset' blocks */
+ if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
+ (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
+ /* File has no extents at all or has empty last
+ * indirect extent! Create a fake extent... */
+ extent.extLocation.logicalBlockNum = 0;
+ extent.extLocation.partitionReferenceNum = 0;
+ extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
+ } else {
+ epos.offset -= adsize;
+ etype = udf_next_aext(inode, &epos, &extent.extLocation,
+ &extent.extLength, 0);
+ extent.extLength |= etype << 30;
+ }
+ err = udf_do_extend_file(inode, &epos, &extent, offset);
+ if (err < 0)
+ goto out;
+ err = 0;
+ iinfo->i_lenExtents = newsize;
+out:
+ brelse(epos.bh);
+ return err;
+}
+
static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
int *err, sector_t *phys, int *new)
{
@@ -540,7 +624,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
elen = EXT_RECORDED_ALLOCATED |
((elen + inode->i_sb->s_blocksize - 1) &
~(inode->i_sb->s_blocksize - 1));
- etype = udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
+ udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
}
brelse(prev_epos.bh);
brelse(cur_epos.bh);
@@ -564,19 +648,17 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
memset(&laarr[0].extLocation, 0x00,
sizeof(struct kernel_lb_addr));
laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
- /* Will udf_extend_file() create real extent from
+ /* Will udf_do_extend_file() create real extent from
a fake one? */
startnum = (offset > 0);
}
/* Create extents for the hole between EOF and offset */
- ret = udf_extend_file(inode, &prev_epos, laarr, offset);
- if (ret == -1) {
+ ret = udf_do_extend_file(inode, &prev_epos, laarr, offset);
+ if (ret < 0) {
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
- /* We don't really know the error here so we just make
- * something up */
- *err = -ENOSPC;
+ *err = ret;
return NULL;
}
c = 0;
@@ -1005,52 +1087,66 @@ struct buffer_head *udf_bread(struct inode *inode, int block,
return NULL;
}
-void udf_truncate(struct inode *inode)
+int udf_setsize(struct inode *inode, loff_t newsize)
{
- int offset;
int err;
struct udf_inode_info *iinfo;
+ int bsize = 1 << inode->i_blkbits;
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
- return;
+ return -EINVAL;
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
- return;
+ return -EPERM;
iinfo = UDF_I(inode);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ if (newsize > inode->i_size) {
down_write(&iinfo->i_data_sem);
- if (inode->i_sb->s_blocksize <
- (udf_file_entry_alloc_offset(inode) +
- inode->i_size)) {
- udf_expand_file_adinicb(inode, inode->i_size, &err);
- if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
- inode->i_size = iinfo->i_lenAlloc;
- up_write(&iinfo->i_data_sem);
- return;
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ if (bsize <
+ (udf_file_entry_alloc_offset(inode) + newsize)) {
+ err = udf_expand_file_adinicb(inode);
+ if (err) {
+ up_write(&iinfo->i_data_sem);
+ return err;
+ }
} else
- udf_truncate_extents(inode);
- } else {
- offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
- memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
- 0x00, inode->i_sb->s_blocksize -
- offset - udf_file_entry_alloc_offset(inode));
- iinfo->i_lenAlloc = inode->i_size;
+ iinfo->i_lenAlloc = newsize;
+ }
+ err = udf_extend_file(inode, newsize);
+ if (err) {
+ up_write(&iinfo->i_data_sem);
+ return err;
}
+ truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
} else {
- block_truncate_page(inode->i_mapping, inode->i_size,
- udf_get_block);
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ down_write(&iinfo->i_data_sem);
+ memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
+ 0x00, bsize - newsize -
+ udf_file_entry_alloc_offset(inode));
+ iinfo->i_lenAlloc = newsize;
+ truncate_setsize(inode, newsize);
+ up_write(&iinfo->i_data_sem);
+ goto update_time;
+ }
+ err = block_truncate_page(inode->i_mapping, newsize,
+ udf_get_block);
+ if (err)
+ return err;
down_write(&iinfo->i_data_sem);
+ truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
-
+update_time:
inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
if (IS_SYNC(inode))
udf_sync_inode(inode);
else
mark_inode_dirty(inode);
+ return 0;
}
static void __udf_read_inode(struct inode *inode)
@@ -1637,14 +1733,13 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
return NULL;
}
-int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
- struct kernel_lb_addr *eloc, uint32_t elen, int inc)
+int udf_add_aext(struct inode *inode, struct extent_position *epos,
+ struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
struct short_ad *sad = NULL;
struct long_ad *lad = NULL;
struct allocExtDesc *aed;
- int8_t etype;
uint8_t *ptr;
struct udf_inode_info *iinfo = UDF_I(inode);
@@ -1660,7 +1755,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
adsize = sizeof(struct long_ad);
else
- return -1;
+ return -EIO;
if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize) {
unsigned char *sptr, *dptr;
@@ -1672,12 +1767,12 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
obloc.partitionReferenceNum,
obloc.logicalBlockNum, &err);
if (!epos->block.logicalBlockNum)
- return -1;
+ return -ENOSPC;
nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
&epos->block,
0));
if (!nbh)
- return -1;
+ return -EIO;
lock_buffer(nbh);
memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
set_buffer_uptodate(nbh);
@@ -1746,7 +1841,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
epos->bh = nbh;
}
- etype = udf_write_aext(inode, epos, eloc, elen, inc);
+ udf_write_aext(inode, epos, eloc, elen, inc);
if (!epos->bh) {
iinfo->i_lenAlloc += adsize;
@@ -1764,11 +1859,11 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
mark_buffer_dirty_inode(epos->bh, inode);
}
- return etype;
+ return 0;
}
-int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
- struct kernel_lb_addr *eloc, uint32_t elen, int inc)
+void udf_write_aext(struct inode *inode, struct extent_position *epos,
+ struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
uint8_t *ptr;
@@ -1798,7 +1893,7 @@ int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
adsize = sizeof(struct long_ad);
break;
default:
- return -1;
+ return;
}
if (epos->bh) {
@@ -1817,8 +1912,6 @@ int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
if (inc)
epos->offset += adsize;
-
- return (elen >> 30);
}
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
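
udf_add_aext() and udf_do_extend_file() now return 0 or a negative errno instead of overloading an int8_t extent type with -1, so callers can propagate the real cause of a failure. The caller pattern, as used throughout this patch:

	err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
			   last_ext->extLength, 1);
	if (err)
		return err;	/* -ENOSPC, -EIO, ... rather than a bare -1 */

udf_write_aext() in turn becomes void, since its extent-type return value no longer had any users.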
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 225527cdc885..8424308db4b4 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -197,6 +197,11 @@ static void udf_update_alloc_ext_desc(struct inode *inode,
mark_buffer_dirty_inode(epos->bh, inode);
}
+/*
+ * Truncate extents of inode to inode->i_size. This function can be used only
+ * for making the file shorter. For making the file longer, udf_extend_file()
+ * has to be used.
+ */
void udf_truncate_extents(struct inode *inode)
{
struct extent_position epos;
@@ -219,96 +224,65 @@ void udf_truncate_extents(struct inode *inode)
etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
byte_offset = (offset << sb->s_blocksize_bits) +
(inode->i_size & (sb->s_blocksize - 1));
- if (etype != -1) {
- epos.offset -= adsize;
- extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
- epos.offset += adsize;
- if (byte_offset)
- lenalloc = epos.offset;
- else
- lenalloc = epos.offset - adsize;
-
- if (!epos.bh)
- lenalloc -= udf_file_entry_alloc_offset(inode);
- else
- lenalloc -= sizeof(struct allocExtDesc);
-
- while ((etype = udf_current_aext(inode, &epos, &eloc,
- &elen, 0)) != -1) {
- if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
- udf_write_aext(inode, &epos, &neloc, nelen, 0);
- if (indirect_ext_len) {
- /* We managed to free all extents in the
- * indirect extent - free it too */
- BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, &epos.block,
- 0, indirect_ext_len);
- } else if (!epos.bh) {
- iinfo->i_lenAlloc = lenalloc;
- mark_inode_dirty(inode);
- } else
- udf_update_alloc_ext_desc(inode,
- &epos, lenalloc);
- brelse(epos.bh);
- epos.offset = sizeof(struct allocExtDesc);
- epos.block = eloc;
- epos.bh = udf_tread(sb,
- udf_get_lb_pblock(sb, &eloc, 0));
- if (elen)
- indirect_ext_len =
- (elen + sb->s_blocksize - 1) >>
- sb->s_blocksize_bits;
- else
- indirect_ext_len = 1;
- } else {
- extent_trunc(inode, &epos, &eloc, etype,
- elen, 0);
- epos.offset += adsize;
- }
- }
+ if (etype == -1) {
+ /* No extent covers i_size; extending is udf_extend_file()'s job. */
+ WARN_ON(byte_offset);
+ return;
+ }
+ epos.offset -= adsize;
+ extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
+ epos.offset += adsize;
+ if (byte_offset)
+ lenalloc = epos.offset;
+ else
+ lenalloc = epos.offset - adsize;
- if (indirect_ext_len) {
- BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, &epos.block, 0,
- indirect_ext_len);
- } else if (!epos.bh) {
- iinfo->i_lenAlloc = lenalloc;
- mark_inode_dirty(inode);
- } else
- udf_update_alloc_ext_desc(inode, &epos, lenalloc);
- } else if (inode->i_size) {
- if (byte_offset) {
- struct kernel_long_ad extent;
+ if (!epos.bh)
+ lenalloc -= udf_file_entry_alloc_offset(inode);
+ else
+ lenalloc -= sizeof(struct allocExtDesc);
- /*
- * OK, there is not extent covering inode->i_size and
- * no extent above inode->i_size => truncate is
- * extending the file by 'offset' blocks.
- */
- if ((!epos.bh &&
- epos.offset ==
- udf_file_entry_alloc_offset(inode)) ||
- (epos.bh && epos.offset ==
- sizeof(struct allocExtDesc))) {
- /* File has no extents at all or has empty last
- * indirect extent! Create a fake extent... */
- extent.extLocation.logicalBlockNum = 0;
- extent.extLocation.partitionReferenceNum = 0;
- extent.extLength =
- EXT_NOT_RECORDED_NOT_ALLOCATED;
- } else {
- epos.offset -= adsize;
- etype = udf_next_aext(inode, &epos,
- &extent.extLocation,
- &extent.extLength, 0);
- extent.extLength |= etype << 30;
- }
- udf_extend_file(inode, &epos, &extent,
- offset +
- ((inode->i_size &
- (sb->s_blocksize - 1)) != 0));
+ while ((etype = udf_current_aext(inode, &epos, &eloc,
+ &elen, 0)) != -1) {
+ if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+ udf_write_aext(inode, &epos, &neloc, nelen, 0);
+ if (indirect_ext_len) {
+ /* We managed to free all extents in the
+ * indirect extent - free it too */
+ BUG_ON(!epos.bh);
+ udf_free_blocks(sb, inode, &epos.block,
+ 0, indirect_ext_len);
+ } else if (!epos.bh) {
+ iinfo->i_lenAlloc = lenalloc;
+ mark_inode_dirty(inode);
+ } else
+ udf_update_alloc_ext_desc(inode,
+ &epos, lenalloc);
+ brelse(epos.bh);
+ epos.offset = sizeof(struct allocExtDesc);
+ epos.block = eloc;
+ epos.bh = udf_tread(sb,
+ udf_get_lb_pblock(sb, &eloc, 0));
+ if (elen)
+ indirect_ext_len =
+ (elen + sb->s_blocksize - 1) >>
+ sb->s_blocksize_bits;
+ else
+ indirect_ext_len = 1;
+ } else {
+ extent_trunc(inode, &epos, &eloc, etype, elen, 0);
+ epos.offset += adsize;
}
}
+
+ if (indirect_ext_len) {
+ BUG_ON(!epos.bh);
+ udf_free_blocks(sb, inode, &epos.block, 0, indirect_ext_len);
+ } else if (!epos.bh) {
+ iinfo->i_lenAlloc = lenalloc;
+ mark_inode_dirty(inode);
+ } else
+ udf_update_alloc_ext_desc(inode, &epos, lenalloc);
iinfo->i_lenExtents = inode->i_size;
brelse(epos.bh);
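
The comment atop udf_truncate_extents() splits responsibility between shortening and extending, and the udf_setsize() declared below (replacing udf_truncate()) is the natural dispatcher between the two. A minimal sketch of such a dispatcher, with example_extend() standing in for the extension path (the name and the simplified locking are assumptions, not the series' actual implementation):

static int example_extend(struct inode *inode, loff_t newsize);	/* hypothetical */

static int example_setsize(struct inode *inode, loff_t newsize)
{
	if (newsize > inode->i_size) {
		/* grow: record extents up to newsize before exposing it */
		int err = example_extend(inode, newsize);
		if (err)
			return err;
		truncate_setsize(inode, newsize);
	} else {
		/* shrink: update i_size first, then free extents past it */
		truncate_setsize(inode, newsize);
		udf_truncate_extents(inode);
	}
	mark_inode_dirty(inode);
	return 0;
}
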
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index eba48209f9f3..dbd52d4b5eed 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -136,22 +136,20 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
/* inode.c */
extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
-extern void udf_expand_file_adinicb(struct inode *, int, int *);
+extern int udf_expand_file_adinicb(struct inode *);
extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
-extern void udf_truncate(struct inode *);
+extern int udf_setsize(struct inode *, loff_t);
extern void udf_read_inode(struct inode *);
extern void udf_evict_inode(struct inode *);
extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
extern long udf_block_map(struct inode *, sector_t);
-extern int udf_extend_file(struct inode *, struct extent_position *,
- struct kernel_long_ad *, sector_t);
extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, sector_t *);
-extern int8_t udf_add_aext(struct inode *, struct extent_position *,
+extern int udf_add_aext(struct inode *, struct extent_position *,
+ struct kernel_lb_addr *, uint32_t, int);
+extern void udf_write_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_write_aext(struct inode *, struct extent_position *,
- struct kernel_lb_addr *, uint32_t, int);
extern int8_t udf_delete_aext(struct inode *, struct extent_position,
struct kernel_lb_addr, uint32_t);
extern int8_t udf_next_aext(struct inode *, struct extent_position *,
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
new file mode 100644
index 000000000000..51abba9ea7ad
--- /dev/null
+++ b/include/asm-generic/ftrace.h
@@ -0,0 +1,16 @@
+/*
+ * linux/include/asm-generic/ftrace.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_FTRACE_H__
+#define __ASM_GENERIC_FTRACE_H__
+
+/*
+ * Not all architectures need their own ftrace.h, the most
+ * common definitions are already in linux/ftrace.h.
+ */
+
+#endif /* __ASM_GENERIC_FTRACE_H__ */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 4644c9a7f724..e0ffa3ddb02a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -94,6 +94,10 @@ static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
#endif
+#ifndef PCI_IOBASE
+#define PCI_IOBASE ((void __iomem *) 0)
+#endif
+
/*****************************************************************************/
/*
* traditional input/output functions
@@ -101,32 +105,32 @@ static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
static inline u8 inb(unsigned long addr)
{
- return readb((volatile void __iomem *) addr);
+ return readb(addr + PCI_IOBASE);
}
static inline u16 inw(unsigned long addr)
{
- return readw((volatile void __iomem *) addr);
+ return readw(addr + PCI_IOBASE);
}
static inline u32 inl(unsigned long addr)
{
- return readl((volatile void __iomem *) addr);
+ return readl(addr + PCI_IOBASE);
}
static inline void outb(u8 b, unsigned long addr)
{
- writeb(b, (volatile void __iomem *) addr);
+ writeb(b, addr + PCI_IOBASE);
}
static inline void outw(u16 b, unsigned long addr)
{
- writew(b, (volatile void __iomem *) addr);
+ writew(b, addr + PCI_IOBASE);
}
static inline void outl(u32 b, unsigned long addr)
{
- writel(b, (volatile void __iomem *) addr);
+ writel(b, addr + PCI_IOBASE);
}
#define inb_p(addr) inb(addr)
@@ -213,32 +217,32 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
static inline void readsl(const void __iomem *addr, void *buf, int len)
{
- insl((unsigned long)addr, buf, len);
+ insl(addr - PCI_IOBASE, buf, len);
}
static inline void readsw(const void __iomem *addr, void *buf, int len)
{
- insw((unsigned long)addr, buf, len);
+ insw(addr - PCI_IOBASE, buf, len);
}
static inline void readsb(const void __iomem *addr, void *buf, int len)
{
- insb((unsigned long)addr, buf, len);
+ insb(addr - PCI_IOBASE, buf, len);
}
static inline void writesl(const void __iomem *addr, const void *buf, int len)
{
- outsl((unsigned long)addr, buf, len);
+ outsl(addr - PCI_IOBASE, buf, len);
}
static inline void writesw(const void __iomem *addr, const void *buf, int len)
{
- outsw((unsigned long)addr, buf, len);
+ outsw(addr - PCI_IOBASE, buf, len);
}
static inline void writesb(const void __iomem *addr, const void *buf, int len)
{
- outsb((unsigned long)addr, buf, len);
+ outsb(addr - PCI_IOBASE, buf, len);
}
#ifndef CONFIG_GENERIC_IOMAP
@@ -269,8 +273,9 @@ static inline void writesb(const void __iomem *addr, const void *buf, int len)
outsl((unsigned long) (p), (src), (count))
#endif /* CONFIG_GENERIC_IOMAP */
-
-#define IO_SPACE_LIMIT 0xffffffff
+#ifndef IO_SPACE_LIMIT
+#define IO_SPACE_LIMIT 0xffff
+#endif
#ifdef __KERNEL__
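
With PCI_IOBASE now overridable, an architecture whose port space is a memory-mapped window only has to provide the base before pulling in the generic header; a sketch (the window address is illustrative, not any real platform's):

/* arch/<arch>/include/asm/io.h -- sketch */
#define PCI_IOBASE ((void __iomem *)0xfe000000)	/* MMIO window for port I/O */
#include <asm-generic/io.h>

/*
 * inb(0x3f8) now expands to readb(0x3f8 + PCI_IOBASE), a load from the
 * window, rather than dereferencing port number 0x3f8 as an address.
 */
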
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h
new file mode 100644
index 000000000000..ea5d4ef81061
--- /dev/null
+++ b/include/asm-generic/sizes.h
@@ -0,0 +1,47 @@
+/*
+ * linux/include/asm-generic/sizes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_GENERIC_SIZES_H__
+#define __ASM_GENERIC_SIZES_H__
+
+#define SZ_1 0x00000001
+#define SZ_2 0x00000002
+#define SZ_4 0x00000004
+#define SZ_8 0x00000008
+#define SZ_16 0x00000010
+#define SZ_32 0x00000020
+#define SZ_64 0x00000040
+#define SZ_128 0x00000080
+#define SZ_256 0x00000100
+#define SZ_512 0x00000200
+
+#define SZ_1K 0x00000400
+#define SZ_2K 0x00000800
+#define SZ_4K 0x00001000
+#define SZ_8K 0x00002000
+#define SZ_16K 0x00004000
+#define SZ_32K 0x00008000
+#define SZ_64K 0x00010000
+#define SZ_128K 0x00020000
+#define SZ_256K 0x00040000
+#define SZ_512K 0x00080000
+
+#define SZ_1M 0x00100000
+#define SZ_2M 0x00200000
+#define SZ_4M 0x00400000
+#define SZ_8M 0x00800000
+#define SZ_16M 0x01000000
+#define SZ_32M 0x02000000
+#define SZ_64M 0x04000000
+#define SZ_128M 0x08000000
+#define SZ_256M 0x10000000
+#define SZ_512M 0x20000000
+
+#define SZ_1G 0x40000000
+#define SZ_2G 0x80000000
+
+#endif /* __ASM_GENERIC_SIZES_H__ */
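
A typical consumer of this header is platform code sizing fixed regions; for example (base and span illustrative):

#include <asm-generic/sizes.h>

#define EXAMPLE_WINDOW_BASE	SZ_1M			/* 0x00100000 */
#define EXAMPLE_WINDOW_SIZE	(SZ_16M - SZ_4K)	/* all but the last page */
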
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index b218b8513d04..ac68c999b6c2 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -288,14 +288,16 @@ strncpy_from_user(char *dst, const char __user *src, long count)
*
* Return 0 on exception, a value greater than N if too long
*/
-#ifndef strnlen_user
+#ifndef __strnlen_user
+#define __strnlen_user strnlen
+#endif
+
static inline long strnlen_user(const char __user *src, long n)
{
if (!access_ok(VERIFY_READ, src, 1))
return 0;
- return strlen((void * __force)src) + 1;
+ return __strnlen_user(src, n);
}
-#endif
static inline long strlen_user(const char __user *src)
{
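
An architecture that has a fault-safe scanner can now hook it in by defining __strnlen_user before including the generic header, while the access_ok() check stays in the common wrapper. A hypothetical override (name and prototype assumed):

/* arch/<arch>/include/asm/uaccess.h -- hypothetical */
extern long my_arch_strnlen_user(const char __user *s, long n);
#define __strnlen_user my_arch_strnlen_user
#include <asm-generic/uaccess.h>
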
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index ffec177f3481..3a60ac889520 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -2,7 +2,6 @@ header-y += drm.h
header-y += drm_mode.h
header-y += drm_sarea.h
header-y += i810_drm.h
-header-y += i830_drm.h
header-y += i915_drm.h
header-y += mga_drm.h
header-y += nouveau_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index e5f70617dec5..9ac431396176 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -608,6 +608,12 @@ struct drm_gem_open {
__u64 size;
};
+/** DRM_IOCTL_GET_CAP ioctl argument type */
+struct drm_get_cap {
+ __u64 capability;
+ __u64 value;
+};
+
#include "drm_mode.h"
#define DRM_IOCTL_BASE 'd'
@@ -628,6 +634,7 @@ struct drm_gem_open {
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+#define DRM_IOCTL_GET_CAP DRM_IOWR(0x0c, struct drm_get_cap)
#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
@@ -701,6 +708,10 @@ struct drm_gem_open {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
@@ -741,6 +752,8 @@ struct drm_event_vblank {
__u32 reserved;
};
+#define DRM_CAP_DUMB_BUFFER 0x1
+
/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
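
From userspace the capability check is a single ioctl against the new structure; a minimal probe for dumb-buffer support might look like this (sketch, error handling reduced to a boolean):

#include <sys/ioctl.h>
#include <drm/drm.h>

static int supports_dumb(int drm_fd)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };

	/* older kernels lack DRM_IOCTL_GET_CAP entirely */
	if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) < 0)
		return 0;
	return cap.value != 0;
}
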
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 348843b80150..ad5770f2315c 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -145,7 +145,10 @@ extern void drm_ut_debug_printk(unsigned int request_level,
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
-#define DRIVER_USE_PLATFORM_DEVICE 0x4000
+
+#define DRIVER_BUS_PCI 0x1
+#define DRIVER_BUS_PLATFORM 0x2
+#define DRIVER_BUS_USB 0x3
/***********************************************************************/
/** \name Begin the DRM... */
@@ -698,6 +701,19 @@ struct drm_master {
#define DRM_SCANOUTPOS_INVBL (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+struct drm_bus {
+ int bus_type;
+ int (*get_irq)(struct drm_device *dev);
+ const char *(*get_name)(struct drm_device *dev);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+ int (*set_unique)(struct drm_device *dev, struct drm_master *master,
+ struct drm_unique *unique);
+ int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
+ /* hooks that are for PCI */
+ int (*agp_init)(struct drm_device *dev);
+
+};
+
/**
* DRM driver structure. This structure represents the common code for
* a family of cards. There will be one drm_device for each card present
@@ -880,6 +896,17 @@ struct drm_driver {
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
+ /* dumb alloc support */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
/* Driver private ops for this object */
struct vm_operations_struct *gem_vm_ops;
@@ -895,8 +922,13 @@ struct drm_driver {
struct drm_ioctl_desc *ioctls;
int num_ioctls;
struct file_operations fops;
- struct pci_driver pci_driver;
- struct platform_device *platform_device;
+ union {
+ struct pci_driver *pci;
+ struct platform_device *platform_device;
+ struct usb_driver *usb;
+ } kdriver;
+ struct drm_bus *bus;
+
/* List of devices hanging off this driver */
struct list_head device_list;
};
@@ -1099,6 +1131,7 @@ struct drm_device {
#endif
struct platform_device *platformdev; /**< Platform device structure */
+ struct usb_device *usbdev;
struct drm_sg_mem *sg; /**< Scatter gather memory */
unsigned int num_crtcs; /**< Number of CRTCs on this device */
@@ -1136,28 +1169,9 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
static inline int drm_dev_to_irq(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return platform_get_irq(dev->platformdev, 0);
- else
- return dev->pdev->irq;
+ return dev->driver->bus->get_irq(dev);
}
-static inline int drm_get_pci_domain(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
-
-#ifndef __alpha__
- /* For historical reasons, drm_get_pci_domain() is busticated
- * on most archs and has to remain so for userspace interface
- * < 1.4, except on alpha which was right from the beginning
- */
- if (dev->if_version < 0x10004)
- return 0;
-#endif /* __alpha__ */
-
- return pci_domain_nr(dev->pdev->bus);
-}
#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
@@ -1211,8 +1225,6 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
/*@{*/
/* Driver support (drm_drv.h) */
-extern int drm_init(struct drm_driver *driver);
-extern void drm_exit(struct drm_driver *driver);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
@@ -1264,6 +1276,8 @@ extern int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int drm_getcap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int drm_setversion(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_noop(struct drm_device *dev, void *data,
@@ -1422,11 +1436,7 @@ extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_master *drm_master_create(struct drm_minor *minor);
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
-extern int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
-extern int drm_get_platform_dev(struct platform_device *pdev,
- struct drm_driver *driver);
+
extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern unsigned int drm_debug;
@@ -1544,6 +1554,7 @@ drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
@@ -1616,11 +1627,21 @@ static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
return NULL;
}
-static __inline__ int drm_device_is_agp(struct drm_device *dev)
+static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
+}
+#include "drm_mem_util.h"
+
+extern int drm_fill_in_dev(struct drm_device *dev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver);
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
+/*@}*/
+
+/* PCI section */
+static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
+{
if (dev->driver->device_is_agp != NULL) {
int err = (*dev->driver->device_is_agp) (dev);
@@ -1632,35 +1653,26 @@ static __inline__ int drm_device_is_agp(struct drm_device *dev)
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}
-static __inline__ int drm_device_is_pcie(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
- else
- return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
-}
-static __inline__ void drm_core_dropmap(struct drm_local_map *map)
+static __inline__ int drm_pci_device_is_pcie(struct drm_device *dev)
{
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
-#include "drm_mem_util.h"
-
-static inline void *drm_get_device(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return dev->platformdev;
- else
- return dev->pdev;
-}
-extern int drm_platform_init(struct drm_driver *driver);
-extern int drm_pci_init(struct drm_driver *driver);
-extern int drm_fill_in_dev(struct drm_device *dev,
+extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
-/*@}*/
+
+
+/* platform section */
+extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
+extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
+
+extern int drm_get_platform_dev(struct platform_device *pdev,
+ struct drm_driver *driver);
#endif /* __KERNEL__ */
#endif
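
Under the reworked registration, a PCI driver keeps its pci_driver next to the drm_driver and hands both to drm_pci_init()/drm_pci_exit() instead of embedding one in the other. A sketch of the module glue (all example_* names hypothetical; the expectation is that drm_pci_init() wires up .kdriver.pci and .bus):

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pciidlist,	/* hypothetical ID table */
	.probe		= example_pci_probe,
	.remove		= example_pci_remove,
};

static struct drm_driver example_drm_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	/* ... fops, ioctls, GEM hooks ... */
};

static int __init example_init(void)
{
	return drm_pci_init(&example_drm_driver, &example_pci_driver);
}

static void __exit example_exit(void)
{
	drm_pci_exit(&example_drm_driver, &example_pci_driver);
}
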
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 801be59f4f15..60edf9be31e5 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -659,7 +659,7 @@ extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
@@ -685,8 +685,8 @@ extern void drm_mode_validate_size(struct drm_device *dev,
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
-extern int drm_mode_hsync(struct drm_display_mode *mode);
-extern int drm_mode_vrefresh(struct drm_display_mode *mode);
+extern int drm_mode_hsync(const struct drm_display_mode *mode);
+extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
extern void drm_mode_connector_list_update(struct drm_connector *connector);
@@ -798,4 +798,11 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
+
+extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index 0af087a4d3b3..3650d5d011ee 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -45,14 +45,10 @@ struct drm_hash_item {
};
struct drm_open_hash {
- unsigned int size;
- unsigned int order;
- unsigned int fill;
struct hlist_head *table;
- int use_vmalloc;
+ u8 order;
};
-
extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index e39177778601..b1e7809e5e15 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -42,23 +42,25 @@
#endif
struct drm_mm_node {
- struct list_head free_stack;
struct list_head node_list;
- unsigned free : 1;
+ struct list_head hole_stack;
+ unsigned hole_follows : 1;
unsigned scanned_block : 1;
unsigned scanned_prev_free : 1;
unsigned scanned_next_free : 1;
+ unsigned scanned_preceeds_hole : 1;
+ unsigned allocated : 1;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
};
struct drm_mm {
- /* List of free memory blocks, most recently freed ordered. */
- struct list_head free_stack;
- /* List of all memory nodes, ordered according to the (increasing) start
- * address of the memory node. */
- struct list_head node_list;
+ /* List of all memory nodes that immediately precede a free hole. */
+ struct list_head hole_stack;
+ /* head_node.node_list is the list of all memory nodes, ordered
+ * according to the (increasing) start address of the memory node. */
+ struct drm_mm_node head_node;
struct list_head unused_nodes;
int num_unused;
spinlock_t unused_lock;
@@ -70,8 +72,28 @@ struct drm_mm {
unsigned scanned_blocks;
unsigned long scan_start;
unsigned long scan_end;
+ struct drm_mm_node *prev_scanned_node;
};
+static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
+{
+ return node->allocated;
+}
+
+static inline bool drm_mm_initialized(struct drm_mm *mm)
+{
+ return mm->hole_stack.next;
+}
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+ &(mm)->head_node.node_list, \
+ node_list)
+#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
+ for (entry = (mm)->prev_scanned_node, \
+ n = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL; \
+ entry != NULL; entry = n, \
+ n = entry ? list_entry(entry->node_list.next, \
+ struct drm_mm_node, node_list) : NULL)
/*
* Basic range manager support (drm_mm.c)
*/
@@ -118,7 +140,15 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
return drm_mm_get_block_range_generic(parent, size, alignment,
start, end, 1);
}
+extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment);
+extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end);
extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
@@ -134,11 +164,6 @@ extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
- unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
- unsigned long size, int atomic);
extern int drm_mm_pre_get(struct drm_mm *mm);
static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
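
The new insert/remove entry points allow the drm_mm_node to be embedded in the caller's object rather than allocated by the range manager; a sketch under that assumption:

struct example_object {
	struct drm_mm_node node;	/* embedded, no separate allocation */
	unsigned long size;
};

static int example_bind(struct drm_mm *mm, struct example_object *obj)
{
	/* find a hole of obj->size bytes at 4 KiB alignment; on success
	 * obj->node is marked allocated and linked into the manager */
	return drm_mm_insert_node(mm, &obj->node, obj->size, 4096);
}

static void example_unbind(struct example_object *obj)
{
	if (drm_mm_node_allocated(&obj->node))
		drm_mm_remove_node(&obj->node);
}
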
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 0fc7397c8f1f..ae6b7a3dbec7 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -344,4 +344,33 @@ struct drm_mode_crtc_page_flip {
__u64 user_data;
};
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ uint32_t height;
+ uint32_t width;
+ uint32_t bpp;
+ uint32_t flags;
+ /* handle, pitch, size will be returned */
+ uint32_t handle;
+ uint32_t pitch;
+ uint64_t size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_mode_destroy_dumb {
+ uint32_t handle;
+};
+
#endif
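
Together the three structures give a driver-independent path to a CPU-mappable scanout buffer: create, ask for the fake mmap offset, then mmap() through the DRM fd. A condensed userspace sequence (sketch; mode-setting and cleanup omitted):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

static void *example_create_dumb(int fd, uint32_t *handle, uint32_t *pitch)
{
	struct drm_mode_create_dumb creq = {
		.width = 1024, .height = 768, .bpp = 32,
	};
	struct drm_mode_map_dumb mreq = { 0 };
	void *map;

	/* kernel fills in creq.handle, creq.pitch and creq.size */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) < 0)
		return NULL;

	/* kernel fills in the fake offset used by the mmap call below */
	mreq.handle = creq.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) < 0)
		return NULL;

	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, mreq.offset);
	if (map == MAP_FAILED)
		return NULL;

	*handle = creq.handle;	/* attach to a framebuffer with ADDFB later */
	*pitch = creq.pitch;
	return map;
}
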
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 5ff1194dc2ea..820ee9029482 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -141,6 +141,20 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6703, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6704, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6705, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6706, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6707, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6708, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6709, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6718, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/drm_usb.h b/include/drm/drm_usb.h
new file mode 100644
index 000000000000..33506c11da8b
--- /dev/null
+++ b/include/drm/drm_usb.h
@@ -0,0 +1,15 @@
+#ifndef DRM_USB_H
+#define DRM_USB_H
+
+#include <drmP.h>
+
+#include <linux/usb.h>
+
+extern int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver);
+extern void drm_usb_exit(struct drm_driver *driver, struct usb_driver *udriver);
+
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver);
+
+#endif
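
A USB DRM driver pairs its usb_driver with the drm_driver the same way the PCI and platform paths do, calling drm_get_usb_dev() from probe. A hypothetical skeleton (all example_* names assumed):

static int example_usb_probe(struct usb_interface *interface,
			     const struct usb_device_id *id)
{
	return drm_get_usb_dev(interface, id, &example_drm_driver);
}

static struct usb_driver example_usb_driver = {
	.name		= "example_drm",
	.probe		= example_usb_probe,
	.disconnect	= example_usb_disconnect,	/* hypothetical */
	.id_table	= example_usb_ids,		/* hypothetical */
};

static int __init example_init(void)
{
	return drm_usb_init(&example_drm_driver, &example_usb_driver);
}

static void __exit example_exit(void)
{
	drm_usb_exit(&example_drm_driver, &example_usb_driver);
}
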
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
deleted file mode 100644
index 61315c29b8f3..000000000000
--- a/include/drm/i830_drm.h
+++ /dev/null
@@ -1,342 +0,0 @@
-#ifndef _I830_DRM_H_
-#define _I830_DRM_H_
-
-/* WARNING: These defines must be the same as what the Xserver uses.
- * if you change them, you must change the defines in the Xserver.
- *
- * KW: Actually, you can't ever change them because doing so would
- * break backwards compatibility.
- */
-
-#ifndef _I830_DEFINES_
-#define _I830_DEFINES_
-
-#define I830_DMA_BUF_ORDER 12
-#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
-#define I830_DMA_BUF_NR 256
-#define I830_NR_SAREA_CLIPRECTS 8
-
-/* Each region is a minimum of 64k, and there are at most 64 of them.
- */
-#define I830_NR_TEX_REGIONS 64
-#define I830_LOG_MIN_TEX_REGION_SIZE 16
-
-/* KW: These aren't correct but someone set them to two and then
- * released the module. Now we can't change them as doing so would
- * break backwards compatibility.
- */
-#define I830_TEXTURE_COUNT 2
-#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
-
-#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
-
-#define I830_UPLOAD_CTX 0x1
-#define I830_UPLOAD_BUFFERS 0x2
-#define I830_UPLOAD_CLIPRECTS 0x4
-#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
-#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
-#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
-#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
-#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
-#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
-#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
-#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
-#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
-#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
-#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
-#define I830_UPLOAD_TEX0 0x10000
-#define I830_UPLOAD_TEX1 0x20000
-#define I830_UPLOAD_TEX2 0x40000
-#define I830_UPLOAD_TEX3 0x80000
-#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
-#define I830_UPLOAD_TEX_MASK 0xf0000
-#define I830_UPLOAD_TEXBLEND0 0x100000
-#define I830_UPLOAD_TEXBLEND1 0x200000
-#define I830_UPLOAD_TEXBLEND2 0x400000
-#define I830_UPLOAD_TEXBLEND3 0x800000
-#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
-#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
-#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
-#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
-#define I830_UPLOAD_STIPPLE 0x8000000
-
-/* Indices into buf.Setup where various bits of state are mirrored per
- * context and per buffer. These can be fired at the card as a unit,
- * or in a piecewise fashion as required.
- */
-
-/* Destbuffer state
- * - backbuffer linear offset and pitch -- invarient in the current dri
- * - zbuffer linear offset and pitch -- also invarient
- * - drawing origin in back and depth buffers.
- *
- * Keep the depth/back buffer state here to accommodate private buffers
- * in the future.
- */
-
-#define I830_DESTREG_CBUFADDR 0
-#define I830_DESTREG_DBUFADDR 1
-#define I830_DESTREG_DV0 2
-#define I830_DESTREG_DV1 3
-#define I830_DESTREG_SENABLE 4
-#define I830_DESTREG_SR0 5
-#define I830_DESTREG_SR1 6
-#define I830_DESTREG_SR2 7
-#define I830_DESTREG_DR0 8
-#define I830_DESTREG_DR1 9
-#define I830_DESTREG_DR2 10
-#define I830_DESTREG_DR3 11
-#define I830_DESTREG_DR4 12
-#define I830_DEST_SETUP_SIZE 13
-
-/* Context state
- */
-#define I830_CTXREG_STATE1 0
-#define I830_CTXREG_STATE2 1
-#define I830_CTXREG_STATE3 2
-#define I830_CTXREG_STATE4 3
-#define I830_CTXREG_STATE5 4
-#define I830_CTXREG_IALPHAB 5
-#define I830_CTXREG_STENCILTST 6
-#define I830_CTXREG_ENABLES_1 7
-#define I830_CTXREG_ENABLES_2 8
-#define I830_CTXREG_AA 9
-#define I830_CTXREG_FOGCOLOR 10
-#define I830_CTXREG_BLENDCOLR0 11
-#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
-#define I830_CTXREG_VF 13
-#define I830_CTXREG_VF2 14
-#define I830_CTXREG_MCSB0 15
-#define I830_CTXREG_MCSB1 16
-#define I830_CTX_SETUP_SIZE 17
-
-/* 1.3: Stipple state
- */
-#define I830_STPREG_ST0 0
-#define I830_STPREG_ST1 1
-#define I830_STP_SETUP_SIZE 2
-
-/* Texture state (per tex unit)
- */
-
-#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
-#define I830_TEXREG_MI1 1
-#define I830_TEXREG_MI2 2
-#define I830_TEXREG_MI3 3
-#define I830_TEXREG_MI4 4
-#define I830_TEXREG_MI5 5
-#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
-#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
-#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
-#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
-#define I830_TEX_SETUP_SIZE 10
-
-#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
-#define I830_TEXREG_TM0S0 1
-#define I830_TEXREG_TM0S1 2
-#define I830_TEXREG_TM0S2 3
-#define I830_TEXREG_TM0S3 4
-#define I830_TEXREG_TM0S4 5
-#define I830_TEXREG_NOP0 6 /* noop */
-#define I830_TEXREG_NOP1 7 /* noop */
-#define I830_TEXREG_NOP2 8 /* noop */
-#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
-#define __I830_TEX_SETUP_SIZE 10
-
-#define I830_FRONT 0x1
-#define I830_BACK 0x2
-#define I830_DEPTH 0x4
-
-#endif /* _I830_DEFINES_ */
-
-typedef struct _drm_i830_init {
- enum {
- I830_INIT_DMA = 0x01,
- I830_CLEANUP_DMA = 0x02
- } func;
- unsigned int mmio_offset;
- unsigned int buffers_offset;
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
- unsigned int back_pitch;
- unsigned int depth_pitch;
- unsigned int cpp;
-} drm_i830_init_t;
-
-/* Warning: If you change the SAREA structure you must change the Xserver
- * structure as well */
-
-typedef struct _drm_i830_tex_region {
- unsigned char next, prev; /* indices to form a circular LRU */
- unsigned char in_use; /* owned by a client, or free? */
- int age; /* tracked by clients to update local LRU's */
-} drm_i830_tex_region_t;
-
-typedef struct _drm_i830_sarea {
- unsigned int ContextState[I830_CTX_SETUP_SIZE];
- unsigned int BufferState[I830_DEST_SETUP_SIZE];
- unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
- unsigned int Palette[2][256];
- unsigned int dirty;
-
- unsigned int nbox;
- struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
-
- /* Maintain an LRU of contiguous regions of texture space. If
- * you think you own a region of texture memory, and it has an
- * age different to the one you set, then you are mistaken and
- * it has been stolen by another client. If global texAge
- * hasn't changed, there is no need to walk the list.
- *
- * These regions can be used as a proxy for the fine-grained
- * texture information of other clients - by maintaining them
- * in the same lru which is used to age their own textures,
- * clients have an approximate lru for the whole of global
- * texture space, and can make informed decisions as to which
- * areas to kick out. There is no need to choose whether to
- * kick out your own texture or someone else's - simply eject
- * them all in LRU order.
- */
-
- drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
- /* Last elt is sentinal */
- int texAge; /* last time texture was uploaded */
- int last_enqueue; /* last time a buffer was enqueued */
- int last_dispatch; /* age of the most recently dispatched buffer */
- int last_quiescent; /* */
- int ctxOwner; /* last context to upload state */
-
- int vertex_prim;
-
- int pf_enabled; /* is pageflipping allowed? */
- int pf_active;
- int pf_current_page; /* which buffer is being displayed? */
-
- int perf_boxes; /* performance boxes to be displayed */
-
- /* Here's the state for texunits 2,3:
- */
- unsigned int TexState2[I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed2;
-
- unsigned int TexState3[I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed3;
-
- unsigned int StippleState[I830_STP_SETUP_SIZE];
-} drm_i830_sarea_t;
-
-/* Flags for perf_boxes
- */
-#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
-#define I830_BOX_FLIP 0x2 /* populated by kernel */
-#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
-#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
-#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
-
-/* I830 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_I830_INIT 0x00
-#define DRM_I830_VERTEX 0x01
-#define DRM_I830_CLEAR 0x02
-#define DRM_I830_FLUSH 0x03
-#define DRM_I830_GETAGE 0x04
-#define DRM_I830_GETBUF 0x05
-#define DRM_I830_SWAP 0x06
-#define DRM_I830_COPY 0x07
-#define DRM_I830_DOCOPY 0x08
-#define DRM_I830_FLIP 0x09
-#define DRM_I830_IRQ_EMIT 0x0a
-#define DRM_I830_IRQ_WAIT 0x0b
-#define DRM_I830_GETPARAM 0x0c
-#define DRM_I830_SETPARAM 0x0d
-
-#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
-#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
-
-typedef struct _drm_i830_clear {
- int clear_color;
- int clear_depth;
- int flags;
- unsigned int clear_colormask;
- unsigned int clear_depthmask;
-} drm_i830_clear_t;
-
-/* These may be placeholders if we have more cliprects than
- * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
- * false, indicating that the buffer will be dispatched again with a
- * new set of cliprects.
- */
-typedef struct _drm_i830_vertex {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- int discard; /* client is finished with the buffer? */
-} drm_i830_vertex_t;
-
-typedef struct _drm_i830_copy_t {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- void __user *address; /* Address to copy from */
-} drm_i830_copy_t;
-
-typedef struct drm_i830_dma {
- void __user *virtual;
- int request_idx;
- int request_size;
- int granted;
-} drm_i830_dma_t;
-
-/* 1.3: Userspace can request & wait on irq's:
- */
-typedef struct drm_i830_irq_emit {
- int __user *irq_seq;
-} drm_i830_irq_emit_t;
-
-typedef struct drm_i830_irq_wait {
- int irq_seq;
-} drm_i830_irq_wait_t;
-
-/* 1.3: New ioctl to query kernel params:
- */
-#define I830_PARAM_IRQ_ACTIVE 1
-
-typedef struct drm_i830_getparam {
- int param;
- int __user *value;
-} drm_i830_getparam_t;
-
-/* 1.3: New ioctl to set kernel params:
- */
-#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
-
-typedef struct drm_i830_setparam {
- int param;
- int value;
-} drm_i830_setparam_t;
-
-#endif /* _I830_DRM_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 0039f1f97ad8..c4d6dbfa3ff4 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -290,6 +290,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
+#define I915_PARAM_HAS_RELAXED_DELTA 15
typedef struct drm_i915_getparam {
int param;
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index e2cfe80f6fca..5edd3a76fffa 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -94,6 +94,7 @@ struct drm_nouveau_setparam {
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
#define NOUVEAU_GEM_TILE_16BPP 0x00000001
#define NOUVEAU_GEM_TILE_32BPP 0x00000002
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index e5c607a02d57..3dec41cf8342 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -908,6 +908,7 @@ struct drm_radeon_cs {
#define RADEON_INFO_WANT_HYPERZ 0x07
#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
+#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
struct drm_radeon_info {
uint32_t request;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 1da8af6ac884..efed0820d9fa 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -50,13 +50,15 @@ struct ttm_backend_func {
* @pages: Array of pointers to ttm pages.
* @dummy_read_page: Page to be used instead of NULL pages in the
* array @pages.
+ * @dma_addrs: Array of DMA (bus) address of the ttm pages.
*
* Populate the backend with ttm pages. Depending on the backend,
* it may or may not copy the @pages array.
*/
int (*populate) (struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page);
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs);
/**
* struct ttm_backend_func member clear
*
@@ -149,6 +151,7 @@ enum ttm_caching_state {
* @swap_storage: Pointer to shmem struct file for swap storage.
* @caching_state: The current caching state of the pages.
* @state: The current binding state of the pages.
+ * @dma_address: The DMA (bus) addresses of the pages (if TTM_PAGE_FLAG_DMA32)
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -173,6 +176,7 @@ struct ttm_tt {
tt_unbound,
tt_unpopulated,
} state;
+ dma_addr_t *dma_address;
};
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0) /* Fixed (on-card) PCI memory */
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 116821448c38..8062890f725e 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -36,11 +36,13 @@
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state for the page.
* @count: number of pages to allocate.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/
int ttm_get_pages(struct list_head *pages,
int flags,
enum ttm_caching_state cstate,
- unsigned count);
+ unsigned count,
+ dma_addr_t *dma_address);
/**
* Put linked list of pages to pool.
*
@@ -49,11 +51,13 @@ int ttm_get_pages(struct list_head *pages,
* count.
* @flags: ttm flags for page allocation.
* @cstate: ttm caching state.
+ * @dma_address: The DMA (bus) address of pages (if TTM_PAGE_FLAG_DMA32 set).
*/
void ttm_put_pages(struct list_head *pages,
unsigned page_count,
int flags,
- enum ttm_caching_state cstate);
+ enum ttm_caching_state cstate,
+ dma_addr_t *dma_address);
/**
* Initialize pool allocator.
*/
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c3e9de8321c6..9343dd3de858 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -230,7 +230,7 @@ struct cpufreq_driver {
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
- int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
+ int (*suspend) (struct cpufreq_policy *policy);
int (*resume) (struct cpufreq_policy *policy);
struct freq_attr **attr;
};
@@ -281,19 +281,10 @@ __ATTR(_name, 0444, show_##_name, NULL)
static struct freq_attr _name = \
__ATTR(_name, _perm, show_##_name, NULL)
-#define cpufreq_freq_attr_ro_old(_name) \
-static struct freq_attr _name##_old = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
#define cpufreq_freq_attr_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
-#define cpufreq_freq_attr_rw_old(_name) \
-static struct freq_attr _name##_old = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-
struct global_attr {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj,
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 68ba85a00c06..b2a36391d2a1 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -152,6 +152,8 @@
#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */
#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */
+#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */
+
struct fb_fix_screeninfo {
char id[16]; /* identification string eg "TT Builtin" */
unsigned long smem_start; /* Start of frame buffer mem */
diff --git a/include/linux/i2c-tegra.h b/include/linux/i2c-tegra.h
new file mode 100644
index 000000000000..9c85da49857a
--- /dev/null
+++ b/include/linux/i2c-tegra.h
@@ -0,0 +1,25 @@
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_I2C_TEGRA_H
+#define _LINUX_I2C_TEGRA_H
+
+struct tegra_i2c_platform_data {
+ unsigned long bus_clk_rate;
+};
+
+#endif /* _LINUX_I2C_TEGRA_H */
diff --git a/include/linux/i2c/max6639.h b/include/linux/i2c/max6639.h
new file mode 100644
index 000000000000..6011c42034da
--- /dev/null
+++ b/include/linux/i2c/max6639.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_MAX6639_H
+#define _LINUX_MAX6639_H
+
+#include <linux/types.h>
+
+/* platform data for the MAX6639 temperature sensor and fan control */
+
+struct max6639_platform_data {
+ bool pwm_polarity; /* Polarity low (0) or high (1, default) */
+ int ppr; /* Pulses per rotation 1..4 (default == 2) */
+ int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
+};
+
+#endif /* _LINUX_MAX6639_H */
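
Board code passes these settings through i2c_board_info at registration time; for instance (slave address and values illustrative):

static struct max6639_platform_data example_max6639_pdata = {
	.pwm_polarity	= true,		/* active-high PWM (the default) */
	.ppr		= 2,		/* two tach pulses per rotation */
	.rpm_range	= 4000,
};

static struct i2c_board_info example_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("max6639", 0x2c),
		.platform_data = &example_max6639_pdata,
	},
};
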
diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h
new file mode 100644
index 000000000000..69280db02c41
--- /dev/null
+++ b/include/linux/i2c/pmbus.h
@@ -0,0 +1,45 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _PMBUS_H_
+#define _PMBUS_H_
+
+/* flags */
+
+/*
+ * PMBUS_SKIP_STATUS_CHECK
+ *
+ * During register detection, skip checking the status register for
+ * communication or command errors.
+ *
+ * Some PMBus chips respond with valid data when trying to read an unsupported
+ * register. For such chips, checking the status register is mandatory when
+ * trying to determine if a chip register exists or not.
+ * Other PMBus chips don't support the STATUS_CML register, or report
+ * communication errors for no explicable reason. For such chips, checking
+ * the status register must be disabled.
+ */
+#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+
+struct pmbus_platform_data {
+ u32 flags; /* Device specific flags */
+};
+
+#endif /* _PMBUS_H_ */
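
A board whose chip needs the status check suppressed hands the flag over by the usual platform-data route; roughly (address illustrative):

static struct pmbus_platform_data example_pmbus_pdata = {
	.flags = PMBUS_SKIP_STATUS_CHECK,
};

static struct i2c_board_info example_pmbus_device __initdata = {
	I2C_BOARD_INFO("pmbus", 0x27),
	.platform_data = &example_pmbus_pdata,
};
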
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 78219887308e..581703d86fbd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page)
/*
* PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2, but better not too close to
+ * -2 so that an underflow of page_mapcount() won't be mistaken for a
+ * genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
*/
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
static inline int PageBuddy(struct page *page)
{
- return atomic_read(&page->_mapcount) == -2;
+ return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}
static inline void __SetPageBuddy(struct page *page)
{
VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
- atomic_set(&page->_mapcount, -2);
+ atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}
static inline void __ClearPageBuddy(struct page *page)
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 6023efa9f5d9..f88522b10a38 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -501,7 +501,7 @@ extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
-extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
+extern void nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
/*
* Try to write back everything synchronously (but check the
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 3e112de12d8d..216cea5db0aa 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -30,6 +30,8 @@ struct nfs_client {
#define NFS_CS_CALLBACK 1 /* - callback started */
#define NFS_CS_IDMAP 2 /* - idmap started */
#define NFS_CS_RENEWD 3 /* - renewd started */
+#define NFS_CS_STOP_RENEW 4 /* no more state to renew */
+#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -75,7 +77,6 @@ struct nfs_client {
u32 cl_exchange_flags;
struct nfs4_session *cl_session; /* sharred session */
struct list_head cl_layouts;
- struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_FSCACHE
@@ -176,6 +177,7 @@ struct nfs_server {
#define NFS_CAP_CTIME (1U << 12)
#define NFS_CAP_MTIME (1U << 13)
#define NFS_CAP_POSIX_LOCK (1U << 14)
+#define NFS_CAP_UIDGID_NOMAP (1U << 15)
/* maximum number of slots to use */
diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h
index e8352dc5afb5..ae7d6a380dae 100644
--- a/include/linux/nfs_idmap.h
+++ b/include/linux/nfs_idmap.h
@@ -65,6 +65,7 @@ struct idmap_msg {
/* Forward declaration to make this header independent of others */
struct nfs_client;
+struct nfs_server;
#ifdef CONFIG_NFS_USE_NEW_IDMAPPER
@@ -96,10 +97,10 @@ void nfs_idmap_delete(struct nfs_client *);
#endif /* CONFIG_NFS_USE_NEW_IDMAPPER */
-int nfs_map_name_to_uid(struct nfs_client *, const char *, size_t, __u32 *);
-int nfs_map_group_to_gid(struct nfs_client *, const char *, size_t, __u32 *);
-int nfs_map_uid_to_name(struct nfs_client *, __u32, char *, size_t);
-int nfs_map_gid_to_group(struct nfs_client *, __u32, char *, size_t);
+int nfs_map_name_to_uid(const struct nfs_server *, const char *, size_t, __u32 *);
+int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, __u32 *);
+int nfs_map_uid_to_name(const struct nfs_server *, __u32, char *, size_t);
+int nfs_map_gid_to_group(const struct nfs_server *, __u32, char *, size_t);
extern unsigned int nfs_idmap_cache_timeout;
#endif /* __KERNEL__ */
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 68b10f5f8907..8866bb3502ee 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -113,6 +113,8 @@ enum nfs_stat_eventcounters {
NFSIOS_SHORTREAD,
NFSIOS_SHORTWRITE,
NFSIOS_DELAY,
+ NFSIOS_PNFS_READ,
+ NFSIOS_PNFS_WRITE,
__NFSIOS_COUNTSMAX,
};
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index d55cee73f634..90907ada6d52 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -59,9 +59,11 @@ struct nfs_pageio_descriptor {
unsigned int pg_base;
struct inode *pg_inode;
- int (*pg_doio)(struct inode *, struct list_head *, unsigned int, size_t, int);
+ int (*pg_doio)(struct nfs_pageio_descriptor *);
int pg_ioflags;
int pg_error;
+ struct pnfs_layout_segment *pg_lseg;
+ int (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
@@ -79,7 +81,7 @@ extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *dst,
pgoff_t idx_start, unsigned int npages, int tag);
extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
- int (*doio)(struct inode *, struct list_head *, unsigned int, size_t, int),
+ int (*doio)(struct nfs_pageio_descriptor *desc),
size_t bsize,
int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index b0068579bec2..2c2c67d2eb42 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1016,9 +1016,12 @@ struct nfs_read_data {
unsigned int npages; /* Max length of pagevec */
struct nfs_readargs args;
struct nfs_readres res;
-#ifdef CONFIG_NFS_V4
unsigned long timestamp; /* For lease renewal */
-#endif
+ struct pnfs_layout_segment *lseg;
+ struct nfs_client *ds_clp; /* pNFS data server */
+ const struct rpc_call_ops *mds_ops;
+ int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
+ __u64 mds_offset;
struct page *page_array[NFS_PAGEVEC_SIZE];
};
@@ -1035,13 +1038,20 @@ struct nfs_write_data {
unsigned int npages; /* Max length of pagevec */
struct nfs_writeargs args; /* argument struct */
struct nfs_writeres res; /* result struct */
+ struct pnfs_layout_segment *lseg;
+ struct nfs_client *ds_clp; /* pNFS data server */
+ const struct rpc_call_ops *mds_ops;
+ int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
#ifdef CONFIG_NFS_V4
unsigned long timestamp; /* For lease renewal */
#endif
+ __u64 mds_offset; /* Filelayout dense stripe */
struct page *page_array[NFS_PAGEVEC_SIZE];
};
struct nfs_access_entry;
+struct nfs_client;
+struct rpc_timeout;
/*
* RPC procedure vector for NFSv2/NFSv3 demuxing
@@ -1106,6 +1116,8 @@ struct nfs_rpc_ops {
struct nfs_open_context *ctx,
int open_flags,
struct iattr *iattr);
+ int (*init_client) (struct nfs_client *, const struct rpc_timeout *,
+ const char *, rpc_authflavor_t, int);
};
/*
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 580de67f318b..511e5ee809ef 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,7 +517,7 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
-#define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603
+#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
#define PCI_DEVICE_ID_AMD_15H_NB_LINK 0x1604
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
@@ -2079,6 +2079,8 @@
#define PCI_DEVICE_ID_TIGON3_5723 0x165b
#define PCI_DEVICE_ID_TIGON3_5705M 0x165d
#define PCI_DEVICE_ID_TIGON3_5705M_2 0x165e
+#define PCI_DEVICE_ID_NX2_57712 0x1662
+#define PCI_DEVICE_ID_NX2_57712E 0x1663
#define PCI_DEVICE_ID_TIGON3_5714 0x1668
#define PCI_DEVICE_ID_TIGON3_5714S 0x1669
#define PCI_DEVICE_ID_TIGON3_5780 0x166a
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1630d9cae22a..a2afc9fbe186 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -34,28 +34,32 @@ enum {
SCIx_NR_IRQS,
};
+#define SCIx_IRQ_MUXED(irq) \
+{ \
+ [SCIx_ERI_IRQ] = (irq), \
+ [SCIx_RXI_IRQ] = (irq), \
+ [SCIx_TXI_IRQ] = (irq), \
+ [SCIx_BRI_IRQ] = (irq), \
+}
+
struct device;
/*
* Platform device specific platform_data struct
*/
struct plat_sci_port {
- void __iomem *membase; /* io cookie */
unsigned long mapbase; /* resource base */
unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */
unsigned int type; /* SCI / SCIF / IRDA */
upf_t flags; /* UPF_* flags */
- char *clk; /* clock string */
unsigned int scbrr_algo_id; /* SCBRR calculation algo */
unsigned int scscr; /* SCSCR initialization */
struct device *dma_dev;
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
- unsigned int dma_slave_tx;
- unsigned int dma_slave_rx;
-#endif
+ unsigned int dma_slave_tx;
+ unsigned int dma_slave_rx;
};
#endif /* __LINUX_SERIAL_SCI_H */
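
On parts that route all four SCI events through a single interrupt, the new helper collapses the irqs[] initializer to one line; a sketch of a port definition (addresses and IRQ number illustrative):

static struct plat_sci_port example_sci_port = {
	.mapbase	= 0xffe00000,
	.flags		= UPF_BOOT_AUTOCONF,
	.type		= PORT_SCIF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.irqs		= SCIx_IRQ_MUXED(80),	/* ERI/RXI/TXI/BRI share IRQ 80 */
};
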
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index ef9476a36ff7..db7bcaf7c5bd 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -129,6 +129,7 @@ struct rpc_create_args {
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
struct rpc_program *, u32);
+void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index bef0f535f746..a0f998c07c65 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -12,7 +12,6 @@
#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/in.h>
-#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>
@@ -146,7 +145,7 @@ enum xprt_transports {
};
struct rpc_xprt {
- struct kref kref; /* Reference count */
+ atomic_t count; /* Reference count */
struct rpc_xprt_ops * ops; /* transport methods */
const struct rpc_timeout *timeout; /* timeout parms */
diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h
index 185015dd1166..f7751d53f1d3 100644
--- a/include/scsi/fc/fc_ns.h
+++ b/include/scsi/fc/fc_ns.h
@@ -41,6 +41,7 @@ enum fc_ns_req {
FC_NS_GI_A = 0x0101, /* get identifiers - scope */
FC_NS_GPN_ID = 0x0112, /* get port name by ID */
FC_NS_GNN_ID = 0x0113, /* get node name by ID */
+ FC_NS_GSPN_ID = 0x0118, /* get symbolic port name */
FC_NS_GID_PN = 0x0121, /* get ID for port name */
FC_NS_GID_NN = 0x0131, /* get IDs for node name */
FC_NS_GID_FT = 0x0171, /* get IDs by FC4 type */
@@ -144,7 +145,7 @@ struct fc_ns_gid_pn {
};
/*
- * GID_PN response
+ * GID_PN response or GSPN_ID request
*/
struct fc_gid_pn_resp {
__u8 fp_resvd;
@@ -152,6 +153,14 @@ struct fc_gid_pn_resp {
};
/*
+ * GSPN_ID response
+ */
+struct fc_gspn_resp {
+ __u8 fp_name_len;
+ char fp_name[];
+};
+
+/*
* RFT_ID request - register FC-4 types for ID.
*/
struct fc_ns_rft_id {
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index 6d293c846a46..be418d8448a5 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -46,16 +46,11 @@ struct fc_ct_req {
} payload;
};
-/**
- * fill FC header fields in specified fc_frame
- */
-static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
- u32 did, u32 sid, enum fc_fh_type type,
- u32 f_ctl, u32 parm_offset)
+static inline void __fc_fill_fc_hdr(struct fc_frame_header *fh,
+ enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
{
- struct fc_frame_header *fh;
-
- fh = fc_frame_header_get(fp);
WARN_ON(r_ctl == 0);
fh->fh_r_ctl = r_ctl;
hton24(fh->fh_d_id, did);
@@ -68,6 +63,19 @@ static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
}
/**
+ * fill FC header fields in specified fc_frame
+ */
+static inline void fc_fill_fc_hdr(struct fc_frame *fp, enum fc_rctl r_ctl,
+ u32 did, u32 sid, enum fc_fh_type type,
+ u32 f_ctl, u32 parm_offset)
+{
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ __fc_fill_fc_hdr(fh, r_ctl, did, sid, type, f_ctl, parm_offset);
+}
+
+/**
* fc_adisc_fill() - Fill in adisc request frame
* @lport: local port.
* @fp: fc frame where payload will be placed.
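The refactor above splits the old helper so a caller holding a bare struct fc_frame_header can fill it without a frame, while fc_fill_fc_hdr() keeps its previous behavior. A sketch, with illustrative FIDs and frame-control flags:

struct fc_frame_header hdr;

__fc_fill_fc_hdr(&hdr, FC_RCTL_DD_UNSOL_CTL, did, sid, FC_TYPE_CT,
		 FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);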
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index f53c8e31d5fb..24193c1b0da0 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -35,6 +35,8 @@
#include <scsi/fc_frame.h>
+#define FC_FC4_PROV_SIZE (FC_TYPE_FCP + 1) /* size of tables */
+
/*
* libfc error codes
*/
@@ -156,6 +158,7 @@ struct fc_rport_libfc_priv {
#define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
#define FC_RP_FLAGS_RETRY (1 << 1)
#define FC_RP_STARTED (1 << 2)
+ #define FC_RP_FLAGS_CONF_REQ (1 << 3)
unsigned int e_d_tov;
unsigned int r_a_tov;
};
@@ -179,6 +182,7 @@ struct fc_rport_libfc_priv {
* @rp_mutex: The mutex that protects the remote port
* @retry_work: Handle for retries
* @event_callback: Callback when READY, FAILED or LOGO states complete
+ * @prli_count: Count of open PRLI sessions in providers
* @rcu: Structure used for freeing in an RCU-safe manner
*/
struct fc_rport_priv {
@@ -202,7 +206,13 @@ struct fc_rport_priv {
struct list_head peers;
struct work_struct event_work;
u32 supported_classes;
+ u16 prli_count;
struct rcu_head rcu;
+ u16 sp_features;
+ u8 spp_type;
+ void (*lld_event_callback)(struct fc_lport *,
+ struct fc_rport_priv *,
+ enum fc_rport_event);
};
/**
@@ -551,6 +561,16 @@ struct libfc_function_template {
struct fc_seq *(*seq_start_next)(struct fc_seq *);
/*
+ * Set a response handler for the exchange of the sequence.
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*seq_set_resp)(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *,
+ void *),
+ void *arg);
+
+ /*
* Assign a sequence for an incoming request frame.
*
* STATUS: OPTIONAL
@@ -558,6 +578,13 @@ struct libfc_function_template {
struct fc_seq *(*seq_assign)(struct fc_lport *, struct fc_frame *);
/*
+ * Release the reference on the sequence returned by seq_assign().
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*seq_release)(struct fc_seq *);
+
+ /*
* Reset an exchange manager, completing all sequences and exchanges.
* If s_id is non-zero, reset only exchanges originating from that FID.
* If d_id is non-zero, reset only exchanges sending to that FID.
@@ -656,6 +683,15 @@ struct libfc_function_template {
void (*rport_destroy)(struct kref *);
/*
+ * Callback routine after the remote port is logged in
+ *
+ * STATUS: OPTIONAL
+ */
+ void (*rport_event_callback)(struct fc_lport *,
+ struct fc_rport_priv *,
+ enum fc_rport_event);
+
+ /*
* Send a fcp cmd from fsp pkt.
* Called with the SCSI host lock unlocked and irqs disabled.
*
@@ -749,6 +785,15 @@ struct fc_disc {
enum fc_disc_event);
};
+/*
+ * Local port notifier and events.
+ */
+extern struct blocking_notifier_head fc_lport_notifier_head;
+enum fc_lport_event {
+ FC_LPORT_EV_ADD,
+ FC_LPORT_EV_DEL,
+};
+
/**
* struct fc_lport - Local port
* @host: The SCSI host associated with a local port
@@ -789,8 +834,10 @@ struct fc_disc {
* @lso_max: The maximum large offload send size
* @fcts: FC-4 type mask
* @lp_mutex: Mutex to protect the local port
- * @list: Handle for list of local ports
+ * @list: Linkage on list of vport peers
* @retry_work: Handle to local port for delayed retry context
+ * @prov: Pointers available for use by passive FC-4 providers
+ * @lport_list: Linkage on module-wide list of local ports
*/
struct fc_lport {
/* Associations */
@@ -846,8 +893,32 @@ struct fc_lport {
struct mutex lp_mutex;
struct list_head list;
struct delayed_work retry_work;
+ void *prov[FC_FC4_PROV_SIZE];
+ struct list_head lport_list;
};
+/**
+ * struct fc4_prov - FC-4 provider registration
+ * @prli: Handler for incoming PRLI
+ * @prlo: Handler for session reset
+ * @recv: Handler for incoming request
+ * @module: Pointer to module. May be NULL.
+ */
+struct fc4_prov {
+ int (*prli)(struct fc_rport_priv *, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out);
+ void (*prlo)(struct fc_rport_priv *);
+ void (*recv)(struct fc_lport *, struct fc_frame *);
+ struct module *module;
+};
+
+/*
+ * Register FC-4 provider with libfc.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *);
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *);
+
/*
* FC_LPORT HELPER FUNCTIONS
*****************************/
@@ -978,6 +1049,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *, int privsize);
struct fc_lport *fc_vport_id_lookup(struct fc_lport *, u32 port_id);
int fc_lport_bsg_request(struct fc_bsg_job *);
void fc_lport_set_local_id(struct fc_lport *, u32 port_id);
+void fc_lport_iterate(void (*func)(struct fc_lport *, void *), void *);
/*
* REMOTE PORT LAYER
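A sketch of how a passive FC-4 provider might use the new registration hooks; the handler names are placeholders, not part of the patch:

static struct fc4_prov my_prov = {
	.prli	= my_prli,	/* inspect/answer incoming PRLI service pages */
	.prlo	= my_prlo,	/* tear the session down */
	.recv	= my_recv,	/* handle unsolicited FC-4 request frames */
	.module	= THIS_MODULE,
};

/* on module load/unload */
int err = fc_fc4_register_provider(FC_TYPE_FCP, &my_prov);
...
fc_fc4_deregister_provider(FC_TYPE_FCP, &my_prov);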
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index feb6a94c90ea..8c1638b8c28e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -33,6 +33,12 @@
#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */
/*
+ * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
+ * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
+ */
+#define FCOE_MTU 2158
+
+/*
* FIP tunable parameters.
*/
#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
@@ -221,6 +227,8 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_lport *,
u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
const struct libfc_function_template *, int init_fcp);
+u32 fcoe_fc_crc(struct fc_frame *fp);
+int fcoe_start_io(struct sk_buff *skb);
/**
* is_fip_mode() - returns true if FIP mode selected.
@@ -231,5 +239,102 @@ static inline bool is_fip_mode(struct fcoe_ctlr *fip)
return fip->state == FIP_ST_ENABLED;
}
+/* Helper for FCoE SW HBA drivers; can include subven and subdev if needed.
+ * modpost would use the pci_device_id table to auto-generate a formatted
+ * module alias in the corresponding .mod.c file, but there may or may not be
+ * a pci device id table for FCoE drivers, so we use the following helper to
+ * build the fcoe driver module alias.
+ */
+#define MODULE_ALIAS_FCOE_PCI(ven, dev) \
+ MODULE_ALIAS("fcoe-pci:" \
+ "v" __stringify(ven) \
+ "d" __stringify(dev) "sv*sd*bc*sc*i*")
+
+/* the name of the default FCoE transport driver fcoe.ko */
+#define FCOE_TRANSPORT_DEFAULT "fcoe"
+
+/* struct fcoe_transport - The FCoE transport interface
+ * @name: a vendor specific name for their FCoE transport driver
+ * @attached: whether this transport is already attached
+ * @list: list linkage to all attached transports
+ * @match: handler to allow the transport driver to match up a given netdev
+ * @create: handler for the sysfs 'create' entry for FCoE instances
+ * @destroy: handler for the sysfs 'destroy' entry for FCoE instances
+ * @enable: handler for the sysfs 'enable' entry for FCoE instances
+ * @disable: handler for the sysfs 'disable' entry for FCoE instances
+ */
+struct fcoe_transport {
+ char name[IFNAMSIZ];
+ bool attached;
+ struct list_head list;
+ bool (*match) (struct net_device *device);
+ int (*create) (struct net_device *device, enum fip_state fip_mode);
+ int (*destroy) (struct net_device *device);
+ int (*enable) (struct net_device *device);
+ int (*disable) (struct net_device *device);
+};
+
+/**
+ * struct fcoe_percpu_s - The context for FCoE receive thread(s)
+ * @thread: The thread context
+ * @fcoe_rx_list: The queue of pending packets to process
+ * @crc_eof_page: The memory page for calculating frame trailer CRCs
+ * @crc_eof_offset: The offset into the CRC page pointing to available
+ * memory for a new trailer
+ */
+struct fcoe_percpu_s {
+ struct task_struct *thread;
+ struct sk_buff_head fcoe_rx_list;
+ struct page *crc_eof_page;
+ int crc_eof_offset;
+};
+
+/**
+ * struct fcoe_port - The FCoE private structure
+ * @priv: The associated fcoe interface. The structure is
+ * defined by the low level driver
+ * @lport: The associated local port
+ * @fcoe_pending_queue: The pending Rx queue of skbs
+ * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @max_queue_depth: Max queue depth of pending queue
+ * @min_queue_depth: Min queue depth of pending queue
+ * @timer: The queue timer
+ * @destroy_work: Handle for work context
+ * (to prevent RTNL deadlocks)
+ * @data_src_addr: Source address for data
+ *
+ * An instance of this structure is to be allocated along with the
+ * Scsi_Host and libfc fc_lport structures.
+ */
+struct fcoe_port {
+ void *priv;
+ struct fc_lport *lport;
+ struct sk_buff_head fcoe_pending_queue;
+ u8 fcoe_pending_queue_active;
+ u32 max_queue_depth;
+ u32 min_queue_depth;
+ struct timer_list timer;
+ struct work_struct destroy_work;
+ u8 data_src_addr[ETH_ALEN];
+};
+void fcoe_clean_pending_queue(struct fc_lport *);
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
+void fcoe_queue_timer(ulong lport);
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+ struct fcoe_percpu_s *fps);
+
+/**
+ * struct fcoe_netdev_mapping
+ * A mapping from a netdevice to its fcoe_transport
+ */
+struct fcoe_netdev_mapping {
+ struct list_head list;
+ struct net_device *netdev;
+ struct fcoe_transport *ft;
+};
+
+/* fcoe transports registration and deregistration */
+int fcoe_transport_attach(struct fcoe_transport *ft);
+int fcoe_transport_detach(struct fcoe_transport *ft);
#endif /* _LIBFCOE_H */
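A sketch of an LLD plugging into the new transport layer; every handler name here is hypothetical:

static struct fcoe_transport my_ft = {
	.name	 = "my_fcoe",	/* vendor-specific transport name */
	.match	 = my_match,	/* does this netdev belong to this LLD? */
	.create	 = my_create,
	.destroy = my_destroy,
	.enable	 = my_enable,
	.disable = my_disable,
};

int rc = fcoe_transport_attach(&my_ft);	/* module init */
...
fcoe_transport_detach(&my_ft);		/* module exit */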
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 748382b32b52..0f4367751b71 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -212,9 +212,6 @@ struct iscsi_conn {
/* values userspace uses to id a conn */
int persistent_port;
char *persistent_address;
- /* remote portal currently connected to */
- int portal_port;
- char portal_address[ISCSI_ADDRESS_BUF_LEN];
/* MIB-statistics */
uint64_t txdata_octets;
@@ -319,9 +316,6 @@ struct iscsi_host {
/* hw address or netdev iscsi connection is bound to */
char *hwaddress;
char *netdev;
- /* local address */
- int local_port;
- char local_address[ISCSI_ADDRESS_BUF_LEN];
wait_queue_head_t session_removal_wq;
/* protects sessions and state */
@@ -394,6 +388,8 @@ extern void iscsi_session_failure(struct iscsi_session *session,
enum iscsi_err err);
extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf);
+extern int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ enum iscsi_param param, char *buf);
extern void iscsi_suspend_tx(struct iscsi_conn *conn);
extern void iscsi_suspend_queue(struct iscsi_conn *conn);
extern void iscsi_conn_queue_work(struct iscsi_conn *conn);
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index b76d4006e36d..3668903e397b 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -435,6 +435,10 @@ static inline int scsi_is_wlun(unsigned int lun)
* recover the link. Transport class will
* retry or fail IO */
#define DID_TRANSPORT_FAILFAST 0x0f /* Transport class fastfailed the io */
+#define DID_TARGET_FAILURE 0x10 /* Permanent target failure, do not retry on
+ * other paths */
+#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other
+ * paths might yield different results */
#define DRIVER_OK 0x00 /* Driver status */
/*
@@ -464,6 +468,7 @@ static inline int scsi_is_wlun(unsigned int lun)
#define TIMEOUT_ERROR 0x2007
#define SCSI_RETURN_NOT_HANDLED 0x2008
#define FAST_IO_FAIL 0x2009
+#define TARGET_ERROR 0x200A
/*
* Midlevel queue return values.
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 85867dcde335..f171c65dc5a8 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -184,6 +184,7 @@ typedef void (*activate_complete)(void *, int);
struct scsi_device_handler {
/* Used by the infrastructure */
struct list_head list; /* list of scsi_device_handlers */
+ int idx;
/* Filled by the hardware handler */
struct module *module;
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 7fff94b3b2a8..bf8f52965675 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -101,6 +101,8 @@ struct iscsi_transport {
void (*destroy_conn) (struct iscsi_cls_conn *conn);
int (*set_param) (struct iscsi_cls_conn *conn, enum iscsi_param param,
char *buf, int buflen);
+ int (*get_ep_param) (struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf);
int (*get_conn_param) (struct iscsi_cls_conn *conn,
enum iscsi_param param, char *buf);
int (*get_session_param) (struct iscsi_cls_session *session,
@@ -160,8 +162,9 @@ struct iscsi_cls_conn {
void *dd_data; /* LLD private data */
struct iscsi_transport *transport;
uint32_t cid; /* connection id */
+ struct mutex ep_mutex;
+ struct iscsi_endpoint *ep;
- int active; /* must be accessed with the connlock */
struct device dev; /* sysfs transport/container device */
};
@@ -222,6 +225,7 @@ struct iscsi_endpoint {
void *dd_data; /* LLD private data */
struct device dev;
uint64_t id;
+ struct iscsi_cls_conn *conn;
};
/*
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index 25fbefdf2f2e..db6c93510f74 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -184,6 +184,17 @@
scsi_statusbyte_name(SAM_STAT_ACA_ACTIVE), \
scsi_statusbyte_name(SAM_STAT_TASK_ABORTED))
+#define scsi_prot_op_name(result) { result, #result }
+#define show_prot_op_name(val) \
+ __print_symbolic(val, \
+ scsi_prot_op_name(SCSI_PROT_NORMAL), \
+ scsi_prot_op_name(SCSI_PROT_READ_INSERT), \
+ scsi_prot_op_name(SCSI_PROT_WRITE_STRIP), \
+ scsi_prot_op_name(SCSI_PROT_READ_STRIP), \
+ scsi_prot_op_name(SCSI_PROT_WRITE_INSERT), \
+ scsi_prot_op_name(SCSI_PROT_READ_PASS), \
+ scsi_prot_op_name(SCSI_PROT_WRITE_PASS))
+
const char *scsi_trace_parse_cdb(struct trace_seq*, unsigned char*, int);
#define __parse_cdb(cdb, len) scsi_trace_parse_cdb(p, cdb, len)
@@ -202,6 +213,7 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__field( unsigned int, cmd_len )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
+ __field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
),
@@ -214,13 +226,15 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__entry->cmd_len = cmd->cmd_len;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
+ __entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " cmnd=(%s %s raw=%s)",
+ " prot_op=%s cmnd=(%s %s raw=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_prot_op_name(__entry->prot_op),
show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
@@ -242,6 +256,7 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__field( unsigned int, cmd_len )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
+ __field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
),
@@ -255,13 +270,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
__entry->cmd_len = cmd->cmd_len;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
+ __entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
- " cmnd=(%s %s raw=%s) rtn=%d",
+ " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_prot_op_name(__entry->prot_op),
show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
@@ -284,6 +301,7 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__field( unsigned int, cmd_len )
__field( unsigned int, data_sglen )
__field( unsigned int, prot_sglen )
+ __field( unsigned char, prot_op )
__dynamic_array(unsigned char, cmnd, cmd->cmd_len)
),
@@ -297,14 +315,16 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
__entry->cmd_len = cmd->cmd_len;
__entry->data_sglen = scsi_sg_count(cmd);
__entry->prot_sglen = scsi_prot_sg_count(cmd);
+ __entry->prot_op = scsi_get_prot_op(cmd);
memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len);
),
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \
- "prot_sgl=%u cmnd=(%s %s raw=%s) result=(driver=%s host=%s " \
- "message=%s status=%s)",
+ "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \
+ "%s host=%s message=%s status=%s)",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
+ show_prot_op_name(__entry->prot_op),
show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
diff --git a/include/video/sh_mobile_lcdc.h b/include/video/sh_mobile_lcdc.h
index daabae5817c6..2c8d369190b3 100644
--- a/include/video/sh_mobile_lcdc.h
+++ b/include/video/sh_mobile_lcdc.h
@@ -59,6 +59,8 @@ struct sh_mobile_lcdc_board_cfg {
struct sh_mobile_lcdc_sys_bus_ops *sys_ops);
void (*display_on)(void *board_data, struct fb_info *info);
void (*display_off)(void *board_data);
+ int (*set_brightness)(void *board_data, int brightness);
+ int (*get_brightness)(void *board_data);
};
struct sh_mobile_lcdc_lcd_size_cfg { /* width and height of panel in mm */
@@ -66,6 +68,12 @@ struct sh_mobile_lcdc_lcd_size_cfg { /* width and height of panel in mm */
unsigned long height;
};
+/* backlight info */
+struct sh_mobile_lcdc_bl_info {
+ const char *name;
+ int max_brightness;
+};
+
struct sh_mobile_lcdc_chan_cfg {
int chan;
int bpp;
@@ -76,7 +84,9 @@ struct sh_mobile_lcdc_chan_cfg {
int num_cfg;
struct sh_mobile_lcdc_lcd_size_cfg lcd_size_cfg;
struct sh_mobile_lcdc_board_cfg board_cfg;
+ struct sh_mobile_lcdc_bl_info bl_info;
struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */
+ int nonstd;
};
struct sh_mobile_lcdc_info {
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
new file mode 100644
index 000000000000..a2b22f01a51d
--- /dev/null
+++ b/include/xen/balloon.h
@@ -0,0 +1,25 @@
+/******************************************************************************
+ * Xen balloon functionality
+ */
+
+#define RETRY_UNLIMITED 0
+
+struct balloon_stats {
+ /* We aim for 'current allocation' == 'target allocation'. */
+ unsigned long current_pages;
+ unsigned long target_pages;
+ /* Number of pages in high- and low-memory balloons. */
+ unsigned long balloon_low;
+ unsigned long balloon_high;
+ unsigned long schedule_delay;
+ unsigned long max_schedule_delay;
+ unsigned long retry_count;
+ unsigned long max_retry_count;
+};
+
+extern struct balloon_stats balloon_stats;
+
+void balloon_set_new_target(unsigned long target);
+
+int alloc_xenballooned_pages(int nr_pages, struct page **pages);
+void free_xenballooned_pages(int nr_pages, struct page **pages);
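A minimal sketch of the new ballooned-page API, assuming a caller that needs two backing pages for foreign mappings:

struct page *pages[2];

if (alloc_xenballooned_pages(2, pages) == 0) {
	/* ... map foreign grants into the returned pages ... */
	free_xenballooned_pages(2, pages);
}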
diff --git a/include/xen/events.h b/include/xen/events.h
index d3b9010ee96a..f1b87ad48ac7 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -47,9 +47,9 @@ static inline void notify_remote_via_evtchn(int port)
(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
}
-extern void notify_remote_via_irq(int irq);
+void notify_remote_via_irq(int irq);
-extern void xen_irq_resume(void);
+void xen_irq_resume(void);
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq);
@@ -68,20 +68,22 @@ void xen_poll_irq_timeout(int irq, u64 timeout);
unsigned irq_from_evtchn(unsigned int evtchn);
/* Xen HVM evtchn vector callback */
-extern void xen_hvm_callback_vector(void);
+void xen_hvm_callback_vector(void);
extern int xen_have_vector_callback;
int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
-/* Allocate an irq for a physical interrupt, given a gsi. "Legacy"
- * GSIs are identity mapped; others are dynamically allocated as
- * usual. */
-int xen_allocate_pirq(unsigned gsi, int shareable, char *name);
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name);
+/* Allocate a pirq for a physical interrupt, given a gsi. */
+int xen_allocate_pirq_gsi(unsigned gsi);
+/* Bind a pirq for a physical interrupt to an irq. */
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ unsigned pirq, int shareable, char *name);
#ifdef CONFIG_PCI_MSI
+/* Allocate a pirq for an MSI style physical interrupt. */
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc);
+/* Bind an MSI pirq to an irq. */
int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int pirq, int vector, const char *name);
#endif
@@ -89,12 +91,6 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
/* De-allocates the above mentioned physical interrupt. */
int xen_destroy_irq(int irq);
-/* Return vector allocated to pirq */
-int xen_vector_from_irq(unsigned pirq);
-
-/* Return gsi allocated to pirq */
-int xen_gsi_from_irq(unsigned pirq);
-
/* Return irq from pirq */
int xen_irq_from_pirq(unsigned pirq);
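The former one-shot xen_allocate_pirq() becomes an explicit two-step sequence; a sketch (the gsi value and name string are illustrative):

int pirq = xen_allocate_pirq_gsi(gsi);	/* reserve a pirq for this gsi */
int irq = -1;

if (pirq >= 0)
	irq = xen_bind_pirq_gsi_to_irq(gsi, pirq,
				       1 /* shareable */, "mydev");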
diff --git a/include/xen/gntalloc.h b/include/xen/gntalloc.h
new file mode 100644
index 000000000000..76bd58065f4f
--- /dev/null
+++ b/include/xen/gntalloc.h
@@ -0,0 +1,82 @@
+/******************************************************************************
+ * gntalloc.h
+ *
+ * Interface to /dev/xen/gntalloc.
+ *
+ * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ *
+ * This file is in the public domain.
+ */
+
+#ifndef __LINUX_PUBLIC_GNTALLOC_H__
+#define __LINUX_PUBLIC_GNTALLOC_H__
+
+/*
+ * Allocates a new page and creates a new grant reference.
+ */
+#define IOCTL_GNTALLOC_ALLOC_GREF \
+_IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_gntalloc_alloc_gref))
+struct ioctl_gntalloc_alloc_gref {
+ /* IN parameters */
+ /* The ID of the domain to be given access to the grants. */
+ uint16_t domid;
+ /* Flags for this mapping */
+ uint16_t flags;
+ /* Number of pages to map */
+ uint32_t count;
+ /* OUT parameters */
+ /* The offset to be used on a subsequent call to mmap(). */
+ uint64_t index;
+ /* The grant references of the newly created grant, one per page */
+ /* Variable size, depending on count */
+ uint32_t gref_ids[1];
+};
+
+#define GNTALLOC_FLAG_WRITABLE 1
+
+/*
+ * Deallocates the grant reference, allowing the associated page to be freed if
+ * no other domains are using it.
+ */
+#define IOCTL_GNTALLOC_DEALLOC_GREF \
+_IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_gntalloc_dealloc_gref))
+struct ioctl_gntalloc_dealloc_gref {
+ /* IN parameters */
+ /* The offset returned in the map operation */
+ uint64_t index;
+ /* Number of references to unmap */
+ uint32_t count;
+};
+
+/*
+ * Sets up an unmap notification within the page, so that the other side can do
+ * cleanup if this side crashes. Required to implement cross-domain robust
+ * mutexes or close notification on communication channels.
+ *
+ * Each mapped page only supports one notification; multiple calls referring to
+ * the same page overwrite the previous notification. You must clear the
+ * notification prior to the IOCTL_GNTALLOC_DEALLOC_GREF if you do not want it
+ * to occur.
+ */
+#define IOCTL_GNTALLOC_SET_UNMAP_NOTIFY \
+_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntalloc_unmap_notify))
+struct ioctl_gntalloc_unmap_notify {
+ /* IN parameters */
+ /* Offset in the file descriptor for a byte within the page (same as
+ * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
+ * be cleared. Otherwise, it can be any byte in the page whose
+ * notification we are adjusting.
+ */
+ uint64_t index;
+ /* Action(s) to take on unmap */
+ uint32_t action;
+ /* Event channel to notify */
+ uint32_t event_channel_port;
+};
+
+/* Clear (set to zero) the byte specified by index */
+#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
+/* Send an interrupt on the indicated event channel */
+#define UNMAP_NOTIFY_SEND_EVENT 0x2
+
+#endif /* __LINUX_PUBLIC_GNTALLOC_H__ */
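A hedged userspace sketch of the allocation path; the domid and page size are illustrative:

struct ioctl_gntalloc_alloc_gref op = {
	.domid = 1,				/* peer domain */
	.flags = GNTALLOC_FLAG_WRITABLE,
	.count = 1,
};
int fd = open("/dev/xen/gntalloc", O_RDWR);

if (fd >= 0 && ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &op) == 0) {
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, op.index);
	/* advertise op.gref_ids[0] to the peer, e.g. via xenstore */
}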
diff --git a/include/xen/gntdev.h b/include/xen/gntdev.h
index eb23f4188f5a..5304bd3c84c5 100644
--- a/include/xen/gntdev.h
+++ b/include/xen/gntdev.h
@@ -116,4 +116,35 @@ struct ioctl_gntdev_set_max_grants {
uint32_t count;
};
+/*
+ * Sets up an unmap notification within the page, so that the other side can do
+ * cleanup if this side crashes. Required to implement cross-domain robust
+ * mutexes or close notification on communication channels.
+ *
+ * Each mapped page only supports one notification; multiple calls referring to
+ * the same page overwrite the previous notification. You must clear the
+ * notification prior to the IOCTL_GNTDEV_UNMAP_GRANT_REF if you do not want it
+ * to occur.
+ */
+#define IOCTL_GNTDEV_SET_UNMAP_NOTIFY \
+_IOC(_IOC_NONE, 'G', 7, sizeof(struct ioctl_gntdev_unmap_notify))
+struct ioctl_gntdev_unmap_notify {
+ /* IN parameters */
+ /* Offset in the file descriptor for a byte within the page (same as
+ * used in mmap). If using UNMAP_NOTIFY_CLEAR_BYTE, this is the byte to
+ * be cleared. Otherwise, it can be any byte in the page whose
+ * notification we are adjusting.
+ */
+ uint64_t index;
+ /* Action(s) to take on unmap */
+ uint32_t action;
+ /* Event channel to notify */
+ uint32_t event_channel_port;
+};
+
+/* Clear (set to zero) the byte specified by index */
+#define UNMAP_NOTIFY_CLEAR_BYTE 0x1
+/* Send an interrupt on the indicated event channel */
+#define UNMAP_NOTIFY_SEND_EVENT 0x2
+
#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 5fec575a800a..dd55dac340de 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -65,6 +65,39 @@ struct sched_poll {
DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
/*
+ * Declare a shutdown for another domain. The main use of this function is
+ * in interpreting shutdown requests and reasons for fully-virtualized
+ * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
+ * @arg == pointer to sched_remote_shutdown structure.
+ */
+#define SCHEDOP_remote_shutdown 4
+struct sched_remote_shutdown {
+ domid_t domain_id; /* Remote domain ID */
+ unsigned int reason; /* SHUTDOWN_xxx reason */
+};
+
+/*
+ * Latch a shutdown code, so that when the domain later shuts down it
+ * reports this code to the control tools.
+ * @arg == as for SCHEDOP_shutdown.
+ */
+#define SCHEDOP_shutdown_code 5
+
+/*
+ * Set up, poke and destroy a domain watchdog timer.
+ * @arg == pointer to sched_watchdog structure.
+ * With id == 0, set up a domain watchdog timer to cause domain shutdown
+ * after timeout, returns watchdog id.
+ * With id != 0 and timeout == 0, destroy domain watchdog timer.
+ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
+ */
+#define SCHEDOP_watchdog 6
+struct sched_watchdog {
+ uint32_t id; /* watchdog ID */
+ uint32_t timeout; /* timeout */
+};
+
+/*
* Reason codes for SCHEDOP_shutdown. These may be interpreted by control
* software to determine the appropriate action. For the most part, Xen does
* not care about the shutdown code.
@@ -73,5 +106,6 @@ DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
#define SHUTDOWN_reboot 1 /* Clean up, kill, and then restart. */
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
+#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
#endif /* __XEN_PUBLIC_SCHED_H__ */
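A sketch of the watchdog lifecycle from a guest; the 30-second timeout is illustrative:

struct sched_watchdog wd = { .id = 0, .timeout = 30 };
int id = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);	/* set up, returns id */

wd.id = id;
HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);	/* poke; re-arms the timeout */

wd.timeout = 0;
HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);	/* destroy */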
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 7a1d15ff19b7..5467369e0889 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -92,7 +92,7 @@ struct xenbus_driver {
void (*otherend_changed)(struct xenbus_device *dev,
enum xenbus_state backend_state);
int (*remove)(struct xenbus_device *dev);
- int (*suspend)(struct xenbus_device *dev, pm_message_t state);
+ int (*suspend)(struct xenbus_device *dev);
int (*resume)(struct xenbus_device *dev);
int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
struct device_driver driver;
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 70a298d6da71..b8cadf70b1fb 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -34,7 +34,7 @@ config GCOV_KERNEL
config GCOV_PROFILE_ALL
bool "Profile entire Kernel"
depends on GCOV_KERNEL
- depends on S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE
+ depends on SUPERH || S390 || X86 || (PPC && EXPERIMENTAL) || MICROBLAZE
default n
---help---
This options activates profiling for the entire kernel.
diff --git a/kernel/smp.c b/kernel/smp.c
index 9910744f0856..7cbd0f293df4 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,7 +194,7 @@ void generic_smp_call_function_interrupt(void)
*/
list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
- void (*func) (void *info);
+ smp_call_func_t func;
/*
* Since we walk the list without any locks, we might
@@ -214,17 +214,17 @@ void generic_smp_call_function_interrupt(void)
if (atomic_read(&data->refs) == 0)
continue;
- func = data->csd.func; /* for later warn */
- data->csd.func(data->csd.info);
+ func = data->csd.func; /* save for later warn */
+ func(data->csd.info);
/*
- * If the cpu mask is not still set then it enabled interrupts,
- * we took another smp interrupt, and executed the function
- * twice on this cpu. In theory that copy decremented refs.
+ * If the cpu mask is not still set then func enabled
+ * interrupts (BUG), and this cpu took another smp call
+ * function interrupt and executed func(info) twice
+ * on this cpu. That nested execution decremented refs.
*/
if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
- WARN(1, "%pS enabled interrupts and double executed\n",
- func);
+ WARN(1, "%pf enabled interrupts and double executed\n", func);
continue;
}
@@ -450,7 +450,7 @@ void smp_call_function_many(const struct cpumask *mask,
{
struct call_function_data *data;
unsigned long flags;
- int cpu, next_cpu, this_cpu = smp_processor_id();
+ int refs, cpu, next_cpu, this_cpu = smp_processor_id();
/*
* Can deadlock when called with interrupts disabled.
@@ -461,7 +461,7 @@ void smp_call_function_many(const struct cpumask *mask,
WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
&& !oops_in_progress && !early_boot_irqs_disabled);
- /* So, what's a CPU they want? Ignoring this one. */
+ /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
if (cpu == this_cpu)
cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
@@ -483,22 +483,49 @@ void smp_call_function_many(const struct cpumask *mask,
data = &__get_cpu_var(cfd_data);
csd_lock(&data->csd);
+
+ /* This BUG_ON verifies our reuse assertions and can be removed */
BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
+ /*
+ * The global call function queue list add and delete are protected
+ * by a lock, but the list is traversed without any lock, relying
+ * on the rcu list add and delete to allow safe concurrent traversal.
+ * We reuse the call function data without waiting for any grace
+ * period after some other cpu removes it from the global queue.
+ * This means a cpu might find our data block as it is being
+ * filled out.
+ *
+ * We hold off the interrupt handler on the other cpu by
+ * ordering our writes to the cpu mask vs our setting of the
+ * refs counter. We assert only the cpu owning the data block
+ * will set a bit in cpumask, and each bit will only be cleared
+ * by the subject cpu. Each cpu must first find its bit is
+ * set and then check that refs is set indicating the element is
+ * ready to be processed, otherwise it must skip the entry.
+ *
+ * On the previous iteration refs was set to 0 by another cpu.
+ * To avoid the use of transitivity, set the counter to 0 here
+ * so the wmb will pair with the rmb in the interrupt handler.
+ */
+ atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */
+
data->csd.func = func;
data->csd.info = info;
- cpumask_and(data->cpumask, mask, cpu_online_mask);
- cpumask_clear_cpu(this_cpu, data->cpumask);
- /*
- * To ensure the interrupt handler gets an complete view
- * we order the cpumask and refs writes and order the read
- * of them in the interrupt handler. In addition we may
- * only clear our own cpu bit from the mask.
- */
+ /* Ensure 0 refs is visible before mask. Also orders func and info */
smp_wmb();
- atomic_set(&data->refs, cpumask_weight(data->cpumask));
+ /* We rely on the "and" being processed before the store */
+ cpumask_and(data->cpumask, mask, cpu_online_mask);
+ cpumask_clear_cpu(this_cpu, data->cpumask);
+ refs = cpumask_weight(data->cpumask);
+
+ /* Some callers race with other cpus changing the passed mask */
+ if (unlikely(!refs)) {
+ csd_unlock(&data->csd);
+ return;
+ }
raw_spin_lock_irqsave(&call_function.lock, flags);
/*
@@ -507,6 +534,12 @@ void smp_call_function_many(const struct cpumask *mask,
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
+ /*
+ * We rely on the wmb() in list_add_rcu to complete our writes
+ * to the cpumask before this write to refs, which indicates
+ * data is on the list and is ready to be processed.
+ */
+ atomic_set(&data->refs, refs);
raw_spin_unlock_irqrestore(&call_function.lock, flags);
/*
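Condensed, the publish protocol the new comments describe pairs up as below (a sketch of the ordering, not the verbatim code):

/* sender side */
atomic_set(&data->refs, 0);		/* first-party zero */
smp_wmb();				/* zero visible before mask/func/info */
data->csd.func = func;
data->csd.info = info;
cpumask_and(data->cpumask, mask, cpu_online_mask);
list_add_rcu(&data->csd.list, &call_function.queue);	/* wmb inside */
atomic_set(&data->refs, refs);		/* publish: element ready */

/* receiver side, inside the list_for_each_entry_rcu() walk */
if (!cpumask_test_cpu(cpu, data->cpumask))
	continue;			/* our bit not (yet) set */
smp_rmb();				/* pairs with the sender's barriers */
if (!atomic_read(&data->refs))
	continue;			/* mask set but refs not yet published */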
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bd7625676a64..7945247b1e53 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,7 +286,7 @@ static void bad_page(struct page *page)
/* Don't complain about poisoned pages */
if (PageHWPoison(page)) {
- __ClearPageBuddy(page);
+ reset_page_mapcount(page); /* remove PageBuddy */
return;
}
@@ -317,7 +317,7 @@ static void bad_page(struct page *page)
dump_stack();
out:
/* Leave bad fields for debug, except PageBuddy could make trouble */
- __ClearPageBuddy(page);
+ reset_page_mapcount(page); /* remove PageBuddy */
add_taint(TAINT_BAD_PAGE);
}
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 45dbf1521b9a..f3914d0c5079 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -417,7 +417,7 @@ static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
gss_msg->msg.len += len;
}
if (mech->gm_upcall_enctypes) {
- len = sprintf(p, mech->gm_upcall_enctypes);
+ len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
p += len;
gss_msg->msg.len += len;
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index f375decc024b..9022f0a6503e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -750,7 +750,7 @@ static struct gss_api_mech gss_kerberos_mech = {
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
.gm_pfs = gss_kerberos_pfs,
- .gm_upcall_enctypes = "enctypes=18,17,16,23,3,1,2 ",
+ .gm_upcall_enctypes = "18,17,16,23,3,1,2",
};
static int __init init_kerberos_module(void)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 57d344cf2256..e7a96e478f63 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -436,7 +436,9 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
rovr->tk_flags |= RPC_TASK_KILLED;
rpc_exit(rovr, -EIO);
- rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr);
+ if (RPC_IS_QUEUED(rovr))
+ rpc_wake_up_queued_task(rovr->tk_waitqueue,
+ rovr);
}
}
spin_unlock(&clnt->cl_lock);
@@ -597,6 +599,14 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
}
}
+void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+ rpc_task_release_client(task);
+ rpc_task_set_client(task, clnt);
+}
+EXPORT_SYMBOL_GPL(rpc_task_reset_client);
+
+
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
@@ -636,12 +646,6 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
rpc_task_set_client(task, task_setup_data->rpc_client);
rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
- if (task->tk_status != 0) {
- int ret = task->tk_status;
- rpc_put_task(task);
- return ERR_PTR(ret);
- }
-
if (task->tk_action == NULL)
rpc_call_start(task);
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 3fc8624fcd17..ffb687671da0 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -299,15 +299,8 @@ static void rpc_make_runnable(struct rpc_task *task)
if (rpc_test_and_set_running(task))
return;
if (RPC_IS_ASYNC(task)) {
- int status;
-
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
- status = queue_work(rpciod_workqueue, &task->u.tk_work);
- if (status < 0) {
- printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
- task->tk_status = status;
- return;
- }
+ queue_work(rpciod_workqueue, &task->u.tk_work);
} else
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
@@ -637,14 +630,12 @@ static void __rpc_execute(struct rpc_task *task)
save_callback = task->tk_callback;
task->tk_callback = NULL;
save_callback(task);
- }
-
- /*
- * Perform the next FSM step.
- * tk_action may be NULL when the task has been killed
- * by someone else.
- */
- if (!RPC_IS_QUEUED(task)) {
+ } else {
+ /*
+ * Perform the next FSM step.
+ * tk_action may be NULL when the task has been killed
+ * by someone else.
+ */
if (task->tk_action == NULL)
break;
task->tk_action(task);
@@ -843,12 +834,6 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
}
rpc_init_task(task, setup_data);
- if (task->tk_status < 0) {
- int err = task->tk_status;
- rpc_put_task(task);
- return ERR_PTR(err);
- }
-
task->tk_flags |= flags;
dprintk("RPC: allocated task %p\n", task);
return task;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 856274d7e85c..9494c3767356 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -202,10 +202,9 @@ int xprt_reserve_xprt(struct rpc_task *task)
goto out_sleep;
}
xprt->snd_task = task;
- if (req) {
- req->rq_bytes_sent = 0;
- req->rq_ntrans++;
- }
+ req->rq_bytes_sent = 0;
+ req->rq_ntrans++;
+
return 1;
out_sleep:
@@ -213,7 +212,7 @@ out_sleep:
task->tk_pid, xprt);
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
- if (req && req->rq_ntrans)
+ if (req->rq_ntrans)
rpc_sleep_on(&xprt->resend, task, NULL);
else
rpc_sleep_on(&xprt->sending, task, NULL);
@@ -965,7 +964,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, int size, int max_req)
xprt = kzalloc(size, GFP_KERNEL);
if (xprt == NULL)
goto out;
- kref_init(&xprt->kref);
+ atomic_set(&xprt->count, 1);
xprt->max_reqs = max_req;
xprt->slot = kcalloc(max_req, sizeof(struct rpc_rqst), GFP_KERNEL);
@@ -1145,13 +1144,11 @@ found:
/**
* xprt_destroy - destroy an RPC transport, killing off all requests.
- * @kref: kref for the transport to destroy
+ * @xprt: transport to destroy
*
*/
-static void xprt_destroy(struct kref *kref)
+static void xprt_destroy(struct rpc_xprt *xprt)
{
- struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);
-
dprintk("RPC: destroying transport %p\n", xprt);
xprt->shutdown = 1;
del_timer_sync(&xprt->timer);
@@ -1175,7 +1172,8 @@ static void xprt_destroy(struct kref *kref)
*/
void xprt_put(struct rpc_xprt *xprt)
{
- kref_put(&xprt->kref, xprt_destroy);
+ if (atomic_dec_and_test(&xprt->count))
+ xprt_destroy(xprt);
}
/**
@@ -1185,6 +1183,7 @@ void xprt_put(struct rpc_xprt *xprt)
*/
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
- kref_get(&xprt->kref);
- return xprt;
+ if (atomic_inc_not_zero(&xprt->count))
+ return xprt;
+ return NULL;
}
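The kref-to-atomic conversion exists so that xprt_get() can fail once teardown has begun; a sketch of the resulting caller pattern:

struct rpc_xprt *x = xprt_get(xprt);	/* NULL if the count already hit zero */

if (x != NULL) {
	/* ... transport is pinned and safe to use ... */
	xprt_put(x);			/* the final put calls xprt_destroy() */
}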
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 2ac3f6e8adff..554d0814c875 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -87,6 +87,8 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
int len, n = 0, p;
+ int page_base;
+ struct page **ppages;
if (pos == 0 && xdrbuf->head[0].iov_len) {
seg[n].mr_page = NULL;
@@ -95,34 +97,32 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
++n;
}
- if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
- if (n == nsegs)
- return 0;
- seg[n].mr_page = xdrbuf->pages[0];
- seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
- seg[n].mr_len = min_t(u32,
- PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
- len = xdrbuf->page_len - seg[n].mr_len;
+ len = xdrbuf->page_len;
+ ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
+ page_base = xdrbuf->page_base & ~PAGE_MASK;
+ p = 0;
+ while (len && n < nsegs) {
+ seg[n].mr_page = ppages[p];
+ seg[n].mr_offset = (void *)(unsigned long) page_base;
+ seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
+ BUG_ON(seg[n].mr_len > PAGE_SIZE);
+ len -= seg[n].mr_len;
++n;
- p = 1;
- while (len > 0) {
- if (n == nsegs)
- return 0;
- seg[n].mr_page = xdrbuf->pages[p];
- seg[n].mr_offset = NULL;
- seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
- len -= seg[n].mr_len;
- ++n;
- ++p;
- }
+ ++p;
+ page_base = 0; /* page offset only applies to first page */
}
+ /* Message overflows the seg array */
+ if (len && n == nsegs)
+ return 0;
+
if (xdrbuf->tail[0].iov_len) {
/* the rpcrdma protocol allows us to omit any trailing
* xdr pad bytes, saving the server an RDMA operation. */
if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
return n;
if (n == nsegs)
+ /* Tail remains, but we're out of segments */
return 0;
seg[n].mr_page = NULL;
seg[n].mr_offset = xdrbuf->tail[0].iov_base;
@@ -296,6 +296,8 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
int copy_len;
unsigned char *srcp, *destp;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
+ int page_base;
+ struct page **ppages;
destp = rqst->rq_svec[0].iov_base;
curlen = rqst->rq_svec[0].iov_len;
@@ -324,28 +326,25 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
__func__, destp + copy_len, curlen);
rqst->rq_svec[0].iov_len += curlen;
}
-
r_xprt->rx_stats.pullup_copy_count += copy_len;
- npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
+
+ page_base = rqst->rq_snd_buf.page_base;
+ ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
+ page_base &= ~PAGE_MASK;
+ npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
for (i = 0; copy_len && i < npages; i++) {
- if (i == 0)
- curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
- else
- curlen = PAGE_SIZE;
+ curlen = PAGE_SIZE - page_base;
if (curlen > copy_len)
curlen = copy_len;
dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
__func__, i, destp, copy_len, curlen);
- srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
- KM_SKB_SUNRPC_DATA);
- if (i == 0)
- memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
- else
- memcpy(destp, srcp, curlen);
+ srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+ memcpy(destp, srcp+page_base, curlen);
kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
rqst->rq_svec[0].iov_len += curlen;
destp += curlen;
copy_len -= curlen;
+ page_base = 0;
}
/* header now contains entire send message */
return pad;
@@ -606,6 +605,8 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
int i, npages, curlen, olen;
char *destp;
+ struct page **ppages;
+ int page_base;
curlen = rqst->rq_rcv_buf.head[0].iov_len;
if (curlen > copy_len) { /* write chunk header fixup */
@@ -624,32 +625,29 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
olen = copy_len;
i = 0;
rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
+ page_base = rqst->rq_rcv_buf.page_base;
+ ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
+ page_base &= ~PAGE_MASK;
+
if (copy_len && rqst->rq_rcv_buf.page_len) {
- npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
+ npages = PAGE_ALIGN(page_base +
rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
for (; i < npages; i++) {
- if (i == 0)
- curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
- else
- curlen = PAGE_SIZE;
+ curlen = PAGE_SIZE - page_base;
if (curlen > copy_len)
curlen = copy_len;
dprintk("RPC: %s: page %d"
" srcp 0x%p len %d curlen %d\n",
__func__, i, srcp, copy_len, curlen);
- destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
- KM_SKB_SUNRPC_DATA);
- if (i == 0)
- memcpy(destp + rqst->rq_rcv_buf.page_base,
- srcp, curlen);
- else
- memcpy(destp, srcp, curlen);
- flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
+ destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+ memcpy(destp + page_base, srcp, curlen);
+ flush_dcache_page(ppages[i]);
kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
srcp += curlen;
copy_len -= curlen;
if (copy_len == 0)
break;
+ page_base = 0;
}
rqst->rq_rcv_buf.page_len = olen - copy_len;
} else
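All three hunks above use the same page-walk idiom: locate the first page and in-page offset once, then zero the offset after the first iteration. A sketch of the pattern:

struct page **ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
int page_base = buf->page_base & ~PAGE_MASK;
size_t len = buf->page_len;

while (len) {
	size_t cur = min_t(size_t, PAGE_SIZE - page_base, len);
	/* ... consume cur bytes of ppages[0], starting at page_base ... */
	len -= cur;
	++ppages;
	page_base = 0;	/* the offset applies to the first page only */
}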
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 5f4c7b3bc711..d4297dc43dc4 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -144,6 +144,7 @@ rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
static inline
void rpcrdma_event_process(struct ib_wc *wc)
{
+ struct rpcrdma_mw *frmr;
struct rpcrdma_rep *rep =
(struct rpcrdma_rep *)(unsigned long) wc->wr_id;
@@ -154,15 +155,23 @@ void rpcrdma_event_process(struct ib_wc *wc)
return;
if (IB_WC_SUCCESS != wc->status) {
- dprintk("RPC: %s: %s WC status %X, connection lost\n",
- __func__, (wc->opcode & IB_WC_RECV) ? "recv" : "send",
- wc->status);
+ dprintk("RPC: %s: WC opcode %d status %X, connection lost\n",
+ __func__, wc->opcode, wc->status);
rep->rr_len = ~0U;
- rpcrdma_schedule_tasklet(rep);
+ if (wc->opcode != IB_WC_FAST_REG_MR && wc->opcode != IB_WC_LOCAL_INV)
+ rpcrdma_schedule_tasklet(rep);
return;
}
switch (wc->opcode) {
+ case IB_WC_FAST_REG_MR:
+ frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+ frmr->r.frmr.state = FRMR_IS_VALID;
+ break;
+ case IB_WC_LOCAL_INV:
+ frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
+ frmr->r.frmr.state = FRMR_IS_INVALID;
+ break;
case IB_WC_RECV:
rep->rr_len = wc->byte_len;
ib_dma_sync_single_for_cpu(
@@ -1450,6 +1459,12 @@ rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
seg->mr_offset,
seg->mr_dmalen, seg->mr_dir);
+ if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
+ dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
+ __func__,
+ (unsigned long long)seg->mr_dma,
+ seg->mr_offset, seg->mr_dmalen);
+ }
}
static void
@@ -1469,7 +1484,8 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_mr_seg *seg1 = seg;
- struct ib_send_wr frmr_wr, *bad_wr;
+ struct ib_send_wr invalidate_wr, frmr_wr, *bad_wr, *post_wr;
+
u8 key;
int len, pageoff;
int i, rc;
@@ -1484,6 +1500,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
rpcrdma_map_one(ia, seg, writing);
seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->page_list[i] = seg->mr_dma;
len += seg->mr_len;
+ BUG_ON(seg->mr_len > PAGE_SIZE);
++seg;
++i;
/* Check for holes */
@@ -1494,26 +1511,45 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
dprintk("RPC: %s: Using frmr %p to map %d segments\n",
__func__, seg1->mr_chunk.rl_mw, i);
+ if (unlikely(seg1->mr_chunk.rl_mw->r.frmr.state == FRMR_IS_VALID)) {
+ dprintk("RPC: %s: frmr %x left valid, posting invalidate.\n",
+ __func__,
+ seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey);
+ /* Invalidate before using. */
+ memset(&invalidate_wr, 0, sizeof invalidate_wr);
+ invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
+ invalidate_wr.next = &frmr_wr;
+ invalidate_wr.opcode = IB_WR_LOCAL_INV;
+ invalidate_wr.send_flags = IB_SEND_SIGNALED;
+ invalidate_wr.ex.invalidate_rkey =
+ seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+ DECR_CQCOUNT(&r_xprt->rx_ep);
+ post_wr = &invalidate_wr;
+ } else
+ post_wr = &frmr_wr;
+
/* Bump the key */
key = (u8)(seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey & 0x000000FF);
ib_update_fast_reg_key(seg1->mr_chunk.rl_mw->r.frmr.fr_mr, ++key);
/* Prepare FRMR WR */
memset(&frmr_wr, 0, sizeof frmr_wr);
+ frmr_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
frmr_wr.opcode = IB_WR_FAST_REG_MR;
- frmr_wr.send_flags = 0; /* unsignaled */
+ frmr_wr.send_flags = IB_SEND_SIGNALED;
frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
frmr_wr.wr.fast_reg.page_list_len = i;
frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT;
+ BUG_ON(frmr_wr.wr.fast_reg.length < len);
frmr_wr.wr.fast_reg.access_flags = (writing ?
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
IB_ACCESS_REMOTE_READ);
frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
DECR_CQCOUNT(&r_xprt->rx_ep);
- rc = ib_post_send(ia->ri_id->qp, &frmr_wr, &bad_wr);
+ rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
if (rc) {
dprintk("RPC: %s: failed ib_post_send for register,"
@@ -1542,8 +1578,9 @@ rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
rpcrdma_unmap_one(ia, seg++);
memset(&invalidate_wr, 0, sizeof invalidate_wr);
+ invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
invalidate_wr.opcode = IB_WR_LOCAL_INV;
- invalidate_wr.send_flags = 0; /* unsignaled */
+ invalidate_wr.send_flags = IB_SEND_SIGNALED;
invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
DECR_CQCOUNT(&r_xprt->rx_ep);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index c7a7eba991bc..cae761a8536c 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -164,6 +164,7 @@ struct rpcrdma_mr_seg { /* chunk descriptors */
struct {
struct ib_fast_reg_page_list *fr_pgl;
struct ib_mr *fr_mr;
+ enum { FRMR_IS_INVALID, FRMR_IS_VALID } state;
} frmr;
} r;
struct list_head mw_list;