Diffstat (limited to 'drivers')
-rw-r--r-- drivers/cpufreq/cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 123
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 122
-rw-r--r-- drivers/edac/amd64_edac.c | 1442
-rw-r--r-- drivers/edac/amd64_edac.h | 369
-rw-r--r-- drivers/edac/amd64_edac_inj.c | 8
-rw-r--r-- drivers/edac/edac_mc_sysfs.c | 26
-rw-r--r-- drivers/edac/mce_amd.c | 8
-rw-r--r-- drivers/edac/mce_amd.h | 28
-rw-r--r-- drivers/gpu/drm/Kconfig | 47
-rw-r--r-- drivers/gpu/drm/Makefile | 3
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 33
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 49
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 59
-rw-r--r-- drivers/gpu/drm/drm_edid_modes.h | 4
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 5
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 5
-rw-r--r-- drivers/gpu/drm/drm_hashtab.c | 27
-rw-r--r-- drivers/gpu/drm/drm_info.c | 27
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c | 134
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 14
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 570
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 6
-rw-r--r-- drivers/gpu/drm/drm_pci.c | 205
-rw-r--r-- drivers/gpu/drm/drm_platform.c | 75
-rw-r--r-- drivers/gpu/drm/drm_stub.c | 21
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c | 7
-rw-r--r-- drivers/gpu/drm/drm_usb.c | 117
-rw-r--r-- drivers/gpu/drm/i810/i810_dma.c | 18
-rw-r--r-- drivers/gpu/drm/i810/i810_drv.c | 20
-rw-r--r-- drivers/gpu/drm/i830/Makefile | 8
-rw-r--r-- drivers/gpu/drm/i830/i830_dma.c | 1560
-rw-r--r-- drivers/gpu/drm/i830/i830_drv.c | 107
-rw-r--r-- drivers/gpu/drm/i830/i830_drv.h | 295
-rw-r--r-- drivers/gpu/drm/i830/i830_irq.c | 186
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 78
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 40
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 33
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 125
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 406
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_debug.c | 45
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_evict.c | 5
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 176
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 214
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 484
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 435
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 301
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 53
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 33
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1847
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 157
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_dvo.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 13
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_modes.c | 30
-rw-r--r-- drivers/gpu/drm/i915/intel_opregion.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 41
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 30
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 61
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 10
-rw-r--r-- drivers/gpu/drm/mga/mga_dma.c | 2
-rw-r--r-- drivers/gpu/drm/mga/mga_drv.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 41
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 255
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_channel.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 75
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.h | 7
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dp.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.c | 21
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 47
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fb.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 207
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 50
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mem.c | 173
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_mm.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_notifier.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_object.c | 46
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ramht.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 368
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_state.c | 50
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_temp.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_util.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_util.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_vm.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_vm.h | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_crtc.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv04_fifo.c | 19
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_tv.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_tv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_tv_modes.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv40_fb.c | 59
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_crtc.c | 164
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_cursor.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_dac.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 191
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.h | 42
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_evo.c | 290
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_evo.h | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fb.c | 199
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fifo.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_gpio.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_graph.c | 149
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_instmem.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_sor.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_vm.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_vram.c | 65
-rw-r--r-- drivers/gpu/drm/nouveau/nv84_crypt.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_fifo.c | 17
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_graph.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_instmem.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_vm.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvc0_vram.c | 62
-rw-r--r-- drivers/gpu/drm/r128/r128_drv.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 7
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/cayman_blit_shaders.c | 55
-rw-r--r-- drivers/gpu/drm/radeon/cayman_blit_shaders.h | 32
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_blit_kms.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_cs.c | 130
-rw-r--r-- drivers/gpu/drm/radeon/evergreend.h | 17
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 1294
-rw-r--r-- drivers/gpu/drm/radeon/nid.h | 495
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/r600_audio.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 427
-rw-r--r-- drivers/gpu/drm/radeon/r600_hdmi.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 137
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.c | 49
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 87
-rw-r--r-- drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cp.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 52
-rw-r--r-- drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 12
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 38
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 98
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 22
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 30
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/cayman | 619
-rw-r--r-- drivers/gpu/drm/radeon/reg_srcs/evergreen | 2
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 2
-rw-r--r-- drivers/gpu/drm/savage/savage_drv.c | 14
-rw-r--r-- drivers/gpu/drm/sis/sis_drv.c | 13
-rw-r--r-- drivers/gpu/drm/tdfx/tdfx_drv.c | 13
-rw-r--r-- drivers/gpu/drm/ttm/ttm_agp_backend.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 34
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 12
-rw-r--r-- drivers/gpu/drm/via/via_drv.c | 13
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 23
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 5
-rw-r--r-- drivers/hwmon/Kconfig | 92
-rw-r--r-- drivers/hwmon/Makefile | 10
-rw-r--r-- drivers/hwmon/f71882fg.c | 522
-rw-r--r-- drivers/hwmon/lineage-pem.c | 586
-rw-r--r-- drivers/hwmon/lis3lv02d_spi.c | 19
-rw-r--r-- drivers/hwmon/lm85.c | 136
-rw-r--r-- drivers/hwmon/ltc4151.c | 256
-rw-r--r-- drivers/hwmon/max16064.c | 91
-rw-r--r-- drivers/hwmon/max34440.c | 199
-rw-r--r-- drivers/hwmon/max6639.c | 653
-rw-r--r-- drivers/hwmon/max8688.c | 158
-rw-r--r-- drivers/hwmon/pmbus.c | 203
-rw-r--r-- drivers/hwmon/pmbus.h | 313
-rw-r--r-- drivers/hwmon/pmbus_core.c | 1658
-rw-r--r-- drivers/hwmon/w83627ehf.c | 1351
-rw-r--r-- drivers/i2c/busses/Kconfig | 30
-rw-r--r-- drivers/i2c/busses/Makefile | 3
-rw-r--r-- drivers/i2c/busses/i2c-mxs.c | 412
-rw-r--r-- drivers/i2c/busses/i2c-puv3.c | 306
-rw-r--r-- drivers/i2c/busses/i2c-tegra.c | 700
-rw-r--r-- drivers/infiniband/ulp/iser/iscsi_iser.c | 26
-rw-r--r-- drivers/input/serio/i8042-unicore32io.h | 73
-rw-r--r-- drivers/input/serio/i8042.h | 2
-rw-r--r-- drivers/md/dm-mpath.c | 22
-rw-r--r-- drivers/message/fusion/lsi/mpi_cnfg.h | 1
-rw-r--r-- drivers/message/fusion/lsi/mpi_ioc.h | 1
-rw-r--r-- drivers/message/fusion/mptbase.c | 7
-rw-r--r-- drivers/message/fusion/mptctl.c | 4
-rw-r--r-- drivers/message/fusion/mptsas.c | 7
-rw-r--r-- drivers/net/bnx2x/bnx2x_main.c | 7
-rw-r--r-- drivers/pci/Makefile | 1
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 1
-rw-r--r-- drivers/s390/cio/chsc_sch.c | 17
-rw-r--r-- drivers/s390/cio/cio.c | 43
-rw-r--r-- drivers/s390/cio/cio.h | 11
-rw-r--r-- drivers/s390/cio/css.c | 6
-rw-r--r-- drivers/s390/cio/css.h | 10
-rw-r--r-- drivers/s390/cio/device.c | 23
-rw-r--r-- drivers/s390/cio/io_sch.h | 114
-rw-r--r-- drivers/s390/cio/ioasm.h | 34
-rw-r--r-- drivers/s390/cio/orb.h | 67
-rw-r--r-- drivers/s390/scsi/zfcp_aux.c | 80
-rw-r--r-- drivers/s390/scsi/zfcp_def.h | 15
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 4
-rw-r--r-- drivers/s390/scsi/zfcp_ext.h | 9
-rw-r--r-- drivers/s390/scsi/zfcp_fc.c | 333
-rw-r--r-- drivers/s390/scsi/zfcp_fc.h | 124
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 27
-rw-r--r-- drivers/s390/scsi/zfcp_scsi.c | 71
-rw-r--r-- drivers/scsi/Kconfig | 1
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/NCR5380.c | 3
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 4
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.c | 18
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.h | 4
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 3
-rw-r--r-- drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h | 1080
-rw-r--r-- drivers/scsi/bnx2fc/Kconfig | 11
-rw-r--r-- drivers/scsi/bnx2fc/Makefile | 3
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc.h | 511
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_constants.h | 206
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_debug.h | 70
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_els.c | 515
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 2535
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 1868
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 1833
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_tgt.c | 844
-rw-r--r-- drivers/scsi/bnx2i/bnx2i.h | 4
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_hwi.c | 125
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_init.c | 29
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 53
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 56
-rw-r--r-- drivers/scsi/cxgbi/cxgb3i/cxgb3i.h | 19
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 7
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 66
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.h | 5
-rw-r--r-- drivers/scsi/device_handler/scsi_dh.c | 112
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 18
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_emc.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_hp_sw.c | 7
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_rdac.c | 26
-rw-r--r-- drivers/scsi/fcoe/Makefile | 2
-rw-r--r-- drivers/scsi/fcoe/fcoe.c | 621
-rw-r--r-- drivers/scsi/fcoe/fcoe.h | 50
-rw-r--r-- drivers/scsi/fcoe/fcoe_ctlr.c (renamed from drivers/scsi/fcoe/libfcoe.c) | 40
-rw-r--r-- drivers/scsi/fcoe/fcoe_transport.c | 770
-rw-r--r-- drivers/scsi/fcoe/libfcoe.h | 31
-rw-r--r-- drivers/scsi/fnic/fnic.h | 2
-rw-r--r-- drivers/scsi/fnic/vnic_dev.c | 2
-rw-r--r-- drivers/scsi/hpsa.c | 579
-rw-r--r-- drivers/scsi/hpsa.h | 9
-rw-r--r-- drivers/scsi/hpsa_cmd.h | 4
-rw-r--r-- drivers/scsi/ipr.c | 9
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 134
-rw-r--r-- drivers/scsi/iscsi_tcp.h | 4
-rw-r--r-- drivers/scsi/libfc/fc_exch.c | 111
-rw-r--r-- drivers/scsi/libfc/fc_fcp.c | 39
-rw-r--r-- drivers/scsi/libfc/fc_libfc.c | 120
-rw-r--r-- drivers/scsi/libfc/fc_libfc.h | 14
-rw-r--r-- drivers/scsi/libfc/fc_lport.c | 69
-rw-r--r-- drivers/scsi/libfc/fc_npiv.c | 10
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 191
-rw-r--r-- drivers/scsi/libiscsi.c | 44
-rw-r--r-- drivers/scsi/libsas/Kconfig | 8
-rw-r--r-- drivers/scsi/libsas/Makefile | 4
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 80
-rw-r--r-- drivers/scsi/libsas/sas_dump.c | 4
-rw-r--r-- drivers/scsi/libsas/sas_dump.h | 12
-rw-r--r-- drivers/scsi/libsas/sas_expander.c | 5
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 6
-rw-r--r-- drivers/scsi/libsas/sas_scsi_host.c | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 15
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 123
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 9
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 49
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 962
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.h | 60
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 173
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 18
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 11
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw4.h | 104
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 134
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 19
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 11
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 113
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 217
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 8
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_vport.c | 4
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 10
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 147
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 19
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2.h | 5
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 6
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_history.txt | 384
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 7
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 8
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.c | 128
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.h | 40
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1
-rw-r--r-- drivers/scsi/osd/osd_initiator.c | 20
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 37
-rw-r--r-- drivers/scsi/pm8001/pm8001_init.c | 27
-rw-r--r-- drivers/scsi/pm8001/pm8001_sas.h | 10
-rw-r--r-- drivers/scsi/pmcraid.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 1
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 21
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 41
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 52
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 45
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 191
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 57
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r-- drivers/scsi/scsi_debug.c | 192
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 85
-rw-r--r-- drivers/scsi/scsi_error.c | 24
-rw-r--r-- drivers/scsi/scsi_lib.c | 36
-rw-r--r-- drivers/scsi/scsi_priv.h | 2
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 104
-rw-r--r-- drivers/scsi/sd.c | 229
-rw-r--r-- drivers/scsi/sd.h | 25
-rw-r--r-- drivers/target/target_core_cdb.c | 8
-rw-r--r-- drivers/tty/hvc/hvc_xen.c | 2
-rw-r--r-- drivers/tty/serial/sh-sci.c | 431
-rw-r--r-- drivers/tty/serial/sh-sci.h | 39
-rw-r--r-- drivers/video/Kconfig | 12
-rw-r--r-- drivers/video/Makefile | 1
-rw-r--r-- drivers/video/fb-puv3.c | 846
-rw-r--r-- drivers/video/sh_mobile_lcdcfb.c | 222
-rw-r--r-- drivers/video/sh_mobile_lcdcfb.h | 4
-rw-r--r-- drivers/video/via/chip.h | 9
-rw-r--r-- drivers/video/via/dvi.c | 4
-rw-r--r-- drivers/video/via/hw.c | 772
-rw-r--r-- drivers/video/via/hw.h | 2
-rw-r--r-- drivers/video/via/lcd.c | 83
-rw-r--r-- drivers/video/via/share.h | 141
-rw-r--r-- drivers/video/via/tblDPASetting.c | 23
-rw-r--r-- drivers/video/via/tblDPASetting.h | 2
-rw-r--r-- drivers/video/via/via_i2c.c | 3
-rw-r--r-- drivers/video/via/viafbdev.c | 6
-rw-r--r-- drivers/video/via/viamode.c | 507
-rw-r--r-- drivers/video/via/viamode.h | 9
-rw-r--r-- drivers/video/via/vt1636.c | 43
-rw-r--r-- drivers/watchdog/Kconfig | 42
-rw-r--r-- drivers/watchdog/Makefile | 5
-rw-r--r-- drivers/watchdog/alim1535_wdt.c | 10
-rw-r--r-- drivers/watchdog/alim7101_wdt.c | 2
-rw-r--r-- drivers/watchdog/bcm47xx_wdt.c | 4
-rw-r--r-- drivers/watchdog/bfin_wdt.c | 4
-rw-r--r-- drivers/watchdog/booke_wdt.c | 19
-rw-r--r-- drivers/watchdog/cpwd.c | 36
-rw-r--r-- drivers/watchdog/eurotechwdt.c | 2
-rw-r--r-- drivers/watchdog/hpwdt.c | 2
-rw-r--r-- drivers/watchdog/i6300esb.c | 2
-rw-r--r-- drivers/watchdog/iTCO_wdt.c | 4
-rw-r--r-- drivers/watchdog/intel_scu_watchdog.c | 572
-rw-r--r-- drivers/watchdog/intel_scu_watchdog.h | 66
-rw-r--r-- drivers/watchdog/it8712f_wdt.c | 2
-rw-r--r-- drivers/watchdog/it87_wdt.c | 28
-rw-r--r-- drivers/watchdog/jz4740_wdt.c | 322
-rw-r--r-- drivers/watchdog/machzwd.c | 2
-rw-r--r-- drivers/watchdog/max63xx_wdt.c | 2
-rw-r--r-- drivers/watchdog/mpc8xxx_wdt.c | 6
-rw-r--r-- drivers/watchdog/mpcore_wdt.c | 2
-rw-r--r-- drivers/watchdog/mtx-1_wdt.c | 14
-rw-r--r-- drivers/watchdog/nv_tco.c | 2
-rw-r--r-- drivers/watchdog/omap_wdt.h | 2
-rw-r--r-- drivers/watchdog/pc87413_wdt.c | 2
-rw-r--r-- drivers/watchdog/pcwd_pci.c | 2
-rw-r--r-- drivers/watchdog/pnx4008_wdt.c | 2
-rw-r--r-- drivers/watchdog/s3c2410_wdt.c | 2
-rw-r--r-- drivers/watchdog/sbc8360.c | 2
-rw-r--r-- drivers/watchdog/sbc_fitpc2_wdt.c | 2
-rw-r--r-- drivers/watchdog/shwdt.c | 365
-rw-r--r-- drivers/watchdog/smsc37b787_wdt.c | 4
-rw-r--r-- drivers/watchdog/softdog.c | 2
-rw-r--r-- drivers/watchdog/sp5100_tco.c | 2
-rw-r--r-- drivers/watchdog/ts72xx_wdt.c | 2
-rw-r--r-- drivers/watchdog/w83697ug_wdt.c | 4
-rw-r--r-- drivers/watchdog/wdt.c | 2
-rw-r--r-- drivers/watchdog/wdt977.c | 2
-rw-r--r-- drivers/watchdog/wdt_pci.c | 6
-rw-r--r-- drivers/watchdog/xen_wdt.c | 359
-rw-r--r-- drivers/xen/Kconfig | 10
-rw-r--r-- drivers/xen/Makefile | 6
-rw-r--r-- drivers/xen/balloon.c | 359
-rw-r--r-- drivers/xen/events.c | 439
-rw-r--r-- drivers/xen/gntalloc.c | 545
-rw-r--r-- drivers/xen/gntdev.c | 382
-rw-r--r-- drivers/xen/grant-table.c | 10
-rw-r--r-- drivers/xen/manage.c | 16
-rw-r--r-- drivers/xen/xen-balloon.c | 256
-rw-r--r-- drivers/xen/xenbus/xenbus_probe.c | 12
-rw-r--r-- drivers/xen/xenbus/xenbus_probe.h | 3
-rw-r--r-- drivers/xen/xenbus/xenbus_probe_frontend.c | 11
411 files changed, 37296 insertions, 12889 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5cb4d09919d6..0f17ad8585d7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
goto out;
if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(cpu_policy, pmsg);
+ ret = cpufreq_driver->suspend(cpu_policy);
if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 94284c8473b1..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -116,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
if (wall)
*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);;
+ return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -162,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
};
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_conservative Governor Tunables */
@@ -193,33 +183,6 @@ show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
const char *buf, size_t count)
@@ -231,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -248,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -262,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
if (ret != 1 || input > 100 ||
- input <= dbs_tuners_ins.down_threshold) {
- mutex_unlock(&dbs_mutex);
+ input <= dbs_tuners_ins.down_threshold)
return -EINVAL;
- }
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -282,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
int ret;
ret = sscanf(buf, "%u", &input);
- mutex_lock(&dbs_mutex);
/* cannot be lower than 11 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 ||
- input >= dbs_tuners_ins.up_threshold) {
- mutex_unlock(&dbs_mutex);
+ input >= dbs_tuners_ins.up_threshold)
return -EINVAL;
- }
dbs_tuners_ins.down_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -311,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
- if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
return count;
- }
+
dbs_tuners_ins.ignore_nice = input;
/* we need to re-evaluate prev_cpu_idle */
@@ -327,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -347,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
/* no need to test here if freq_step is zero as the user might actually
* want this, they would be crazy though :) */
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.freq_step = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -362,7 +302,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&sampling_down_factor.attr,
@@ -378,49 +317,6 @@ static struct attribute_group dbs_attr_group = {
.name = "conservative",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &sampling_down_factor_old.attr,
- &up_threshold_old.attr,
- &down_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &freq_step_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -596,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -664,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
dbs_enable--;
mutex_destroy(&this_dbs_info->timer_mutex);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 58aa85ea5ec6..891360edecdd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable; /* number of CPUs using this policy */
/*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
*/
static DEFINE_MUTEX(dbs_mutex);
@@ -235,21 +234,12 @@ static void ondemand_powersave_bias_init(void)
/************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
- struct attribute *attr, char *buf)
-{
- printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
- "sysfs file is deprecated - used by: %s\n", current->comm);
- return sprintf(buf, "%u\n", -1U);
-}
-
static ssize_t show_sampling_rate_min(struct kobject *kobj,
struct attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", min_sampling_rate);
}
-define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
@@ -266,32 +256,6 @@ show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
-/*** delete after deprecation time ***/
-
-#define DEPRECATION_MSG(file_name) \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(up_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(powersave_bias);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -300,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -317,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.io_is_busy = !!input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -336,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
-
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.up_threshold = input;
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -353,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
@@ -362,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->rate_mult = 1;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -382,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
if (input > 1)
input = 1;
- mutex_lock(&dbs_mutex);
if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
- mutex_unlock(&dbs_mutex);
return count;
}
dbs_tuners_ins.ignore_nice = input;
@@ -399,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
}
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -417,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
if (input > 1000)
input = 1000;
- mutex_lock(&dbs_mutex);
dbs_tuners_ins.powersave_bias = input;
ondemand_powersave_bias_init();
- mutex_unlock(&dbs_mutex);
-
return count;
}
@@ -433,7 +375,6 @@ define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);
static struct attribute *dbs_attributes[] = {
- &sampling_rate_max.attr,
&sampling_rate_min.attr,
&sampling_rate.attr,
&up_threshold.attr,
@@ -449,43 +390,6 @@ static struct attribute_group dbs_attr_group = {
.name = "ondemand",
};
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
- printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
- "interface is deprecated - " #file_name "\n"); \
- return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(up_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(powersave_bias);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(powersave_bias);
-
-static struct attribute *dbs_attributes_old[] = {
- &sampling_rate_max_old.attr,
- &sampling_rate_min_old.attr,
- &sampling_rate_old.attr,
- &up_threshold_old.attr,
- &ignore_nice_load_old.attr,
- &powersave_bias_old.attr,
- NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
- .attrs = dbs_attributes_old,
- .name = "ondemand",
-};
-
-/*** delete after deprecation time ***/
-
/************************** sysfs end ************************/
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -642,12 +546,7 @@ static void do_dbs_timer(struct work_struct *work)
unsigned int cpu = dbs_info->cpu;
int sample_type = dbs_info->sample_type;
- /* We want all CPUs to do sampling nearly on same jiffy */
- int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
- * dbs_info->rate_mult);
-
- if (num_online_cpus() > 1)
- delay -= jiffies % delay;
+ int delay;
mutex_lock(&dbs_info->timer_mutex);
@@ -660,10 +559,20 @@ static void do_dbs_timer(struct work_struct *work)
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
+ } else {
+ /* We want all CPUs to do sampling nearly on
+ * same jiffy
+ */
+ delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+ * dbs_info->rate_mult);
+
+ if (num_online_cpus() > 1)
+ delay -= jiffies % delay;
}
} else {
__cpufreq_driver_target(dbs_info->cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
+ delay = dbs_info->freq_lo_jiffies;
}
schedule_delayed_work_on(cpu, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
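[Note: the new else-branch keeps the jiffy alignment that the deleted up-front computation used to do: subtracting "jiffies % delay" makes the next expiry land on a multiple of the sampling period, so all CPUs sample on nearly the same jiffy. A standalone sketch of that arithmetic, with illustrative values:]

/* Illustrative only: align a timer delay to a sampling-period boundary. */
static unsigned long align_delay(unsigned long now, unsigned long delay)
{
	/* e.g. delay == 10 and now % 10 == 3  =>  returns 7, so the work
	 * fires when the jiffies counter is again a multiple of 10. */
	return delay - (now % delay);
}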
@@ -727,12 +636,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
mutex_lock(&dbs_mutex);
- rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
- if (rc) {
- mutex_unlock(&dbs_mutex);
- return rc;
- }
-
dbs_enable++;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_info_s *j_dbs_info;
@@ -785,7 +688,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
dbs_timer_exit(this_dbs_info);
mutex_lock(&dbs_mutex);
- sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
mutex_destroy(&this_dbs_info->timer_mutex);
dbs_enable--;
mutex_unlock(&dbs_mutex);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 23e03554f0d3..0be30e978c85 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;
/*
- * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
- * later.
- */
-static int ddr2_dbam_revCG[] = {
- [0] = 32,
- [1] = 64,
- [2] = 128,
- [3] = 256,
- [4] = 512,
- [5] = 1024,
- [6] = 2048,
-};
-
-static int ddr2_dbam_revD[] = {
- [0] = 32,
- [1] = 64,
- [2 ... 3] = 128,
- [4] = 256,
- [5] = 512,
- [6] = 256,
- [7] = 512,
- [8 ... 9] = 1024,
- [10] = 2048,
-};
-
-static int ddr2_dbam[] = { [0] = 128,
- [1] = 256,
- [2 ... 4] = 512,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-static int ddr3_dbam[] = { [0] = -1,
- [1] = 256,
- [2] = 512,
- [3 ... 4] = -1,
- [5 ... 6] = 1024,
- [7 ... 8] = 2048,
- [9 ... 10] = 4096,
- [11] = 8192,
-};
-
-/*
* Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
* bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
* or higher value'.
*
*FIXME: Produce a better mapping/linearisation.
*/
-
-
struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
@@ -107,6 +60,79 @@ struct scrubrate {
{ 0x00, 0UL}, /* scrubbing off */
};
+static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func)
+{
+ int err = 0;
+
+ err = pci_read_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error reading F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func)
+{
+ int err = 0;
+
+ err = pci_write_config_dword(pdev, offset, val);
+ if (err)
+ amd64_warn("%s: error writing to F%dx%03x.\n",
+ func, PCI_FUNC(pdev->devfn), offset);
+
+ return err;
+}
+
+/*
+ *
+ * Depending on the family, F2 DCT reads need special handling:
+ *
+ * K8: has a single DCT only
+ *
+ * F10h: each DCT has its own set of regs
+ * DCT0 -> F2x040..
+ * DCT1 -> F2x140..
+ *
+ * F15h: we select which DCT we access using F1x10C[DctCfgSel]
+ *
+ */
+static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ if (addr >= 0x100)
+ return -EINVAL;
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
+static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
+ const char *func)
+{
+ u32 reg = 0;
+ u8 dct = 0;
+
+ if (addr >= 0x140 && addr <= 0x1a0) {
+ dct = 1;
+ addr -= 0x100;
+ }
+
+ amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
+ reg &= 0xfffffffe;
+ reg |= dct;
+ amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
+
+ return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
+}
+
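[Note: per the comment above, F15h multiplexes one set of F2 registers between both DCTs via F1x10C[DctCfgSel]. A hedged usage sketch: reading DCT1's DRAM Configuration Low, nominally "F2x190", makes the helper select DCT1 and read offset 0x090; the 0x90/0x190 offsets are assumed from the BKDG naming this file uses, and in-tree callers would normally go through the amd64_read_dct_pci_cfg() wrapper:]

/* Illustrative caller: on F15h, "F2x190" means "set DctCfgSel = 1,
 * then read F2x090". The helper does the select-and-rebase itself. */
u32 dclr1 = 0;
if (!f15_read_dct_pci_cfg(pvt, 0x190, &dclr1, __func__))
	amd64_dump_dramcfg_low(dclr1, 1);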
/*
* Memory scrubber control interface. For K8, memory scrubbing is handled by
* hardware and can involve L2 cache, dcache as well as the main memory. With
@@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
+ pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
if (scrubval)
return scrubrates[i].bandwidth;
@@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
struct amd64_pvt *pvt = mci->pvt_info;
+ u32 min_scrubrate = 0x5;
+
+ if (boot_cpu_data.x86 == 0xf)
+ min_scrubrate = 0x0;
- return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
+ return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}
static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
@@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
u32 scrubval = 0;
int i, retval = -EINVAL;
- amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
scrubval = scrubval & 0x001F;
@@ -192,63 +222,14 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
return retval;
}
-/* Map from a CSROW entry to the mask entry that operates on it */
-static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
-{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
- return csrow;
- else
- return csrow >> 1;
-}
-
-/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */
-static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsb0[csrow];
- else
- return pvt->dcsb1[csrow];
-}
-
-/*
- * Return the 'mask' address the i'th CS entry. This function is needed because
- * there number of DCSM registers on Rev E and prior vs Rev F and later is
- * different.
- */
-static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
-{
- if (dct == 0)
- return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
- else
- return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];
-}
-
-
/*
- * In *base and *limit, pass back the full 40-bit base and limit physical
- * addresses for the node given by node_id. This information is obtained from
- * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
- * base and limit addresses are of type SysAddr, as defined at the start of
- * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
- * in the address range they represent.
+ * returns true if the SysAddr given by sys_addr matches the
+ * DRAM base/limit associated with node_id
*/
-static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
- u64 *base, u64 *limit)
+static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
+ unsigned nid)
{
- *base = pvt->dram_base[node_id];
- *limit = pvt->dram_limit[node_id];
-}
-
-/*
- * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
- * with node_id
- */
-static int amd64_base_limit_match(struct amd64_pvt *pvt,
- u64 sys_addr, int node_id)
-{
- u64 base, limit, addr;
-
- amd64_get_base_and_limit(pvt, node_id, &base, &limit);
+ u64 addr;
/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
* all ones if the most significant implemented address bit is 1.
@@ -258,7 +239,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt,
*/
addr = sys_addr & 0x000000ffffffffffull;
- return (addr >= base) && (addr <= limit);
+ return ((addr >= get_dram_base(pvt, nid)) &&
+ (addr <= get_dram_limit(pvt, nid)));
}
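[Note: the 40-bit truncation handles SysAddrs that arrive sign-extended, as the comment explains; a tiny worked example with illustrative values:]

/* Illustrative: a sign-extended SysAddr whose bit 39 is set. */
u64 sys_addr = 0xffffff8000000000ULL;	/* bits 63:40 replicate bit 39 */
u64 addr     = sys_addr & 0x000000ffffffffffULL;
/* addr == 0x8000000000ULL: the 40-bit physical address that is
 * actually compared against the node's DRAM base and limit. */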
/*
@@ -271,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- int node_id;
+ unsigned node_id;
u32 intlv_en, bits;
/*
@@ -285,10 +267,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
* registers. Therefore we arbitrarily choose to read it from the
* register for node 0.
*/
- intlv_en = pvt->dram_IntlvEn[0];
+ intlv_en = dram_intlv_en(pvt, 0);
if (intlv_en == 0) {
- for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
+ for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
if (amd64_base_limit_match(pvt, sys_addr, node_id))
goto found;
}
@@ -305,10 +287,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
bits = (((u32) sys_addr) >> 12) & intlv_en;
for (node_id = 0; ; ) {
- if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
+ if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
break; /* intlv_sel field matches */
- if (++node_id >= DRAM_REG_COUNT)
+ if (++node_id >= DRAM_RANGES)
goto err_no_match;
}
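[Note: for illustration, with four-node interleaving enabled intlv_en reads 0x3, so SysAddr bits [13:12] pick the node whose DramIntlvSel field matches; all values below are hypothetical:]

/* Illustrative: 4-node interleave => intlv_en == 0x3. */
u32 intlv_en = 0x3;
u64 sys_addr = 0x3000;
u32 bits = (((u32)sys_addr) >> 12) & intlv_en;
/* bits == 0x3: the loop stops at the node whose
 * dram_intlv_sel(pvt, node_id) equals 3. */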
@@ -321,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
}
found:
- return edac_mc_find(node_id);
+ return edac_mc_find((int)node_id);
err_no_match:
debugf2("sys_addr 0x%lx doesn't match any node\n",
@@ -331,37 +313,50 @@ err_no_match:
}
/*
- * Extract the DRAM CS base address from selected csrow register.
+ * compute the CS base address of the @csrow on the DRAM controller @dct.
+ * For details see F2x[5C:40] in the processor's BKDG
*/
-static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
+static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
+ u64 *base, u64 *mask)
{
- return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
- pvt->dcs_shift;
-}
+ u64 csbase, csmask, base_bits, mask_bits;
+ u8 addr_shift;
-/*
- * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way.
- */
-static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
-{
- u64 dcsm_bits, other_bits;
- u64 mask;
-
- /* Extract bits from DRAM CS Mask. */
- dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;
+ if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow];
+ base_bits = GENMASK(21, 31) | GENMASK(9, 15);
+ mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
+ addr_shift = 4;
+ } else {
+ csbase = pvt->csels[dct].csbases[csrow];
+ csmask = pvt->csels[dct].csmasks[csrow >> 1];
+ addr_shift = 8;
- other_bits = pvt->dcsm_mask;
- other_bits = ~(other_bits << pvt->dcs_shift);
+ if (boot_cpu_data.x86 == 0x15)
+ base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
+ else
+ base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
+ }
- /*
- * The extracted bits from DCSM belong in the spaces represented by
- * the cleared bits in other_bits.
- */
- mask = (dcsm_bits << pvt->dcs_shift) | other_bits;
+ *base = (csbase & base_bits) << addr_shift;
- return mask;
+ *mask = ~0ULL;
+ /* poke holes for the csmask */
+ *mask &= ~(mask_bits << addr_shift);
+ /* OR them in */
+ *mask |= (csmask & mask_bits) << addr_shift;
}
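[Two notes on the helper above. First, GENMASK() here appears to be this driver's local macro taking (low_bit, high_bit) — the reverse of the later kernel-wide GENMASK(high, low) — so GENMASK(21, 31) covers bits 21 through 31; that reading is inferred from the bit ranges in the old comments, not confirmed by this hunk. Second, a sketch of how a caller consumes the (base, mask) pair, mirroring input_addr_to_csrow() below:]

/* The returned mask is ~0 with the chip select's "don't care" bit
 * positions carrying the csmask value; invert it and compare only
 * the bits that survive. */
static bool addr_matches_cs(u64 input_addr, u64 base, u64 mask)
{
	mask = ~mask;
	return (input_addr & mask) == (base & mask);
}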
+#define for_each_chip_select(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].b_cnt; i++)
+
+#define chip_select_base(i, dct, pvt) \
+ pvt->csels[dct].csbases[i]
+
+#define for_each_chip_select_mask(i, dct, pvt) \
+ for (i = 0; i < pvt->csels[dct].m_cnt; i++)
+
/*
* @input_addr is an InputAddr associated with the node given by mci. Return the
* csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
@@ -374,19 +369,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
pvt = mci->pvt_info;
- /*
- * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
- * base/mask register pair, test the condition shown near the start of
- * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
- */
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
-
- /* This DRAM chip select is disabled on this node */
- if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)
+ for_each_chip_select(csrow, 0, pvt) {
+ if (!csrow_enabled(csrow, 0, pvt))
continue;
- base = base_from_dct_base(pvt, csrow);
- mask = ~mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
+
+ mask = ~mask;
if ((input_addr & mask) == (base & mask)) {
debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
@@ -396,7 +385,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
return csrow;
}
}
-
debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
(unsigned long)input_addr, pvt->mc_node_id);
@@ -404,19 +392,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
}
/*
- * Return the base value defined by the DRAM Base register for the node
- * represented by mci. This function returns the full 40-bit value despite the
- * fact that the register only stores bits 39-24 of the value. See section
- * 3.4.4.1 (BKDG #26094, K8, revA-E)
- */
-static inline u64 get_dram_base(struct mem_ctl_info *mci)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
-
- return pvt->dram_base[pvt->mc_node_id];
-}
-
-/*
* Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
* for the node represented by mci. Info is passed back in *hole_base,
* *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
@@ -445,14 +420,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
return 1;
}
- /* only valid for Fam10h */
- if (boot_cpu_data.x86 == 0x10 &&
- (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
+ /* valid for Fam10h and above */
+ if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
- if ((pvt->dhar & DHAR_VALID) == 0) {
+ if (!dhar_valid(pvt)) {
debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
pvt->mc_node_id);
return 1;
@@ -476,15 +450,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
* addresses in the hole so that they start at 0x100000000.
*/
- base = dhar_base(pvt->dhar);
+ base = dhar_base(pvt);
*hole_base = base;
*hole_size = (0x1ull << 32) - base;
if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt->dhar);
+ *hole_offset = f10_dhar_offset(pvt);
else
- *hole_offset = k8_dhar_offset(pvt->dhar);
+ *hole_offset = k8_dhar_offset(pvt);
debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
pvt->mc_node_id, (unsigned long)*hole_base,
@@ -525,10 +499,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
*/
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
int ret = 0;
- dram_base = get_dram_base(mci);
+ dram_base = get_dram_base(pvt, pvt->mc_node_id);
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
&hole_size);
@@ -556,7 +531,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
* section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
* Programmer's Manual Volume 1 Application Programming.
*/
- dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
+ dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
"DramAddr 0x%lx\n", (unsigned long)sys_addr,
@@ -592,9 +567,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
* See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
* concerning translating a DramAddr to an InputAddr.
*/
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
- input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
- (dram_addr & 0xfff);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
+ input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
+ (dram_addr & 0xfff);
debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
intlv_shift, (unsigned long)dram_addr,
@@ -628,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
struct amd64_pvt *pvt;
- int node_id, intlv_shift;
+ unsigned node_id, intlv_shift;
u64 bits, dram_addr;
u32 intlv_sel;
@@ -642,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
*/
pvt = mci->pvt_info;
node_id = pvt->mc_node_id;
- BUG_ON((node_id < 0) || (node_id > 7));
- intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
+ BUG_ON(node_id > 7);
+ intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
if (intlv_shift == 0) {
debugf1(" InputAddr 0x%lx translates to DramAddr of "
"same value\n", (unsigned long)input_addr);
@@ -653,10 +628,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
return input_addr;
}
- bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
- (input_addr & 0xfff);
+ bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
+ (input_addr & 0xfff);
- intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
+ intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
dram_addr = bits + (intlv_sel << 12);
debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
@@ -673,7 +648,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;
+ u64 hole_base, hole_offset, hole_size, base, sys_addr;
int ret = 0;
ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
@@ -691,7 +666,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
}
}
- amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
+ base = get_dram_base(pvt, pvt->mc_node_id);
sys_addr = dram_addr + base;
/*
@@ -736,13 +711,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
u64 base, mask;
pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));
+ BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
- base = base_from_dct_base(pvt, csrow);
- mask = mask_from_dct_mask(pvt, csrow);
+ get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
*input_addr_min = base & ~mask;
- *input_addr_max = base | mask | pvt->dcs_mask_notused;
+ *input_addr_max = base | mask;
}
/* Map the Error address to a PAGE and PAGE OFFSET. */
@@ -775,18 +749,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
-static u16 extract_syndrome(struct err_regs *err)
-{
- return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
-}
-
/*
* Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
* are ECC capable.
*/
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
- int bit;
+ u8 bit;
enum dev_type edac_cap = EDAC_FLAG_NONE;
bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
@@ -799,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
return edac_cap;
}
-
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
@@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
debugf1(" PAR/ERR parity: %s\n",
(dclr & BIT(8)) ? "enabled" : "disabled");
- debugf1(" DCT 128bit mode width: %s\n",
- (dclr & BIT(11)) ? "128b" : "64b");
+ if (boot_cpu_data.x86 == 0x10)
+ debugf1(" DCT 128bit mode width: %s\n",
+ (dclr & BIT(11)) ? "128b" : "64b");
debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
(dclr & BIT(12)) ? "yes" : "no",
@@ -824,16 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
}
/* Display and decode various NB registers for debug purposes. */
-static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
+static void dump_misc_regs(struct amd64_pvt *pvt)
{
debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
debugf1(" NB two channel DRAM capable: %s\n",
- (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
- (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
- (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
+ (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
+ (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
amd64_dump_dramcfg_low(pvt->dclr0, 0);
@@ -841,130 +810,84 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
"offset: 0x%08x\n",
- pvt->dhar,
- dhar_base(pvt->dhar),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
- : f10_dhar_offset(pvt->dhar));
+ pvt->dhar, dhar_base(pvt),
+ (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
- debugf1(" DramHoleValid: %s\n",
- (pvt->dhar & DHAR_VALID) ? "yes" : "no");
+ debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
- amd64_debug_display_dimm_sizes(0, pvt);
+ amd64_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
if (boot_cpu_data.x86 == 0xf)
return;
- amd64_debug_display_dimm_sizes(1, pvt);
+ amd64_debug_display_dimm_sizes(pvt, 1);
- amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
+ amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
amd64_dump_dramcfg_low(pvt->dclr1, 1);
}
-/* Read in both of DBAM registers */
-static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
-{
- amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);
-
- if (boot_cpu_data.x86 >= 0x10)
- amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
-}
-
/*
- * NOTE: CPU Revision Dependent code: Rev E and Rev F
- *
- * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
- * set the shift factor for the DCSB and DCSM values.
- *
- * ->dcs_mask_notused, RevE:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of section
- * 3.5.4 (p. 84).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
- * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
- * gaps.
- *
- * ->dcs_mask_notused, RevF and later:
- *
- * To find the max InputAddr for the csrow, start with the base address and set
- * all bits that are "don't care" bits in the test at the start of NPT section
- * 4.5.4 (p. 87).
- *
- * The "don't care" bits are all set bits in the mask and all bits in the gaps
- * between bit ranges [36:27] and [21:13].
- *
- * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
- * which are all bits in the above-mentioned gaps.
+ * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
-static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
+static void prep_chip_selects(struct amd64_pvt *pvt)
{
-
if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
- pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_E_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 8;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
} else {
- pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS;
- pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS;
- pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS;
- pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT;
- pvt->cs_count = 8;
- pvt->num_dcsm = 4;
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
}
}
/*
- * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
+ * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
*/
-static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
+static void read_dct_base_mask(struct amd64_pvt *pvt)
{
- int cs, reg;
+ int cs;
- amd64_set_dct_base_and_mask(pvt);
+ prep_chip_selects(pvt);
- for (cs = 0; cs < pvt->cs_count; cs++) {
- reg = K8_DCSB0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
+ for_each_chip_select(cs, 0, pvt) {
+ int reg0 = DCSB0 + (cs * 4);
+ int reg1 = DCSB1 + (cs * 4);
+ u32 *base0 = &pvt->csels[0].csbases[cs];
+ u32 *base1 = &pvt->csels[1].csbases[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's base */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSB1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsb1[cs]))
- debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsb1[cs], reg);
- } else {
- pvt->dcsb1[cs] = 0;
- }
+ cs, *base0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
+ debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
+ cs, *base1, reg1);
}
- for (cs = 0; cs < pvt->num_dcsm; cs++) {
- reg = K8_DCSM0 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
+ for_each_chip_select_mask(cs, 0, pvt) {
+ int reg0 = DCSM0 + (cs * 4);
+ int reg1 = DCSM1 + (cs * 4);
+ u32 *mask0 = &pvt->csels[0].csmasks[cs];
+ u32 *mask1 = &pvt->csels[1].csmasks[cs];
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm0[cs], reg);
-
- /* If DCT are NOT ganged, then read in DCT1's mask */
- if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
- reg = F10_DCSM1 + (cs * 4);
- if (!amd64_read_pci_cfg(pvt->F2, reg,
- &pvt->dcsm1[cs]))
- debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
- cs, pvt->dcsm1[cs], reg);
- } else {
- pvt->dcsm1[cs] = 0;
- }
+ cs, *mask0, reg0);
+
+ if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ continue;
+
+ if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
+ debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
+ cs, *mask1, reg1);
}
}
@@ -972,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
enum mem_type type;
- if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+ /* F15h supports only DDR3 */
+ if (boot_cpu_data.x86 >= 0x15)
+ type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+ else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
if (pvt->dchr0 & DDR3_MODE)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
else
@@ -986,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
return type;
}
-/*
- * Read the DRAM Configuration Low register. It differs between CG, D & E revs
- * and the later RevF memory controllers (DDR vs DDR2)
- *
- * Return:
- * number of memory channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
+/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
- int flag, err = 0;
-
- err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- if (err)
- return err;
+ int flag;
if (pvt->ext_model >= K8_REV_F)
/* RevF (NPT) and later */
- flag = pvt->dclr0 & F10_WIDTH_128;
+ flag = pvt->dclr0 & WIDTH_128;
else
/* RevE and earlier */
flag = pvt->dclr0 & REVE_WIDTH_128;
@@ -1016,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
return (flag) ? 2 : 1;
}
-/* extract the ERROR ADDRESS for the K8 CPUs */
-static u64 k8_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
+static u64 get_error_address(struct mce *m)
{
- return (((u64) (info->nbeah & 0xff)) << 32) +
- (info->nbeal & ~0x03);
+ u8 start_bit = 1;
+ u8 end_bit = 47;
+
+ if (boot_cpu_data.x86 == 0xf) {
+ start_bit = 3;
+ end_bit = 39;
+ }
+
+ return m->addr & GENMASK(start_bit, end_bit);
}
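
Not part of the patch: a minimal standalone sketch showing the two extraction windows, using the driver-local GENMASK(lo, hi) definition from amd64_edac.h:

#include <stdio.h>

/* mirrors the driver-local GENMASK(lo, hi) from amd64_edac.h */
#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	/* K8: ErrAddr is MC4_ADDR[39:3] */
	printf("K8   mask: 0x%016llx\n", GENMASK(3, 39));
	/* F10h and later: ErrAddr is MC4_ADDR[47:1] */
	printf("F10h mask: 0x%016llx\n", GENMASK(1, 47));
	return 0;
}
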
-/*
- * Read the Base and Limit registers for K8 based Memory controllers; extract
- * fields from the 'raw' reg into separate data fields
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
- */
-static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
- u32 low;
- u32 off = dram << 3; /* 8 bytes between DRAM entries */
+ int off = range << 3;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
- /* Extract parts into separate data entries */
- pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
- pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
- pvt->dram_rw_en[dram] = (low & 0x3);
+ if (boot_cpu_data.x86 == 0xf)
+ return;
- amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);
+ if (!dram_rw(pvt, range))
+ return;
- /*
- * Extract parts into separate data entries. Limit is the HIGHEST memory
- * location of the region, so lower 24 bits need to be all ones
- */
- pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
- pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
- pvt->dram_DstNode[dram] = (low & 0x7);
+ amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
+ amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
}
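
For illustration only: the `range << 3` stride lands exactly on the F1x[7C:40] register span named in the header, as this tiny sketch shows:

#include <stdio.h>

int main(void)
{
	/* DRAM_BASE_LO = 0x40, DRAM_LIMIT_LO = 0x44; ranges are 8 bytes apart */
	for (unsigned range = 0; range < 8; range++)
		printf("range %u: base F1x%02X, limit F1x%02X\n",
		       range, 0x40 + (range << 3), 0x44 + (range << 3));
	return 0;
}
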
-static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info, u64 sys_addr)
+static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct mem_ctl_info *src_mci;
+ struct amd64_pvt *pvt = mci->pvt_info;
int channel, csrow;
u32 page, offset;
- u16 syndrome;
-
- syndrome = extract_syndrome(err_info);
/* CHIPKILL enabled */
- if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
+ if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
if (channel < 0) {
/*
@@ -1113,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
}
}
-static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr2_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
- if (pvt->ext_model >= K8_REV_F)
- dbam_map = ddr2_dbam;
- else if (pvt->ext_model >= K8_REV_D)
- dbam_map = ddr2_dbam_revD;
+ if (i <= 2)
+ shift = i;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam_revCG;
+ shift = (i + 1) >> 1;
- return dbam_map[cs_mode];
+ return 128 << (shift + !!dct_width);
+}
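
As a quick experiment outside the patch, a standalone copy of ddr2_cs_size() makes it easy to see the sizes the closed form yields; the loop bound mirrors the WARN_ON(cs_mode > 11) in k8_dbam_to_chip_select() below:

#include <stdio.h>
#include <stdbool.h>

/* copy of the patch's ddr2_cs_size() for experimentation */
static int ddr2_cs_size(unsigned i, bool dct_width)
{
	unsigned shift = 0;

	if (i <= 2)
		shift = i;
	else if (!(i & 0x1))
		shift = i >> 1;
	else
		shift = (i + 1) >> 1;

	return 128 << (shift + !!dct_width);
}

int main(void)
{
	for (unsigned i = 0; i <= 11; i++)
		printf("cs_mode %2u: %5d MB (x64), %5d MB (x128)\n",
		       i, ddr2_cs_size(i, false), ddr2_cs_size(i, true));
	return 0;
}
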
+
+static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
+{
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ if (pvt->ext_model >= K8_REV_F) {
+ WARN_ON(cs_mode > 11);
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
+ }
+ else if (pvt->ext_model >= K8_REV_D) {
+ WARN_ON(cs_mode > 10);
+
+ if (cs_mode == 3 || cs_mode == 8)
+ return 32 << (cs_mode - 1);
+ else
+ return 32 << cs_mode;
+ }
+ else {
+ WARN_ON(cs_mode > 6);
+ return 32 << cs_mode;
+ }
}
/*
@@ -1135,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
* Pass back:
* contents of the DCL0_LOW register
*/
-static int f10_early_channel_count(struct amd64_pvt *pvt)
+static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
- int dbams[] = { DBAM0, DBAM1 };
int i, j, channels = 0;
- u32 dbam;
- /* If we are in 128 bit mode, then we are using 2 channels */
- if (pvt->dclr0 & F10_WIDTH_128) {
- channels = 2;
- return channels;
- }
+ /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
+ if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+ return 2;
/*
* Need to check if in unganged mode: In such, there are 2 channels,
@@ -1162,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
* is more than just one DIMM present in unganged mode. Need to check
* both controllers since DIMMs can be placed in either one.
*/
- for (i = 0; i < ARRAY_SIZE(dbams); i++) {
- if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))
- goto err_reg;
+ for (i = 0; i < 2; i++) {
+ u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
for (j = 0; j < 4; j++) {
if (DBAM_DIMM(j, dbam) > 0) {
@@ -1180,216 +1104,191 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
amd64_info("MCT channel count: %d\n", channels);
return channels;
-
-err_reg:
- return -1;
-
}
-static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
+static int ddr3_cs_size(unsigned i, bool dct_width)
{
- int *dbam_map;
+ unsigned shift = 0;
+ int cs_size = 0;
- if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
- dbam_map = ddr3_dbam;
+ if (i == 0 || i == 3 || i == 4)
+ cs_size = -1;
+ else if (i <= 2)
+ shift = i;
+ else if (i == 12)
+ shift = 7;
+ else if (!(i & 0x1))
+ shift = i >> 1;
else
- dbam_map = ddr2_dbam;
+ shift = (i + 1) >> 1;
+
+ if (cs_size != -1)
+ cs_size = (128 * (1 << !!dct_width)) << shift;
- return dbam_map[cs_mode];
+ return cs_size;
}
-static u64 f10_get_error_address(struct mem_ctl_info *mci,
- struct err_regs *info)
+static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- return (((u64) (info->nbeah & 0xffff)) << 32) +
- (info->nbeal & ~0x01);
+ u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
+
+ WARN_ON(cs_mode > 11);
+
+ if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
+ return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
+ else
+ return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}
/*
- * Read the Base and Limit registers for F10 based Memory controllers. Extract
- * fields from the 'raw' reg into separate data fields.
- *
- * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
+ * F15h supports only 64bit DCT interfaces
*/
-static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
+static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
+ unsigned cs_mode)
{
- u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;
-
- low_offset = K8_DRAM_BASE_LOW + (dram << 3);
- high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
-
- /* read the 'raw' DRAM BASE Address register */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);
-
- /* Extract parts into separate data entries */
- pvt->dram_rw_en[dram] = (low_base & 0x3);
-
- if (pvt->dram_rw_en[dram] == 0)
- return;
-
- pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;
-
- pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
- (((u64)low_base & 0xFFFF0000) << 8);
-
- low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
- high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
-
- /* read the 'raw' LIMIT registers */
- amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
- amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);
-
- pvt->dram_DstNode[dram] = (low_limit & 0x7);
- pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
+ WARN_ON(cs_mode > 12);
- /*
- * Extract address values and form a LIMIT address. Limit is the HIGHEST
- * memory location of the region, so low 24 bits need to be all ones.
- */
- pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
- (((u64) low_limit & 0xFFFF0000) << 8) |
- 0x00FFFFFF;
+ return ddr3_cs_size(cs_mode, false);
}
-static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
+static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
- if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
- &pvt->dram_ctl_select_low)) {
- debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
- "High range addresses at: 0x%x\n",
- pvt->dram_ctl_select_low,
- dct_sel_baseaddr(pvt));
+ if (boot_cpu_data.x86 == 0xf)
+ return;
+
+ if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
+ debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
+ pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
- debugf0(" DCT mode: %s, All DCTs on: %s\n",
- (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
- (dct_dram_enabled(pvt) ? "yes" : "no"));
+ debugf0(" DCTs operate in %s mode.\n",
+ (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
if (!dct_ganging_enabled(pvt))
debugf0(" Address range split per DCT: %s\n",
(dct_high_range_enabled(pvt) ? "yes" : "no"));
- debugf0(" DCT data interleave for ECC: %s, "
+ debugf0(" data interleave for ECC: %s, "
"DRAM cleared since last warm reset: %s\n",
(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
(dct_memory_cleared(pvt) ? "yes" : "no"));
- debugf0(" DCT channel interleave: %s, "
- "DCT interleave bits selector: 0x%x\n",
+ debugf0(" channel interleave: %s, "
+ "interleave bits selector: 0x%x\n",
(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
dct_sel_interleave_addr(pvt));
}
- amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
- &pvt->dram_ctl_select_high);
+ amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}
/*
- * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
+ * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
-static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
- int hi_range_sel, u32 intlv_en)
+static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ bool hi_range_sel, u8 intlv_en)
{
- u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;
+ u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
if (dct_ganging_enabled(pvt))
- cs = 0;
- else if (hi_range_sel)
- cs = dct_sel_high;
- else if (dct_interleave_enabled(pvt)) {
- /*
- * see F2x110[DctSelIntLvAddr] - channel interleave mode
- */
- if (dct_sel_interleave_addr(pvt) == 0)
- cs = sys_addr >> 6 & 1;
- else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
- temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+ return 0;
- if (dct_sel_interleave_addr(pvt) & 1)
- cs = (sys_addr >> 9 & 1) ^ temp;
- else
- cs = (sys_addr >> 6 & 1) ^ temp;
- } else if (intlv_en & 4)
- cs = sys_addr >> 15 & 1;
- else if (intlv_en & 2)
- cs = sys_addr >> 14 & 1;
- else if (intlv_en & 1)
- cs = sys_addr >> 13 & 1;
- else
- cs = sys_addr >> 12 & 1;
- } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
- cs = ~dct_sel_high & 1;
- else
- cs = 0;
+ if (hi_range_sel)
+ return dct_sel_high;
- return cs;
-}
+ /*
+ * see F2x110[DctSelIntLvAddr] - channel interleave mode
+ */
+ if (dct_interleave_enabled(pvt)) {
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
-static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
-{
- if (intlv_en == 1)
- return 1;
- else if (intlv_en == 3)
- return 2;
- else if (intlv_en == 7)
- return 3;
+ /* return DCT select function: 0=DCT0, 1=DCT1 */
+ if (!intlv_addr)
+ return sys_addr >> 6 & 1;
+
+ if (intlv_addr & 0x2) {
+ u8 shift = intlv_addr & 0x1 ? 9 : 6;
+ u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
+
+ return ((sys_addr >> shift) & 1) ^ temp;
+ }
+
+ return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
+ }
+
+ if (dct_high_range_enabled(pvt))
+ return ~dct_sel_high & 1;
return 0;
}
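
The `12 + hweight8(intlv_en)` term folds the removed bit-test chain (and the removed f10_map_intlv_en_to_shift() used further down) into a popcount; a standalone sketch with a stand-in hweight8 shows the agreement for the valid IntlvEn encodings 0/1/3/7:

#include <stdio.h>
#include <stdint.h>

/* stand-in for the kernel's hweight8(): population count of a byte */
static unsigned hweight8(uint8_t x)
{
	unsigned n = 0;

	for (; x; x >>= 1)
		n += x & 1;
	return n;
}

int main(void)
{
	/* valid node-interleave encodings of IntlvEn: 0, 1, 3, 7 */
	const uint8_t intlv_en[] = { 0, 1, 3, 7 };

	for (unsigned i = 0; i < sizeof(intlv_en); i++)
		printf("intlv_en=%d -> channel select bit A[%u]\n",
		       intlv_en[i], 12 + hweight8(intlv_en[i]));
	return 0;
}
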
-/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
-static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
- u32 dct_sel_base_addr,
- u64 dct_sel_base_off,
- u32 hole_valid, u32 hole_off,
- u64 dram_base)
+/* Convert the sys_addr to the normalized DCT address */
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, bool hi_rng,
+ u32 dct_sel_base_addr)
{
u64 chan_off;
+ u64 dram_base = get_dram_base(pvt, range);
+ u64 hole_off = f10_dhar_offset(pvt);
+ u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
- if (hi_range_sel) {
- if (!(dct_sel_base_addr & 0xFFFF0000) &&
- hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ if (hi_rng) {
+ /*
+ * if
+ * base address of high range is below 4Gb
+ * (bits [47:27] at [31:11])
+ * DRAM address space on this DCT is hoisted above 4Gb &&
+ * sys_addr > 4Gb
+ *
+ * remove hole offset from sys_addr
+ * else
+ * remove high range offset from sys_addr
+ */
+ if ((!(dct_sel_base_addr >> 16) ||
+ dct_sel_base_addr < dhar_base(pvt)) &&
+ dhar_valid(pvt) &&
+ (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
chan_off = dct_sel_base_off;
} else {
- if (hole_valid && (sys_addr >= 0x100000000ULL))
- chan_off = hole_off << 16;
+ /*
+ * if
+ * we have a valid hole &&
+ * sys_addr > 4Gb
+ *
+ * remove hole
+ * else
+ * remove dram base to normalize to DCT address
+ */
+ if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
+ chan_off = hole_off;
else
- chan_off = dram_base & 0xFFFFF8000000ULL;
+ chan_off = dram_base;
}
- return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
- (chan_off & 0x0000FFFFFF800000ULL);
+ return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
}
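
Worth noting: the two GENMASK expressions in the return statement evaluate to exactly the magic constants they replace, which a standalone check (using the GENMASK(lo, hi) definition from amd64_edac.h) confirms:

#include <assert.h>

/* driver-local GENMASK(lo, hi), as defined in amd64_edac.h */
#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	/* the new expressions equal the old magic constants bit for bit */
	assert(GENMASK(6, 47) == 0x0000FFFFFFFFFFC0ULL);
	assert(GENMASK(23, 47) == 0x0000FFFFFF800000ULL);
	return 0;
}
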
-/* Hack for the time being - Can we get this from BIOS?? */
-#define CH0SPARE_RANK 0
-#define CH1SPARE_RANK 1
-
/*
* checks if the csrow passed in is marked as SPARED; if so, returns the new
* spare row
*/
-static inline int f10_process_possible_spare(int csrow,
- u32 cs, struct amd64_pvt *pvt)
-{
- u32 swap_done;
- u32 bad_dram_cs;
-
- /* Depending on channel, isolate respective SPARING info */
- if (cs) {
- swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH1SPARE_RANK;
- } else {
- swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
- bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
- if (swap_done && (csrow == bad_dram_cs))
- csrow = CH0SPARE_RANK;
+static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
+{
+ int tmp_cs;
+
+ if (online_spare_swap_done(pvt, dct) &&
+ csrow == online_spare_bad_dramcs(pvt, dct)) {
+
+ for_each_chip_select(tmp_cs, dct, pvt) {
+ if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
+ csrow = tmp_cs;
+ break;
+ }
+ }
}
return csrow;
}
@@ -1402,11 +1301,11 @@ static inline int f10_process_possible_spare(int csrow,
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u32 cs_base, cs_mask;
+ u64 cs_base, cs_mask;
int cs_found = -EINVAL;
int csrow;
@@ -1416,39 +1315,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
pvt = mci->pvt_info;
- debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);
-
- for (csrow = 0; csrow < pvt->cs_count; csrow++) {
+ debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
- cs_base = amd64_get_dct_base(pvt, cs, csrow);
- if (!(cs_base & K8_DCSB_CS_ENABLE))
+ for_each_chip_select(csrow, dct, pvt) {
+ if (!csrow_enabled(csrow, dct, pvt))
continue;
- /*
- * We have an ENABLED CSROW, Isolate just the MASK bits of the
- * target: [28:19] and [13:5], which map to [36:27] and [21:13]
- * of the actual address.
- */
- cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;
-
- /*
- * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
- * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
- */
- cs_mask = amd64_get_dct_mask(pvt, cs, csrow);
+ get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
- debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
- csrow, cs_base, cs_mask);
+ debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
+ csrow, cs_base, cs_mask);
- cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;
+ cs_mask = ~cs_mask;
- debugf1(" Final CSMask=0x%x\n", cs_mask);
- debugf1(" (InputAddr & ~CSMask)=0x%x "
- "(CSBase & ~CSMask)=0x%x\n",
- (in_addr & ~cs_mask), (cs_base & ~cs_mask));
+ debugf1(" (InputAddr & ~CSMask)=0x%llx "
+ "(CSBase & ~CSMask)=0x%llx\n",
+ (in_addr & cs_mask), (cs_base & cs_mask));
- if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
- cs_found = f10_process_possible_spare(csrow, cs, pvt);
+ if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+ cs_found = f10_process_possible_spare(pvt, dct, csrow);
debugf1(" MATCH csrow=%d\n", cs_found);
break;
@@ -1457,38 +1342,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
return cs_found;
}
-/* For a given @dram_range, check if @sys_addr falls within it. */
-static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
- u64 sys_addr, int *nid, int *chan_sel)
+/*
+ * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
+ * swapped with a region located at the bottom of memory so that the GPU can use
+ * the interleaved region and thus two channels.
+ */
+static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
- int node_id, cs_found = -EINVAL, high_range = 0;
- u32 intlv_en, intlv_sel, intlv_shift, hole_off;
- u32 hole_valid, tmp, dct_sel_base, channel;
- u64 dram_base, chan_addr, dct_sel_base_off;
+ u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
- dram_base = pvt->dram_base[dram_range];
- intlv_en = pvt->dram_IntlvEn[dram_range];
+ if (boot_cpu_data.x86 == 0x10) {
+ /* only revC3 and revE have that feature */
+ if (boot_cpu_data.x86_model < 4 ||
+ (boot_cpu_data.x86_model < 0xa &&
+ boot_cpu_data.x86_mask < 3))
+ return sys_addr;
+ }
- node_id = pvt->dram_DstNode[dram_range];
- intlv_sel = pvt->dram_IntlvSel[dram_range];
+ amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
- debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
- dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);
+ if (!(swap_reg & 0x1))
+ return sys_addr;
- /*
- * This assumes that one node's DHAR is the same as all the other
- * nodes' DHAR.
- */
- hole_off = (pvt->dhar & 0x0000FF80);
- hole_valid = (pvt->dhar & 0x1);
- dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;
+ swap_base = (swap_reg >> 3) & 0x7f;
+ swap_limit = (swap_reg >> 11) & 0x7f;
+ rgn_size = (swap_reg >> 20) & 0x7f;
+ tmp_addr = sys_addr >> 27;
- debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
- hole_off, hole_valid, intlv_sel);
+ if (!(sys_addr >> 34) &&
+ (((tmp_addr >= swap_base) &&
+ (tmp_addr <= swap_limit)) ||
+ (tmp_addr < rgn_size)))
+ return sys_addr ^ (u64)swap_base << 27;
+
+ return sys_addr;
+}
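
An illustrative decode with made-up register values (not from the patch), showing the 128MB granularity behind the << 27 scaling:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* made-up F2x10C value: swap enabled, base 2, limit 3, rgn_size 4 */
	uint32_t swap_reg  = 0x1 | (0x02 << 3) | (0x03 << 11) | (0x04 << 20);
	uint32_t swap_base = (swap_reg >> 3) & 0x7f;

	/* 64MB lies in the bottom copy region; XOR maps it into [256MB, 512MB) */
	uint64_t lo = 0x04000000ULL;
	uint64_t hi = lo ^ ((uint64_t)swap_base << 27);

	printf("0x%09llx <-> 0x%09llx\n",
	       (unsigned long long)lo, (unsigned long long)hi);
	return 0;
}
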
+
+/* For a given @dram_range, check if @sys_addr falls within it. */
+static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, int *nid, int *chan_sel)
+{
+ int cs_found = -EINVAL;
+ u64 chan_addr;
+ u32 dct_sel_base;
+ u8 channel;
+ bool high_range = false;
+
+ u8 node_id = dram_dst_node(pvt, range);
+ u8 intlv_en = dram_intlv_en(pvt, range);
+ u32 intlv_sel = dram_intlv_sel(pvt, range);
+
+ debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
+
+ if (dhar_valid(pvt) &&
+ dhar_base(pvt) <= sys_addr &&
+ sys_addr < BIT_64(32)) {
+ amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+ sys_addr);
+ return -EINVAL;
+ }
if (intlv_en &&
- (intlv_sel != ((sys_addr >> 12) & intlv_en)))
+ (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
+ amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
+ intlv_en, intlv_sel);
return -EINVAL;
+ }
+
+ sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
dct_sel_base = dct_sel_baseaddr(pvt);
@@ -1499,38 +1421,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
if (dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt) &&
((sys_addr >> 27) >= (dct_sel_base >> 11)))
- high_range = 1;
-
- channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
+ high_range = true;
- chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
- dct_sel_base_off, hole_valid,
- hole_off, dram_base);
+ channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
- intlv_shift = f10_map_intlv_en_to_shift(intlv_en);
+ chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
+ high_range, dct_sel_base);
- /* remove Node ID (in case of memory interleaving) */
- tmp = chan_addr & 0xFC0;
+ /* Remove node interleaving, see F1x120 */
+ if (intlv_en)
+ chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
+ (chan_addr & 0xfff);
- chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;
-
- /* remove channel interleave and hash */
+ /* remove channel interleave */
if (dct_interleave_enabled(pvt) &&
!dct_high_range_enabled(pvt) &&
!dct_ganging_enabled(pvt)) {
- if (dct_sel_interleave_addr(pvt) != 1)
- chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
- else {
- tmp = chan_addr & 0xFC0;
- chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
- | tmp;
- }
+
+ if (dct_sel_interleave_addr(pvt) != 1) {
+ if (dct_sel_interleave_addr(pvt) == 0x3)
+ /* hash 9 */
+ chan_addr = ((chan_addr >> 10) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ /* A[6] or hash 6 */
+ chan_addr = ((chan_addr >> 7) << 6) |
+ (chan_addr & 0x3f);
+ } else
+ /* A[12] */
+ chan_addr = ((chan_addr >> 13) << 12) |
+ (chan_addr & 0xfff);
}
- debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
- chan_addr, (u32)(chan_addr >> 8));
+ debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
- cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);
+ cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
if (cs_found >= 0) {
*nid = node_id;
@@ -1539,23 +1464,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
return cs_found;
}
-static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
int *node, int *chan_sel)
{
- int dram_range, cs_found = -EINVAL;
- u64 dram_base, dram_limit;
+ int cs_found = -EINVAL;
+ unsigned range;
- for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {
+ for (range = 0; range < DRAM_RANGES; range++) {
- if (!pvt->dram_rw_en[dram_range])
+ if (!dram_rw(pvt, range))
continue;
- dram_base = pvt->dram_base[dram_range];
- dram_limit = pvt->dram_limit[dram_range];
+ if ((get_dram_base(pvt, range) <= sys_addr) &&
+ (get_dram_limit(pvt, range) >= sys_addr)) {
- if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {
-
- cs_found = f10_match_to_this_node(pvt, dram_range,
+ cs_found = f1x_match_to_this_node(pvt, range,
sys_addr, node,
chan_sel);
if (cs_found >= 0)
@@ -1572,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
* The @sys_addr is usually an error address received from the hardware
* (MCX_ADDR).
*/
-static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
- struct err_regs *err_info,
- u64 sys_addr)
+static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 page, offset;
int nid, csrow, chan = 0;
- u16 syndrome;
- csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
+ csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
@@ -1590,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
error_address_to_page_and_offset(sys_addr, &page, &offset);
- syndrome = extract_syndrome(err_info);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
* this point.
*/
- if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
+ if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
if (chan >= 0)
@@ -1614,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
/*
* debug routine to display the memory sizes of all logical DIMMs and their
- * CSROWs as well
+ * CSROWs
*/
-static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
+static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
int dimm, size0, size1, factor = 0;
- u32 dbam;
- u32 *dcsb;
+ u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
+ u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
if (boot_cpu_data.x86 == 0xf) {
- if (pvt->dclr0 & F10_WIDTH_128)
+ if (pvt->dclr0 & WIDTH_128)
factor = 1;
/* K8 families < revF not supported yet */
@@ -1634,7 +1553,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
}
dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
- dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
+ dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
+ : pvt->csels[0].csbases;
debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
@@ -1644,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
for (dimm = 0; dimm < 4; dimm++) {
size0 = 0;
- if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
- size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2] & DCSB_CS_ENABLE)
+ size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
size1 = 0;
- if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
- size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
+ if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
+ size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
+ DBAM_DIMM(dimm, dbam));
amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
dimm * 2, size0 << factor,
@@ -1664,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = {
.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
.ops = {
.early_channel_count = k8_early_channel_count,
- .get_error_address = k8_get_error_address,
- .read_dram_base_limit = k8_read_dram_base_limit,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
+ .read_dct_pci_cfg = k8_read_dct_pci_cfg,
}
},
[F10_CPUS] = {
@@ -1675,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = {
.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
.ops = {
- .early_channel_count = f10_early_channel_count,
- .get_error_address = f10_get_error_address,
- .read_dram_base_limit = f10_read_dram_base_limit,
- .read_dram_ctl_register = f10_read_dram_ctl_register,
- .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
+ .read_dct_pci_cfg = f10_read_dct_pci_cfg,
+ }
+ },
+ [F15_CPUS] = {
+ .ctl_name = "F15h",
+ .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f15_dbam_to_chip_select,
+ .read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
};
@@ -1770,15 +1700,15 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
- int v_dim)
+static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+ unsigned v_dim)
{
unsigned int i, err_sym;
for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
u16 s = syndrome;
- int v_idx = err_sym * v_dim;
- int v_end = (err_sym + 1) * v_dim;
+ unsigned v_idx = err_sym * v_dim;
+ unsigned v_end = (err_sym + 1) * v_dim;
/* walk over all 16 bits of the syndrome */
for (i = 1; i < (1U << 16); i <<= 1) {
@@ -1850,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
struct amd64_pvt *pvt = mci->pvt_info;
int err_sym = -1;
- if (pvt->syn_type == 8)
+ if (pvt->ecc_sym_sz == 8)
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors),
- pvt->syn_type);
- else if (pvt->syn_type == 4)
+ pvt->ecc_sym_sz);
+ else if (pvt->ecc_sym_sz == 4)
err_sym = decode_syndrome(syndrome, x4_vectors,
ARRAY_SIZE(x4_vectors),
- pvt->syn_type);
+ pvt->ecc_sym_sz);
else {
- amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
+ amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
return err_sym;
}
- return map_err_sym_to_channel(err_sym, pvt->syn_type);
+ return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
}
/*
* Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
* ADDRESS and process.
*/
-static void amd64_handle_ce(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
{
struct amd64_pvt *pvt = mci->pvt_info;
u64 sys_addr;
+ u16 syndrome;
/* Ensure that the Error Address is VALID */
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
+ syndrome = extract_syndrome(m->status);
amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
- pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
+ pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
}
/* Handle any Un-correctable Errors (UEs) */
-static void amd64_handle_ue(struct mem_ctl_info *mci,
- struct err_regs *info)
+static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
{
- struct amd64_pvt *pvt = mci->pvt_info;
struct mem_ctl_info *log_mci, *src_mci = NULL;
int csrow;
u64 sys_addr;
@@ -1902,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
log_mci = mci;
- if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
+ if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
return;
}
- sys_addr = pvt->ops->get_error_address(mci, info);
+ sys_addr = get_error_address(m);
/*
* Find out which node the error address belongs to. This may be
@@ -1936,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
}
static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
- struct err_regs *info)
+ struct mce *m)
{
- u16 ec = EC(info->nbsl);
- u8 xec = XEC(info->nbsl, 0x1f);
- int ecc_type = (info->nbsh >> 13) & 0x3;
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, 0x1f);
+ u8 ecc_type = (m->status >> 45) & 0x3;
/* Bail early out if this was an 'observed' error */
- if (PP(ec) == K8_NBSL_PP_OBS)
+ if (PP(ec) == NBSL_PP_OBS)
return;
/* Do only ECC errors */
@@ -1951,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
return;
if (ecc_type == 2)
- amd64_handle_ce(mci, info);
+ amd64_handle_ce(mci, m);
else if (ecc_type == 1)
- amd64_handle_ue(mci, info);
+ amd64_handle_ue(mci, m);
}
void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
{
struct mem_ctl_info *mci = mcis[node_id];
- struct err_regs regs;
-
- regs.nbsl = (u32) m->status;
- regs.nbsh = (u32)(m->status >> 32);
- regs.nbeal = (u32) m->addr;
- regs.nbeah = (u32)(m->addr >> 32);
- regs.nbcfg = nbcfg;
-
- __amd64_decode_bus_error(mci, &regs);
-
- /*
- * Check the UE bit of the NB status high register, if set generate some
- * logs. If NOT a GART error, then process the event as a NO-INFO event.
- * If it was a GART error, skip that process.
- *
- * FIXME: this should go somewhere else, if at all.
- */
- if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
- edac_mc_handle_ue_no_info(mci, "UE bit is set");
+ __amd64_decode_bus_error(mci, m);
}
/*
@@ -2027,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u64 msr_val;
u32 tmp;
- int dram;
+ unsigned range;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -2046,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt)
} else
debugf0(" TOP_MEM2 disabled.\n");
- amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
+ amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
- if (pvt->ops->read_dram_ctl_register)
- pvt->ops->read_dram_ctl_register(pvt);
+ read_dram_ctl_register(pvt);
- for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
- /*
- * Call CPU specific READ function to get the DRAM Base and
- * Limit values from the DCT.
- */
- pvt->ops->read_dram_base_limit(pvt, dram);
+ for (range = 0; range < DRAM_RANGES; range++) {
+ u8 rw;
- /*
- * Only print out debug info on rows with both R and W Enabled.
- * Normal processing, compiler should optimize this whole 'if'
- * debug output block away.
- */
- if (pvt->dram_rw_en[dram] != 0) {
- debugf1(" DRAM-BASE[%d]: 0x%016llx "
- "DRAM-LIMIT: 0x%016llx\n",
- dram,
- pvt->dram_base[dram],
- pvt->dram_limit[dram]);
-
- debugf1(" IntlvEn=%s %s %s "
- "IntlvSel=%d DstNode=%d\n",
- pvt->dram_IntlvEn[dram] ?
- "Enabled" : "Disabled",
- (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
- (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
- pvt->dram_IntlvSel[dram],
- pvt->dram_DstNode[dram]);
- }
+ /* read settings for this DRAM range */
+ read_dram_base_limit_regs(pvt, range);
+
+ rw = dram_rw(pvt, range);
+ if (!rw)
+ continue;
+
+ debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
+ range,
+ get_dram_base(pvt, range),
+ get_dram_limit(pvt, range));
+
+ debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
+ dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
+ (rw & 0x1) ? "R" : "-",
+ (rw & 0x2) ? "W" : "-",
+ dram_intlv_sel(pvt, range),
+ dram_dst_node(pvt, range));
}
- amd64_read_dct_base_mask(pvt);
+ read_dct_base_mask(pvt);
- amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
- amd64_read_dbam_reg(pvt);
+ amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
+ amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);
+ amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
+ amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
- if (boot_cpu_data.x86 >= 0x10) {
- if (!dct_ganging_enabled(pvt)) {
- amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
- amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
- }
- amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ if (!dct_ganging_enabled(pvt)) {
+ amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
+ amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
}
- if (boot_cpu_data.x86 == 0x10 &&
- boot_cpu_data.x86_model > 7 &&
- /* F3x180[EccSymbolSize]=1 => x8 symbols */
- tmp & BIT(25))
- pvt->syn_type = 8;
- else
- pvt->syn_type = 4;
+ pvt->ecc_sym_sz = 4;
- amd64_dump_misc_regs(pvt);
+ if (c->x86 >= 0x10) {
+ amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
+ amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
+
+ /* F10h, revD and later can do x8 ECC too */
+ if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+ pvt->ecc_sym_sz = 8;
+ }
+ dump_misc_regs(pvt);
}
/*
* NOTE: CPU Revision Dependent code
*
* Input:
- * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
+ * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
* k8 private pointer to -->
* DRAM Bank Address mapping register
* node_id
@@ -2144,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
* encompasses
*
*/
-static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
+static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
{
u32 cs_mode, nr_pages;
@@ -2157,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
*/
cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
- nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
+ nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
/*
* If dual channel then double the memory size of single channel.
@@ -2180,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr;
+ u64 input_addr_min, input_addr_max, sys_addr, base, mask;
u32 val;
int i, empty = 1;
- amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
pvt->nbcfg = val;
- pvt->ctl_error_info.nbcfg = val;
debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
pvt->mc_node_id, val,
- !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- for (i = 0; i < pvt->cs_count; i++) {
+ for_each_chip_select(i, 0, pvt) {
csrow = &mci->csrows[i];
- if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
+ if (!csrow_enabled(i, 0, pvt)) {
debugf1("----CSROW %d EMPTY for node %d\n", i,
pvt->mc_node_id);
continue;
@@ -2206,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci)
i, pvt->mc_node_id);
empty = 0;
- csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
+ csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
- csrow->page_mask = ~mask_from_dct_mask(pvt, i);
+
+ get_cs_base_and_mask(pvt, i, 0, &base, &mask);
+ csrow->page_mask = ~mask;
/* 8 bytes of resolution */
csrow->mtype = amd64_determine_memory_type(pvt, i);
@@ -2231,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci)
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
- if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE)
csrow->edac_mode =
- (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
+ (pvt->nbcfg & NBCFG_CHIPKILL) ?
EDAC_S4ECD4ED : EDAC_SECDED;
else
csrow->edac_mode = EDAC_NONE;
@@ -2243,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
{
int cpu;
@@ -2253,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(int nid)
+static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2270,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid)
for_each_cpu(cpu, mask) {
struct msr *reg = per_cpu_ptr(msrs, cpu);
- nbe = reg->l & K8_MSR_MCGCTL_NBE;
+ nbe = reg->l & MSR_MCGCTL_NBE;
debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
cpu, reg->q,
@@ -2305,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
struct msr *reg = per_cpu_ptr(msrs, cpu);
if (on) {
- if (reg->l & K8_MSR_MCGCTL_NBE)
+ if (reg->l & MSR_MCGCTL_NBE)
s->flags.nb_mce_enable = 1;
- reg->l |= K8_MSR_MCGCTL_NBE;
+ reg->l |= MSR_MCGCTL_NBE;
} else {
/*
* Turn off NB MCE reporting only when it was off before
*/
if (!s->flags.nb_mce_enable)
- reg->l &= ~K8_MSR_MCGCTL_NBE;
+ reg->l &= ~MSR_MCGCTL_NBE;
}
}
wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
@@ -2328,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
bool ret = true;
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
if (toggle_ecc_err_reporting(s, nid, ON)) {
amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
return false;
}
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
- /* turn on UECCEn and CECCEn bits */
s->old_nbctl = value & mask;
s->nbctl_valid = true;
value |= mask;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("DRAM ECC disabled on this node, enabling...\n");
s->flags.nb_ecc_prev = 0;
/* Attempt to turn on DRAM ECC Enable */
- value |= K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ value |= NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- if (!(value & K8_NBCFG_ECC_ENABLE)) {
+ if (!(value & NBCFG_ECC_ENABLE)) {
amd64_warn("Hardware rejected DRAM ECC enable,"
"check memory DIMM configuration.\n");
ret = false;
@@ -2372,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
s->flags.nb_ecc_prev = 1;
}
- debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- nid, value,
- !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
+ debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
+ nid, value, !!(value & NBCFG_ECC_ENABLE));
return ret;
}
@@ -2382,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
struct pci_dev *F3)
{
- u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
+ u32 value, mask = 0x3; /* UECC/CECC enable */
+
if (!s->nbctl_valid)
return;
- amd64_read_pci_cfg(F3, K8_NBCTL, &value);
+ amd64_read_pci_cfg(F3, NBCTL, &value);
value &= ~mask;
value |= s->old_nbctl;
- pci_write_config_dword(F3, K8_NBCTL, value);
+ amd64_write_pci_cfg(F3, NBCTL, value);
/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
if (!s->flags.nb_ecc_prev) {
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
- value &= ~K8_NBCFG_ECC_ENABLE;
- pci_write_config_dword(F3, K8_NBCFG, value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
+ value &= ~NBCFG_ECC_ENABLE;
+ amd64_write_pci_cfg(F3, NBCFG, value);
}
/* restore the NB Enable MCGCTL bit */
@@ -2423,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid)
u8 ecc_en = 0;
bool nb_mce_en = false;
- amd64_read_pci_cfg(F3, K8_NBCFG, &value);
+ amd64_read_pci_cfg(F3, NBCFG, &value);
- ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
+ ecc_en = !!(value & NBCFG_ECC_ENABLE);
amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
@@ -2463,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
-static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
+static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
+ struct amd64_family_type *fam)
{
struct amd64_pvt *pvt = mci->pvt_info;
mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
mci->edac_ctl_cap = EDAC_FLAG_NONE;
- if (pvt->nbcap & K8_NBCAP_SECDED)
+ if (pvt->nbcap & NBCAP_SECDED)
mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
- if (pvt->nbcap & K8_NBCAP_CHIPKILL)
+ if (pvt->nbcap & NBCAP_CHIPKILL)
mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
mci->edac_cap = amd64_determine_edac_cap(pvt);
mci->mod_name = EDAC_MOD_STR;
mci->mod_ver = EDAC_AMD64_VERSION;
- mci->ctl_name = pvt->ctl_name;
+ mci->ctl_name = fam->ctl_name;
mci->dev_name = pci_name(pvt->F2);
mci->ctl_page_to_phys = NULL;
@@ -2500,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
case 0xf:
fam_type = &amd64_family_types[K8_CPUS];
pvt->ops = &amd64_family_types[K8_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS;
break;
+
case 0x10:
fam_type = &amd64_family_types[F10_CPUS];
pvt->ops = &amd64_family_types[F10_CPUS].ops;
- pvt->ctl_name = fam_type->ctl_name;
- pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS;
+ break;
+
+ case 0x15:
+ fam_type = &amd64_family_types[F15_CPUS];
+ pvt->ops = &amd64_family_types[F15_CPUS].ops;
break;
default:
@@ -2517,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
+ amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
(fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
@@ -2564,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid);
+ mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
if (!mci)
goto err_siblings;
mci->pvt_info = pvt;
mci->dev = &pvt->F2->dev;
- setup_mci_misc_attrs(mci);
+ setup_mci_misc_attrs(mci, fam_type);
if (init_csrows(mci))
mci->edac_cap = EDAC_FLAG_NONE;
@@ -2714,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = {
.class = 0,
.class_mask = 0,
},
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+
{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
@@ -2754,7 +2668,7 @@ static int __init amd64_edac_init(void)
{
int err = -ENODEV;
- edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");
+ printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
opstate_init();
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 613ec72b0f65..11be36a311eb 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -144,7 +144,7 @@
* sections 3.5.4 and 3.5.5 for more information.
*/
-#define EDAC_AMD64_VERSION "v3.3.0"
+#define EDAC_AMD64_VERSION "3.4.0"
#define EDAC_MOD_STR "amd64_edac"
/* Extended Model from CPUID, for CPU Revision numbers */
@@ -153,85 +153,64 @@
#define K8_REV_F 4
/* Hardware limit on ChipSelect rows per MC and processors per system */
-#define MAX_CS_COUNT 8
-#define DRAM_REG_COUNT 8
+#define NUM_CHIPSELECTS 8
+#define DRAM_RANGES 8
#define ON true
#define OFF false
/*
+ * Create a contiguous bitmask starting at bit position @lo and ending at
+ * position @hi. For example
+ *
+ * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))
+
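+
A standalone C11 sanity check of the example above (outside the patch); note the (lo, hi) argument order, the reverse of the GENMASK(h, l) that later landed in mainline's <linux/bits.h>:

/* standalone C11 check of the macro's documented example */
#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

_Static_assert(GENMASK(21, 39) == 0x000000ffffe00000ULL,
	       "bits 39..21 inclusive");

int main(void) { return 0; }
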
+/*
* PCI-defined configuration space registers
*/
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
/*
* Function 1 - Address Map
*/
-#define K8_DRAM_BASE_LOW 0x40
-#define K8_DRAM_LIMIT_LOW 0x44
-#define K8_DHAR 0xf0
-
-#define DHAR_VALID BIT(0)
-#define F10_DRAM_MEM_HOIST_VALID BIT(1)
+#define DRAM_BASE_LO 0x40
+#define DRAM_LIMIT_LO 0x44
-#define DHAR_BASE_MASK 0xff000000
-#define dhar_base(dhar) (dhar & DHAR_BASE_MASK)
+#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))
+#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3))
+#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
+#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
-#define K8_DHAR_OFFSET_MASK 0x0000ff00
-#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16)
+#define DHAR 0xf0
+#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
+#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
+#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
+#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
-#define F10_DHAR_OFFSET_MASK 0x0000ff80
/* NOTE: Extra mask bit vs K8 */
-#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16)
+#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16)
+#define DCT_CFG_SEL 0x10C
-/* F10 High BASE/LIMIT registers */
-#define F10_DRAM_BASE_HIGH 0x140
-#define F10_DRAM_LIMIT_HIGH 0x144
+#define DRAM_BASE_HI 0x140
+#define DRAM_LIMIT_HI 0x144
/*
* Function 2 - DRAM controller
*/
-#define K8_DCSB0 0x40
-#define F10_DCSB1 0x140
+#define DCSB0 0x40
+#define DCSB1 0x140
+#define DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_CS_ENABLE BIT(0)
-#define K8_DCSB_NPT_SPARE BIT(1)
-#define K8_DCSB_NPT_TESTFAIL BIT(2)
+#define DCSM0 0x60
+#define DCSM1 0x160
-/*
- * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form
- * the address
- */
-#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL)
-#define REV_E_DCS_SHIFT 4
-
-#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_F1Xh_DCS_SHIFT 8
-
-/*
- * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount
- * to form the address
- */
-#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL)
-#define REV_F_DCS_SHIFT 8
-
-/* DRAM CS Mask Registers */
-#define K8_DCSM0 0x60
-#define F10_DCSM1 0x160
-
-/* REV E: select [29:21] and [15:9] from DCSM */
-#define REV_E_DCSM_MASK_BITS 0x3FE0FE00
-
-/* unused bits [24:20] and [12:0] */
-#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF
-
-/* REV F and later: select [28:19] and [13:5] from DCSM */
-#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0
-
-/* unused bits [26:22] and [12:0] */
-#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF
+#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE)
#define DBAM0 0x80
#define DBAM1 0x180
@@ -241,148 +220,84 @@
#define DBAM_MAX_VALUE 11
-
-#define F10_DCLR_0 0x90
-#define F10_DCLR_1 0x190
+#define DCLR0 0x90
+#define DCLR1 0x190
#define REVE_WIDTH_128 BIT(16)
-#define F10_WIDTH_128 BIT(11)
+#define WIDTH_128 BIT(11)
+#define DCHR0 0x94
+#define DCHR1 0x194
+#define DDR3_MODE BIT(8)
-#define F10_DCHR_0 0x94
-#define F10_DCHR_1 0x194
+#define DCT_SEL_LO 0x110
+#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
+#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
+#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
+#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
-#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
-#define DDR3_MODE BIT(8)
-#define F10_DCHR_MblMode BIT(6)
+#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4)))
+#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5))
+#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10))
-#define F10_DCTL_SEL_LOW 0x110
-#define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3)
-#define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0))
-#define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2))
-#define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4))
-#define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5))
-#define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8))
-#define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10))
+#define SWAP_INTLV_REG 0x10c
-#define F10_DCTL_SEL_HIGH 0x114
+#define DCT_SEL_HI 0x114
/*
* Function 3 - Misc Control
*/
-#define K8_NBCTL 0x40
-
-/* Correctable ECC error reporting enable */
-#define K8_NBCTL_CECCEn BIT(0)
-
-/* UnCorrectable ECC error reporting enable */
-#define K8_NBCTL_UECCEn BIT(1)
+#define NBCTL 0x40
-#define K8_NBCFG 0x44
-#define K8_NBCFG_CHIPKILL BIT(23)
-#define K8_NBCFG_ECC_ENABLE BIT(22)
+#define NBCFG 0x44
+#define NBCFG_CHIPKILL BIT(23)
+#define NBCFG_ECC_ENABLE BIT(22)
-#define K8_NBSL 0x48
-
-
-/* Family F10h: Normalized Extended Error Codes */
-#define F10_NBSL_EXT_ERR_RES 0x0
+/* F3x48: NBSL */
#define F10_NBSL_EXT_ERR_ECC 0x8
+#define NBSL_PP_OBS 0x2
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB
-#define F10_NBSL_EXT_ERR_L3_PROTO 0xB
-
-#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC
-#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD
-#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE
-
-/* Next two are overloaded values */
-#define F10_NBSL_EXT_ERR_GART_WALK 0xF
-#define F10_NBSL_EXT_ERR_DEV_WALK 0xF
-
-/* 0x10 to 0x1B: Reserved */
-#define F10_NBSL_EXT_ERR_L3_DATA 0x1C
-#define F10_NBSL_EXT_ERR_L3_TAG 0x1D
-#define F10_NBSL_EXT_ERR_L3_LRU 0x1E
-
-/* K8: Normalized Extended Error Codes */
-#define K8_NBSL_EXT_ERR_ECC 0x0
-#define K8_NBSL_EXT_ERR_CRC 0x1
-#define K8_NBSL_EXT_ERR_SYNC 0x2
-#define K8_NBSL_EXT_ERR_MST 0x3
-#define K8_NBSL_EXT_ERR_TGT 0x4
-#define K8_NBSL_EXT_ERR_GART 0x5
-#define K8_NBSL_EXT_ERR_RMW 0x6
-#define K8_NBSL_EXT_ERR_WDT 0x7
-#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8
-#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD
-
-/*
- * The following are for BUS type errors AFTER values have been normalized by
- * shifting right
- */
-#define K8_NBSL_PP_SRC 0x0
-#define K8_NBSL_PP_RES 0x1
-#define K8_NBSL_PP_OBS 0x2
-#define K8_NBSL_PP_GENERIC 0x3
-
-#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF)
-
-#define K8_NBEAL 0x50
-#define K8_NBEAH 0x54
-#define K8_SCRCTRL 0x58
-
-#define F10_NB_CFG_LOW 0x88
+#define SCRCTRL 0x58
#define F10_ONLINE_SPARE 0xB0
-#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1))
-#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3))
-#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007)
-#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007)
+#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1)
+#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7)
#define F10_NB_ARRAY_ADDR 0xB8
-
-#define F10_NB_ARRAY_DRAM_ECC 0x80000000
+#define F10_NB_ARRAY_DRAM_ECC BIT(31)
/* Bits [2:1] are used to select 16-byte section within a 64-byte cacheline */
#define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1)
#define F10_NB_ARRAY_DATA 0xBC
-
#define SET_NB_DRAM_INJECTION_WRITE(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(17) | bits)
-
#define SET_NB_DRAM_INJECTION_READ(word, bits) \
(BIT(((word) & 0xF) + 20) | \
BIT(16) | bits)
-#define K8_NBCAP 0xE8
-#define K8_NBCAP_CORES (BIT(12)|BIT(13))
-#define K8_NBCAP_CHIPKILL BIT(4)
-#define K8_NBCAP_SECDED BIT(3)
-#define K8_NBCAP_DCT_DUAL BIT(0)
+#define NBCAP 0xE8
+#define NBCAP_CHIPKILL BIT(4)
+#define NBCAP_SECDED BIT(3)
+#define NBCAP_DCT_DUAL BIT(0)
#define EXT_NB_MCA_CFG 0x180
/* MSRs */
-#define K8_MSR_MCGCTL_NBE BIT(4)
-
-#define K8_MSR_MC4CTL 0x0410
-#define K8_MSR_MC4STAT 0x0411
-#define K8_MSR_MC4ADDR 0x0412
+#define MSR_MCGCTL_NBE BIT(4)
/* AMD sets the first MC device at device ID 0x18. */
-static inline int get_node_id(struct pci_dev *pdev)
+static inline u8 get_node_id(struct pci_dev *pdev)
{
return PCI_SLOT(pdev->devfn) - 0x18;
}
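
A standalone sketch of the devfn arithmetic, with a hypothetical devfn for node 1:

    #include <stdio.h>

    /* PCI_SLOT() as defined in <linux/pci.h> */
    #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)

    int main(void)
    {
            /* hypothetical devfn: device 0x19, function 3 -- node 1's F3 */
            int devfn = (0x19 << 3) | 3;

            /* node 0 lives at device 0x18, node 1 at 0x19, and so on */
            printf("node id = %d\n", PCI_SLOT(devfn) - 0x18);  /* 1 */
            return 0;
    }
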
-enum amd64_chipset_families {
+enum amd_families {
K8_CPUS = 0,
F10_CPUS,
+ F15_CPUS,
+ NUM_FAMILIES,
};
/* Error injection control structure */
@@ -392,13 +307,35 @@ struct error_injection {
u32 bit_map;
};
+/* low and high part of PCI config space regs */
+struct reg_pair {
+ u32 lo, hi;
+};
+
+/*
+ * See F1x[1, 0][7C:40] DRAM Base/Limit Registers
+ */
+struct dram_range {
+ struct reg_pair base;
+ struct reg_pair lim;
+};
+
+/* A DCT's collection of chip selects */
+struct chip_select {
+ u32 csbases[NUM_CHIPSELECTS];
+ u8 b_cnt;
+
+ u32 csmasks[NUM_CHIPSELECTS];
+ u8 m_cnt;
+};
+
struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
struct pci_dev *F1, *F2, *F3;
- int mc_node_id; /* MC index of this MC node */
+ unsigned mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
int channel_count;
@@ -414,60 +351,50 @@ struct amd64_pvt {
u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
- /* DRAM CS Base Address Registers F2x[1,0][5C:40] */
- u32 dcsb0[MAX_CS_COUNT];
- u32 dcsb1[MAX_CS_COUNT];
-
- /* DRAM CS Mask Registers F2x[1,0][6C:60] */
- u32 dcsm0[MAX_CS_COUNT];
- u32 dcsm1[MAX_CS_COUNT];
-
- /*
- * Decoded parts of DRAM BASE and LIMIT Registers
- * F1x[78,70,68,60,58,50,48,40]
- */
- u64 dram_base[DRAM_REG_COUNT];
- u64 dram_limit[DRAM_REG_COUNT];
- u8 dram_IntlvSel[DRAM_REG_COUNT];
- u8 dram_IntlvEn[DRAM_REG_COUNT];
- u8 dram_DstNode[DRAM_REG_COUNT];
- u8 dram_rw_en[DRAM_REG_COUNT];
-
- /*
- * The following fields are set at (load) run time, after CPU revision
- * has been determined, since the dct_base and dct_mask registers vary
- * based on revision
- */
- u32 dcsb_base; /* DCSB base bits */
- u32 dcsm_mask; /* DCSM mask bits */
- u32 cs_count; /* num chip selects (== num DCSB registers) */
- u32 num_dcsm; /* Number of DCSM registers */
- u32 dcs_mask_notused; /* DCSM notused mask bits */
- u32 dcs_shift; /* DCSB and DCSM shift value */
+ /* one for each DCT */
+ struct chip_select csels[2];
+
+ /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
+ struct dram_range ranges[DRAM_RANGES];
u64 top_mem; /* top of memory below 4GB */
u64 top_mem2; /* top of memory above 4GB */
- u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */
- u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */
- u32 online_spare; /* On-Line spare Reg */
+ u32 dct_sel_lo; /* DRAM Controller Select Low */
+ u32 dct_sel_hi; /* DRAM Controller Select High */
+ u32 online_spare; /* On-Line spare Reg */
/* x4 or x8 syndromes in use */
- u8 syn_type;
-
- /* temp storage for when input is received from sysfs */
- struct err_regs ctl_error_info;
+ u8 ecc_sym_sz;
/* place to store error injection parameters prior to issue */
struct error_injection injection;
+};
- /* DCT per-family scrubrate setting */
- u32 min_scrubrate;
+static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
- /* family name this instance is running on */
- const char *ctl_name;
+ if (boot_cpu_data.x86 == 0xf)
+ return addr;
-};
+ return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
+}
+
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+{
+ u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
+
+ if (boot_cpu_data.x86 == 0xf)
+ return lim;
+
+ return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim;
+}
+
+static inline u16 extract_syndrome(u64 status)
+{
+ return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
+}
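
A standalone sketch of the address assembly that get_dram_base() performs, using invented register contents:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t base_lo = 0x00250003;  /* hypothetical F1x40 contents */
            uint32_t base_hi = 0x00000001;  /* hypothetical F1x140 contents */

            /* bits 31:16 of the low register are address bits 39:24 */
            uint64_t addr = ((uint64_t)base_lo & 0xffff0000) << 8;

            /* families past K8 add address bits 47:40 from the high register */
            addr |= ((uint64_t)base_hi & 0xff) << 40;

            printf("DRAM base = 0x%012llx\n", (unsigned long long)addr);
            return 0;
    }
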
/*
* per-node ECC settings descriptor
@@ -482,14 +409,6 @@ struct ecc_settings {
} flags;
};
-extern const char *tt_msgs[4];
-extern const char *ll_msgs[4];
-extern const char *rrrr_msgs[16];
-extern const char *to_msgs[2];
-extern const char *pp_msgs[4];
-extern const char *ii_msgs[4];
-extern const char *htlink_msgs[8];
-
#ifdef CONFIG_EDAC_DEBUG
#define NUM_DBG_ATTRS 5
#else
@@ -511,14 +430,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
*/
struct low_ops {
int (*early_channel_count) (struct amd64_pvt *pvt);
-
- u64 (*get_error_address) (struct mem_ctl_info *mci,
- struct err_regs *info);
- void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
- void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
- void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
- struct err_regs *info, u64 SystemAddr);
- int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
+ void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
+ u16 syndrome);
+ int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode);
+ int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset,
+ u32 *val, const char *func);
};
struct amd64_family_type {
@@ -527,28 +443,17 @@ struct amd64_family_type {
struct low_ops ops;
};
-static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
- u32 *val, const char *func)
-{
- int err = 0;
-
- err = pci_read_config_dword(pdev, offset, val);
- if (err)
- amd64_warn("%s: error reading F%dx%x.\n",
- func, PCI_FUNC(pdev->devfn), offset);
-
- return err;
-}
+int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 *val, const char *func);
+int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
+ u32 val, const char *func);
#define amd64_read_pci_cfg(pdev, offset, val) \
- amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
+ __amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
-/*
- * For future CPU versions, verify the following as new 'slow' rates appear and
- * modify the necessary skip values for the supported CPU.
- */
-#define K8_MIN_SCRUB_RATE_BITS 0x0
-#define F10_MIN_SCRUB_RATE_BITS 0x5
+#define amd64_write_pci_cfg(pdev, offset, val) \
+ __amd64_write_pci_cfg_dword(pdev, offset, val, __func__)
+
+#define amd64_read_dct_pci_cfg(pvt, offset, val) \
+ pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__)
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
u64 *hole_offset, u64 *hole_size);
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 688478de1cbd..303f10e03dda 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the READ request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
@@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci,
/* Form value to choose 16-byte section of cacheline */
section = F10_NB_ARRAY_DRAM_ECC |
SET_NB_ARRAY_ADDRESS(pvt->injection.section);
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section);
word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word,
pvt->injection.bit_map);
/* Issue 'word' and 'bit' along with the WRITE request */
- pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
+ amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits);
debugf0("section=0x%x word_bits=0x%x\n", section, word_bits);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 39d97cfdf58c..73196f7b7229 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -785,10 +785,10 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
{
int err;
- debugf1("%s()\n", __func__);
+ debugf4("%s()\n", __func__);
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
struct mcidev_sysfs_group_kobj *grp_kobj;
@@ -818,7 +818,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
if (err < 0)
return err;
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
err = sysfs_create_file(kobj, &sysfs_attrib->attr);
@@ -853,26 +853,26 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
 * First remove all the attributes
*/
while (sysfs_attrib) {
- debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
+ debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib);
if (sysfs_attrib->grp) {
- debugf1("%s() seeking for group %s\n",
+ debugf4("%s() seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
list_for_each_entry(grp_kobj,
&mci->grp_kobj_list, list) {
- debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
+ debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp);
if (grp_kobj->grp == sysfs_attrib->grp) {
edac_remove_mci_instance_attributes(mci,
grp_kobj->grp->mcidev_attr,
&grp_kobj->kobj, count + 1);
- debugf0("%s() group %s\n", __func__,
+ debugf4("%s() group %s\n", __func__,
sysfs_attrib->grp->name);
kobject_put(&grp_kobj->kobj);
}
}
- debugf1("%s() end of seeking for group %s\n",
+ debugf4("%s() end of seeking for group %s\n",
__func__, sysfs_attrib->grp->name);
} else if (sysfs_attrib->attr.name) {
- debugf0("%s() file %s\n", __func__,
+ debugf4("%s() file %s\n", __func__,
sysfs_attrib->attr.name);
sysfs_remove_file(kobj, &sysfs_attrib->attr);
} else
@@ -979,7 +979,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
- debugf0("%s() unregister this mci kobj\n", __func__);
+ debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
if (mci->csrows[i].nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
@@ -989,18 +989,18 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
/* remove this mci instance's attributes */
if (mci->mc_driver_sysfs_attributes) {
- debugf0("%s() unregister mci private attributes\n", __func__);
+ debugf4("%s() unregister mci private attributes\n", __func__);
edac_remove_mci_instance_attributes(mci,
mci->mc_driver_sysfs_attributes,
&mci->edac_mci_kobj, 0);
}
/* remove the symlink */
- debugf0("%s() remove_link\n", __func__);
+ debugf4("%s() remove_link\n", __func__);
sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK);
/* unregister this instance's kobject */
- debugf0("%s() remove_mci_instance\n", __func__);
+ debugf4("%s() remove_mci_instance\n", __func__);
kobject_put(&mci->edac_mci_kobj);
}
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index f6cf73d93359..795cfbc0bf50 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -594,6 +594,7 @@ static bool nb_noop_mce(u16 ec, u8 xec)
void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u32 nbsh = (u32)(m->status >> 32);
@@ -602,9 +603,8 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
pr_emerg(HW_ERR "Northbridge Error (node %d", node_id);
/* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */
- if ((boot_cpu_data.x86 == 0x10) &&
- (boot_cpu_data.x86_model > 7)) {
- if (nbsh & K8_NBSH_ERR_CPU_VAL)
+ if (c->x86 == 0x10 && c->x86_model > 7) {
+ if (nbsh & NBSH_ERR_CPU_VAL)
core = nbsh & nb_err_cpumask;
} else {
u8 assoc_cpus = nbsh & nb_err_cpumask;
@@ -646,7 +646,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg)
if (!fam_ops->nb_mce(ec, xec))
goto wrong_nb_mce;
- if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10)
+ if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15)
if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder)
nb_bus_decoder(node_id, m, nbcfg);
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 45dda47173f2..795a3206acf5 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -31,19 +31,10 @@
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
-#define K8_NBSH 0x4C
-
-#define K8_NBSH_VALID_BIT BIT(31)
-#define K8_NBSH_OVERFLOW BIT(30)
-#define K8_NBSH_UC_ERR BIT(29)
-#define K8_NBSH_ERR_EN BIT(28)
-#define K8_NBSH_MISCV BIT(27)
-#define K8_NBSH_VALID_ERROR_ADDR BIT(26)
-#define K8_NBSH_PCC BIT(25)
-#define K8_NBSH_ERR_CPU_VAL BIT(24)
-#define K8_NBSH_CECC BIT(14)
-#define K8_NBSH_UECC BIT(13)
-#define K8_NBSH_ERR_SCRUBER BIT(8)
+/*
+ * F3x4C bits (MCi_STATUS' high half)
+ */
+#define NBSH_ERR_CPU_VAL BIT(24)
enum tt_ids {
TT_INSTR = 0,
@@ -86,17 +77,6 @@ extern const char *to_msgs[];
extern const char *ii_msgs[];
/*
- * relevant NB regs
- */
-struct err_regs {
- u32 nbcfg;
- u32 nbsh;
- u32 nbsl;
- u32 nbeah;
- u32 nbeal;
-};
-
-/*
* per-family decoder ops
*/
struct amd_decoder_ops {
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 0902d4460039..a6feb78c404c 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -73,32 +73,17 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_I810
tristate "Intel I810"
- # BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP
- depends on DRM && AGP && AGP_INTEL && BKL
+ # !PREEMPT because of missing ioctl locking
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
for this driver to work.
-choice
- prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
- depends on DRM && AGP && AGP_INTEL
- optional
-
-config DRM_I830
- tristate "i830 driver"
- # BKL usage in order to avoid AB-BA deadlocks, i830 may get removed
- depends on BKL
- help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM or 865G integrated graphics. If M is selected, the
- module will be called i830. AGP support is required for this driver
- to work. This driver is used by the older X releases X.org 6.7 and
- XFree86 4.3. If unsure, build this and i915 as modules and the X server
- will load the correct one.
-
config DRM_I915
- tristate "i915 driver"
+ tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+ depends on DRM
+ depends on AGP
depends on AGP_INTEL
# we need shmfs for the swappable backing store, and in particular
# the shmem_readpage() which depends upon tmpfs
@@ -115,12 +100,20 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
help
- Choose this option if you have a system that has Intel 830M, 845G,
- 852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
- module will be called i915. AGP support is required for this driver
- to work. This driver is used by the Intel driver in X.org 6.8 and
- XFree86 4.4 and above. If unsure, build this and i830 as modules and
- the X server will load the correct one.
+ Choose this option if you have a system that has "Intel Graphics
+ Media Accelerator" or "HD Graphics" integrated graphics,
+ including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+ G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+ Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+ If M is selected, the module will be called i915. AGP support
+ is required for this driver to work. This driver is used by
+ the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+ replaces the older i830 module that supported a subset of the
+ hardware in older X.org releases.
+
+ Note that the older i810/i815 chipsets require the use of the
+ i810 driver instead, and the Atom z5xx series has an entirely
+ different implementation.
config DRM_I915_KMS
bool "Enable modesetting on intel by default"
@@ -132,8 +125,6 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
-endchoice
-
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 997c43d04909..89cf05a72d1c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -12,7 +12,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o
+ drm_trace_points.o drm_global.o drm_usb.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
@@ -29,7 +29,6 @@ obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
-obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 654faa803dcb..4c95b5fd9df3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2694,3 +2694,36 @@ void drm_mode_config_reset(struct drm_device *dev)
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
+
+int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_create_dumb *args = data;
+
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+ return dev->driver->dumb_create(file_priv, dev, args);
+}
+
+int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_map_dumb *args = data;
+
+ /* call driver ioctl to get mmap offset */
+ if (!dev->driver->dumb_map_offset)
+ return -ENOSYS;
+
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
+}
+
+int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_destroy_dumb *args = data;
+
+ if (!dev->driver->dumb_destroy)
+ return -ENOSYS;
+
+ return dev->driver->dumb_destroy(file_priv, dev, args->handle);
+}
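
For illustration (not part of this patch), a userspace sketch exercising the three new ioctls; the device path is hypothetical and error handling is trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/drm.h>

    int main(void)
    {
            struct drm_mode_create_dumb creq;
            struct drm_mode_map_dumb mreq;
            struct drm_mode_destroy_dumb dreq;
            int fd = open("/dev/dri/card0", O_RDWR);  /* hypothetical node */

            if (fd < 0)
                    return 1;

            memset(&creq, 0, sizeof(creq));
            creq.width = 1024;
            creq.height = 768;
            creq.bpp = 32;
            if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
                    return 1;  /* -ENOSYS when the driver lacks dumb_create */

            memset(&mreq, 0, sizeof(mreq));
            mreq.handle = creq.handle;
            ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
            printf("handle %u pitch %u mmap offset 0x%llx\n",
                   creq.handle, creq.pitch, (unsigned long long)mreq.offset);

            memset(&dreq, 0, sizeof(dreq));
            dreq.handle = creq.handle;
            ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
            close(fd);
            return 0;
    }
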
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 271835a71570..93a112d45c1a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -67,6 +67,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -150,7 +151,10 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -234,49 +238,6 @@ int drm_lastclose(struct drm_device * dev)
return 0;
}
-/**
- * Module initialization. Called via init_module at module load time, or via
- * linux/init/main.c (this is not currently supported).
- *
- * \return zero on success or a negative number on failure.
- *
- * Initializes an array of drm_device structures, and attempts to
- * initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the device.
- *
- * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
- * after the initialization for driver customization.
- */
-int drm_init(struct drm_driver *driver)
-{
- DRM_DEBUG("\n");
- INIT_LIST_HEAD(&driver->device_list);
-
- if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
- return drm_platform_init(driver);
- else
- return drm_pci_init(driver);
-}
-
-EXPORT_SYMBOL(drm_init);
-
-void drm_exit(struct drm_driver *driver)
-{
- struct drm_device *dev, *tmp;
- DRM_DEBUG("\n");
-
- if (driver->driver_features & DRIVER_MODESET) {
- pci_unregister_driver(&driver->pci_driver);
- } else {
- list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
- drm_put_dev(dev);
- }
-
- DRM_INFO("Module unloaded\n");
-}
-
-EXPORT_SYMBOL(drm_exit);
-
/** File operations structure */
static const struct file_operations drm_stub_fops = {
.owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a245d17165ae..9c595e3b9c20 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -230,24 +230,32 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &start,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = len,
- .buf = buf,
- }
- };
+ int ret, retries = 5;
- if (i2c_transfer(adapter, msgs, 2) == 2)
- return 0;
+ /* The core i2c driver will automatically retry the transfer if the
+ * adapter reports EAGAIN. However, we find that bit-banging transfers
+ * are susceptible to errors on a heavily loaded machine and can
+ * generate spurious NAKs and timeouts. Retrying the transfer
+ * of the individual block a few times seems to overcome this.
+ */
+ do {
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DDC_ADDR,
+ .flags = 0,
+ .len = 1,
+ .buf = &start,
+ }, {
+ .addr = DDC_ADDR,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = buf,
+ }
+ };
+ ret = i2c_transfer(adapter, msgs, 2);
+ } while (ret != 2 && --retries);
- return -1;
+ return ret == 2 ? 0 : -1;
}
static u8 *
@@ -449,12 +457,11 @@ static void edid_fixup_preferred(struct drm_connector *connector,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
+ struct drm_display_mode *mode = NULL;
int i;
- struct drm_display_mode *ptr, *mode;
- mode = NULL;
for (i = 0; i < drm_num_dmt_modes; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
@@ -885,7 +892,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
}
static bool
-mode_is_rb(struct drm_display_mode *mode)
+mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
@@ -894,7 +901,8 @@ mode_is_rb(struct drm_display_mode *mode)
}
static bool
-mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_hsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int hsync, hmin, hmax;
@@ -910,7 +918,8 @@ mode_in_hsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
}
static bool
-mode_in_vsync_range(struct drm_display_mode *mode, struct edid *edid, u8 *t)
+mode_in_vsync_range(const struct drm_display_mode *mode,
+ struct edid *edid, u8 *t)
{
int vsync, vmin, vmax;
@@ -941,7 +950,7 @@ range_pixel_clock(struct edid *edid, u8 *t)
}
static bool
-mode_in_range(struct drm_display_mode *mode, struct edid *edid,
+mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
struct detailed_timing *timing)
{
u32 max_clock;
@@ -1472,7 +1481,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
int hdisplay, int vdisplay)
{
int i, count, num_modes = 0;
- struct drm_display_mode *mode, *ptr;
+ struct drm_display_mode *mode;
struct drm_device *dev = connector->dev;
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
@@ -1482,7 +1491,7 @@ int drm_add_modes_noedid(struct drm_connector *connector,
vdisplay = 0;
for (i = 0; i < count; i++) {
- ptr = &drm_dmt_modes[i];
+ const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hdisplay && vdisplay) {
/*
* Only when two are valid, they will be used to check
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 6eb7592e152f..5f2064489fd5 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -32,7 +32,7 @@
* This table is copied from xfree86/modes/xf86EdidModes.c.
 * Modes with the reduced-blanking feature are deleted.
*/
-static struct drm_display_mode drm_dmt_modes[] = {
+static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
@@ -266,7 +266,7 @@ static struct drm_display_mode drm_dmt_modes[] = {
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-static struct drm_display_mode edid_est_modes[] = {
+static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index f73ef4390db6..950720473967 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -627,6 +627,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
value = (red << info->var.red.offset) |
(green << info->var.green.offset) |
(blue << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
+ }
palette[regno] = value;
return 0;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ea1c4b019ebf..57ce27c9a747 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,7 +101,7 @@ drm_gem_init(struct drm_device *dev)
dev->mm_private = mm;
- if (drm_ht_create(&mm->offset_hash, 19)) {
+ if (drm_ht_create(&mm->offset_hash, 12)) {
kfree(mm);
return -ENOMEM;
}
@@ -181,7 +181,7 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
-static int
+int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
struct drm_device *dev;
@@ -214,6 +214,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
return 0;
}
+EXPORT_SYMBOL(drm_gem_handle_delete);
/**
* Create a handle for this object. This adds a handle reference
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index a93d7b4ddaa6..e3a75688f3cd 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -39,27 +39,18 @@
int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
- unsigned int i;
+ unsigned int size = 1 << order;
- ht->size = 1 << order;
ht->order = order;
- ht->fill = 0;
ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
- if (!ht->use_vmalloc) {
- ht->table = kcalloc(ht->size, sizeof(*ht->table), GFP_KERNEL);
- }
- if (!ht->table) {
- ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
+ if (size <= PAGE_SIZE / sizeof(*ht->table))
+ ht->table = kcalloc(size, sizeof(*ht->table), GFP_KERNEL);
+ else
+ ht->table = vzalloc(size*sizeof(*ht->table));
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
- for (i=0; i< ht->size; ++i) {
- INIT_HLIST_HEAD(&ht->table[i]);
- }
return 0;
}
EXPORT_SYMBOL(drm_ht_create);
@@ -180,7 +171,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
list = drm_ht_find_key(ht, key);
if (list) {
hlist_del_init(list);
- ht->fill--;
return 0;
}
return -EINVAL;
@@ -189,7 +179,6 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
- ht->fill--;
return 0;
}
EXPORT_SYMBOL(drm_ht_remove_item);
@@ -197,10 +186,10 @@ EXPORT_SYMBOL(drm_ht_remove_item);
void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
- if (ht->use_vmalloc)
- vfree(ht->table);
- else
+ if ((PAGE_SIZE / sizeof(*ht->table)) >> ht->order)
kfree(ht->table);
+ else
+ vfree(ht->table);
ht->table = NULL;
}
}
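
Both paths now derive the kmalloc-versus-vmalloc choice from the order alone. A standalone sketch of that threshold, assuming pointer-sized table entries:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define ENTRY_SIZE  sizeof(void *)  /* stand-in for sizeof(*ht->table) */

    int main(void)
    {
            for (unsigned int order = 8; order <= 12; order++) {
                    unsigned long size = 1UL << order;

                    /* same test as drm_ht_create()/drm_ht_remove() above */
                    int kmalloced = size <= PAGE_SIZE / ENTRY_SIZE;

                    printf("order %2u -> %s\n", order,
                           kmalloced ? "kcalloc" : "vzalloc");
            }
            return 0;
    }
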
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index be9a9c07d152..ab1162da70f8 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -47,30 +47,19 @@ int drm_name_info(struct seq_file *m, void *data)
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
-
+ const char *bus_name;
if (!master)
return 0;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->platform_device->name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s\n",
- dev->driver->platform_device->name);
- }
+ bus_name = dev->driver->bus->get_name(dev);
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ bus_name,
+ dev_name(dev->dev), master->unique);
} else {
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- dev_name(dev->dev), master->unique);
- } else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- dev_name(dev->dev));
- }
+ seq_printf(m, "%s %s\n",
+ bus_name, dev_name(dev->dev));
}
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 47db4df37a69..7f6912a16761 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -96,7 +96,7 @@ int drm_setunique(struct drm_device *dev, void *data,
{
struct drm_unique *u = data;
struct drm_master *master = file_priv->master;
- int domain, bus, slot, func, ret;
+ int ret;
if (master->unique_len || master->unique)
return -EBUSY;
@@ -104,50 +104,12 @@ int drm_setunique(struct drm_device *dev, void *data,
if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- master->unique_len = u->unique_len;
- master->unique_size = u->unique_len + 1;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique) {
- ret = -ENOMEM;
- goto err;
- }
-
- if (copy_from_user(master->unique, u->unique, master->unique_len)) {
- ret = -EFAULT;
- goto err;
- }
-
- master->unique[master->unique_len] = '\0';
-
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
+ if (!dev->driver->bus->set_unique)
+ return -EINVAL;
- /* Return error if the busid submitted doesn't match the device's actual
- * busid.
- */
- ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3) {
- ret = -EINVAL;
+ ret = dev->driver->bus->set_unique(dev, master, u);
+ if (ret)
goto err;
- }
-
- domain = bus >> 8;
- bus &= 0xff;
-
- if ((domain != drm_get_pci_domain(dev)) ||
- (bus != dev->pdev->bus->number) ||
- (slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn))) {
- ret = -EINVAL;
- goto err;
- }
return 0;
@@ -159,74 +121,15 @@ err:
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
- int len, ret;
+ int ret;
if (master->unique != NULL)
drm_unset_busid(dev, master);
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
- master->unique_len = 10 + strlen(dev->platformdev->name);
- master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
-
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "platform:%s", dev->platformdev->name);
-
- if (len > master->unique_len) {
- DRM_ERROR("Unique buffer overflowed\n");
- ret = -EINVAL;
- goto err;
- }
-
- dev->devname =
- kmalloc(strlen(dev->platformdev->name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->platformdev->name,
- master->unique);
-
- } else {
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len,
- "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len) {
- DRM_ERROR("buffer overflow");
- ret = -EINVAL;
- goto err;
- } else
- master->unique_len = len;
-
- dev->devname =
- kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
-
- if (dev->devname == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
- }
-
+ ret = dev->driver->bus->set_busid(dev, master);
+ if (ret)
+ goto err;
return 0;
-
err:
drm_unset_busid(dev, master);
return ret;
@@ -365,6 +268,25 @@ int drm_getstats(struct drm_device *dev, void *data,
}
/**
+ * Get device/driver capabilities
+ */
+int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_get_cap *req = data;
+
+ req->value = 0;
+ switch (req->capability) {
+ case DRM_CAP_DUMB_BUFFER:
+ if (dev->driver->dumb_create)
+ req->value = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
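
For illustration (not part of this patch), a userspace sketch probing the new capability before allocating dumb buffers; the device path is hypothetical:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <drm/drm.h>

    int main(void)
    {
            struct drm_get_cap cap;
            int fd = open("/dev/dri/card0", O_RDWR);  /* hypothetical node */

            if (fd < 0)
                    return 1;

            memset(&cap, 0, sizeof(cap));
            cap.capability = DRM_CAP_DUMB_BUFFER;

            /* -EINVAL for capabilities the kernel does not know about */
            if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0 && cap.value)
                    printf("driver supports dumb buffers\n");

            close(fd);
            return 0;
    }
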
+
+/**
* Setversion ioctl.
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 28d1d3c24d65..a34ef97d3c81 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -74,23 +74,13 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ if (!dev->driver->bus->irq_by_busid)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
- (p->busnum & 0xff) != dev->pdev->bus->number ||
- p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
- return -EINVAL;
-
- p->irq = dev->pdev->irq;
-
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
- p->irq);
-
- return 0;
+ return dev->driver->bus->irq_by_busid(dev, p);
}
/*
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c59515ba7e69..add1737dae0d 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
else {
child =
list_entry(mm->unused_nodes.next,
- struct drm_mm_node, free_stack);
- list_del(&child->free_stack);
+ struct drm_mm_node, node_list);
+ list_del(&child->node_list);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
@@ -94,195 +94,242 @@ int drm_mm_pre_get(struct drm_mm *mm)
return ret;
}
++mm->num_unused;
- list_add_tail(&node->free_stack, &mm->unused_nodes);
+ list_add_tail(&node->node_list, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
-static int drm_mm_create_tail_node(struct drm_mm *mm,
- unsigned long start,
- unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
- struct drm_mm_node *child;
-
- child = drm_mm_kmalloc(mm, atomic);
- if (unlikely(child == NULL))
- return -ENOMEM;
-
- child->free = 1;
- child->size = size;
- child->start = start;
- child->mm = mm;
+ return hole_node->start + hole_node->size;
+}
- list_add_tail(&child->node_list, &mm->node_list);
- list_add_tail(&child->free_stack, &mm->free_stack);
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+{
+ struct drm_mm_node *next_node =
+ list_entry(hole_node->node_list.next, struct drm_mm_node,
+ node_list);
- return 0;
+ return next_node->start;
}
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
- unsigned long size,
- int atomic)
+static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
{
- struct drm_mm_node *child;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
- child = drm_mm_kmalloc(parent->mm, atomic);
- if (unlikely(child == NULL))
- return NULL;
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- INIT_LIST_HEAD(&child->free_stack);
+ if (alignment)
+ tmp = hole_start % alignment;
- child->size = size;
- child->start = parent->start;
- child->mm = parent->mm;
+ if (!tmp) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
+ } else
+ wasted = alignment - tmp;
- list_add_tail(&child->node_list, &parent->node_list);
- INIT_LIST_HEAD(&child->free_stack);
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
- parent->size -= size;
- parent->start += size;
- return child;
-}
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
+ } else {
+ node->hole_follows = 0;
+ }
+}
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
int atomic)
{
+ struct drm_mm_node *node;
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
- if (alignment)
- tmp = node->start % alignment;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
- if (tmp) {
- align_splitoff =
- drm_mm_split_at_start(node, alignment - tmp, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
- }
+ return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_generic);
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
- } else {
- node = drm_mm_split_at_start(node, size, atomic);
- }
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment)
+{
+ struct drm_mm_node *hole_node;
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+ hole_node = drm_mm_search_free(mm, size, alignment, 0);
+ if (!hole_node)
+ return -ENOSPC;
- return node;
+ drm_mm_insert_helper(hole_node, node, size, alignment);
+
+ return 0;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_insert_node);
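
For illustration (not part of this patch), a sketch of hypothetical driver code using the new embeddable-node interface; struct my_buffer and its helpers are invented:

    #include <drm/drm_mm.h>

    struct my_buffer {                      /* hypothetical driver object */
            struct drm_mm_node node;        /* embedded; must start out cleared */
            void *priv;
    };

    static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
                              unsigned long size)
    {
            /* searches for a hole and fills in buf->node; -ENOSPC if none fits */
            return drm_mm_insert_node(mm, &buf->node, size, 4096);
    }

    static void my_buffer_unbind(struct my_buffer *buf)
    {
            /* the embedded counterpart of drm_mm_put_block(): no kfree() here */
            drm_mm_remove_node(&buf->node);
    }
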
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- int atomic)
+static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
+ struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
- struct drm_mm_node *align_splitoff = NULL;
- unsigned tmp = 0;
- unsigned wasted = 0;
+ struct drm_mm *mm = hole_node->mm;
+ unsigned long tmp = 0, wasted = 0;
+ unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+ unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+ BUG_ON(!hole_node->hole_follows || node->allocated);
- if (node->start < start)
- wasted += start - node->start;
+ if (hole_start < start)
+ wasted += start - hole_start;
if (alignment)
- tmp = ((node->start + wasted) % alignment);
+ tmp = (hole_start + wasted) % alignment;
if (tmp)
wasted += alignment - tmp;
- if (wasted) {
- align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
- if (unlikely(align_splitoff == NULL))
- return NULL;
+
+ if (!wasted) {
+ hole_node->hole_follows = 0;
+ list_del_init(&hole_node->hole_stack);
}
- if (node->size == size) {
- list_del_init(&node->free_stack);
- node->free = 0;
+ node->start = hole_start + wasted;
+ node->size = size;
+ node->mm = mm;
+ node->allocated = 1;
+
+ INIT_LIST_HEAD(&node->hole_stack);
+ list_add(&node->node_list, &hole_node->node_list);
+
+ BUG_ON(node->start + node->size > hole_end);
+ BUG_ON(node->start + node->size > end);
+
+ if (node->start + node->size < hole_end) {
+ list_add(&node->hole_stack, &mm->hole_stack);
+ node->hole_follows = 1;
} else {
- node = drm_mm_split_at_start(node, size, atomic);
+ node->hole_follows = 0;
}
+}
- if (align_splitoff)
- drm_mm_put_block(align_splitoff);
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ int atomic)
+{
+ struct drm_mm_node *node;
+
+ node = drm_mm_kmalloc(hole_node->mm, atomic);
+ if (unlikely(node == NULL))
+ return NULL;
+
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
*/
-
-void drm_mm_put_block(struct drm_mm_node *cur)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long size, unsigned alignment,
+ unsigned long start, unsigned long end)
{
+ struct drm_mm_node *hole_node;
- struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->node_list;
- struct list_head *root_head = &mm->node_list;
- struct drm_mm_node *prev_node = NULL;
- struct drm_mm_node *next_node;
+ hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+ start, end, 0);
+ if (!hole_node)
+ return -ENOSPC;
- int merged = 0;
+ drm_mm_insert_helper_range(hole_node, node, size, alignment,
+ start, end);
- BUG_ON(cur->scanned_block || cur->scanned_prev_free
- || cur->scanned_next_free);
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
- if (cur_head->prev != root_head) {
- prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, node_list);
- if (prev_node->free) {
- prev_node->size += cur->size;
- merged = 1;
- }
- }
- if (cur_head->next != root_head) {
- next_node =
- list_entry(cur_head->next, struct drm_mm_node, node_list);
- if (next_node->free) {
- if (merged) {
- prev_node->size += next_node->size;
- list_del(&next_node->node_list);
- list_del(&next_node->free_stack);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->free_stack,
- &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(next_node);
- spin_unlock(&mm->unused_lock);
- } else {
- next_node->size += cur->size;
- next_node->start = cur->start;
- merged = 1;
- }
- }
- }
- if (!merged) {
- cur->free = 1;
- list_add(&cur->free_stack, &mm->free_stack);
- } else {
- list_del(&cur->node_list);
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->free_stack, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(cur);
- spin_unlock(&mm->unused_lock);
- }
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node;
+
+ BUG_ON(node->scanned_block || node->scanned_prev_free
+ || node->scanned_next_free);
+
+ prev_node =
+ list_entry(node->node_list.prev, struct drm_mm_node, node_list);
+
+ if (node->hole_follows) {
+ BUG_ON(drm_mm_hole_node_start(node)
+ == drm_mm_hole_node_end(node));
+ list_del(&node->hole_stack);
+ } else
+ BUG_ON(drm_mm_hole_node_start(node)
+ != drm_mm_hole_node_end(node));
+
+ if (!prev_node->hole_follows) {
+ prev_node->hole_follows = 1;
+ list_add(&prev_node->hole_stack, &mm->hole_stack);
+ } else
+ list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+ list_del(&node->node_list);
+ node->allocated = 0;
}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+
+ drm_mm_remove_node(node);
+
+ spin_lock(&mm->unused_lock);
+ if (mm->num_unused < MM_UNUSED_TARGET) {
+ list_add(&node->node_list, &mm->unused_nodes);
+ ++mm->num_unused;
+ } else
+ kfree(node);
+ spin_unlock(&mm->unused_lock);
+}
EXPORT_SYMBOL(drm_mm_put_block);
static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +366,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- if (!check_free_hole(entry->start, entry->start + entry->size,
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ BUG_ON(!entry->hole_follows);
+ if (!check_free_hole(drm_mm_hole_node_start(entry),
+ drm_mm_hole_node_end(entry),
size, alignment))
continue;
@@ -353,12 +402,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- list_for_each_entry(entry, &mm->free_stack, free_stack) {
- unsigned long adj_start = entry->start < start ?
- start : entry->start;
- unsigned long adj_end = entry->start + entry->size > end ?
- end : entry->start + entry->size;
+ list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+ unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+ start : drm_mm_hole_node_start(entry);
+ unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+ end : drm_mm_hole_node_end(entry);
+ BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
@@ -376,6 +426,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
EXPORT_SYMBOL(drm_mm_search_free_in_range);
/**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+ list_replace(&old->node_list, &new->node_list);
+ list_replace(&old->hole_stack, &new->hole_stack);
+ new->hole_follows = old->hole_follows;
+ new->mm = old->mm;
+ new->start = old->start;
+ new->size = old->size;
+
+ old->allocated = 0;
+ new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
+/**
 * Initialize LRU scanning.
*
* This simply sets up the scanning routines with the parameters for the desired
@@ -393,6 +460,7 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
mm->scan_check_range = 0;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);
@@ -418,6 +486,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
mm->scan_start = start;
mm->scan_end = end;
mm->scan_check_range = 1;
+ mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);
@@ -430,70 +499,42 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct list_head *prev_free, *next_free;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
+ unsigned long hole_start, hole_end;
unsigned long adj_start;
unsigned long adj_end;
mm->scanned_blocks++;
- prev_free = next_free = NULL;
-
- BUG_ON(node->free);
+ BUG_ON(node->scanned_block);
node->scanned_block = 1;
- node->free = 1;
-
- if (node->node_list.prev != &mm->node_list) {
- prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
- node_list);
-
- if (prev_node->free) {
- list_del(&prev_node->node_list);
- node->start = prev_node->start;
- node->size += prev_node->size;
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- prev_node->scanned_prev_free = 1;
-
- prev_free = &prev_node->free_stack;
- }
- }
-
- if (node->node_list.next != &mm->node_list) {
- next_node = list_entry(node->node_list.next, struct drm_mm_node,
- node_list);
-
- if (next_node->free) {
- list_del(&next_node->node_list);
-
- node->size += next_node->size;
-
- next_node->scanned_next_free = 1;
-
- next_free = &next_node->free_stack;
- }
- }
-
- /* The free_stack list is not used for allocated objects, so these two
- * pointers can be abused (as long as no allocations in this memory
- * manager happens). */
- node->free_stack.prev = prev_free;
- node->free_stack.next = next_free;
+ node->scanned_preceeds_hole = prev_node->hole_follows;
+ prev_node->hole_follows = 1;
+ list_del(&node->node_list);
+ node->node_list.prev = &prev_node->node_list;
+ node->node_list.next = &mm->prev_scanned_node->node_list;
+ mm->prev_scanned_node = node;
+ hole_start = drm_mm_hole_node_start(prev_node);
+ hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
- adj_start = node->start < mm->scan_start ?
- mm->scan_start : node->start;
- adj_end = node->start + node->size > mm->scan_end ?
- mm->scan_end : node->start + node->size;
+ adj_start = hole_start < mm->scan_start ?
+ mm->scan_start : hole_start;
+ adj_end = hole_end > mm->scan_end ?
+ mm->scan_end : hole_end;
} else {
- adj_start = node->start;
- adj_end = node->start + node->size;
+ adj_start = hole_start;
+ adj_end = hole_end;
}
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
- mm->scan_hit_start = node->start;
- mm->scan_hit_size = node->size;
+ mm->scan_hit_start = hole_start;
+ mm->scan_hit_size = hole_end;
return 1;
}
@@ -519,39 +560,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
- struct drm_mm_node *prev_node, *next_node;
+ struct drm_mm_node *prev_node;
mm->scanned_blocks--;
BUG_ON(!node->scanned_block);
node->scanned_block = 0;
- node->free = 0;
-
- prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
- free_stack);
- next_node = list_entry(node->free_stack.next, struct drm_mm_node,
- free_stack);
- if (prev_node) {
- BUG_ON(!prev_node->scanned_prev_free);
- prev_node->scanned_prev_free = 0;
-
- list_add_tail(&prev_node->node_list, &node->node_list);
-
- node->start = prev_node->start + prev_node->size;
- node->size -= prev_node->size;
- }
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
- if (next_node) {
- BUG_ON(!next_node->scanned_next_free);
- next_node->scanned_next_free = 0;
-
- list_add(&next_node->node_list, &node->node_list);
-
- node->size -= next_node->size;
- }
-
- INIT_LIST_HEAD(&node->free_stack);
+ prev_node->hole_follows = node->scanned_preceeds_hole;
+ INIT_LIST_HEAD(&node->node_list);
+ list_add(&node->node_list, &prev_node->node_list);
/* Only need to check for containment because start & size for the
* complete resulting free block (not just the desired part) is
@@ -568,7 +589,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
int drm_mm_clean(struct drm_mm * mm)
{
- struct list_head *head = &mm->node_list;
+ struct list_head *head = &mm->head_node.node_list;
return (head->next->next == head);
}
@@ -576,38 +597,40 @@ EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
- INIT_LIST_HEAD(&mm->node_list);
- INIT_LIST_HEAD(&mm->free_stack);
+ INIT_LIST_HEAD(&mm->hole_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
- return drm_mm_create_tail_node(mm, start, size, 0);
+ /* Clever trick to avoid a special case in the free hole tracking. */
+ INIT_LIST_HEAD(&mm->head_node.node_list);
+ INIT_LIST_HEAD(&mm->head_node.hole_stack);
+ mm->head_node.hole_follows = 1;
+ mm->head_node.scanned_block = 0;
+ mm->head_node.scanned_prev_free = 0;
+ mm->head_node.scanned_next_free = 0;
+ mm->head_node.mm = mm;
+ mm->head_node.start = start + size;
+ mm->head_node.size = start - mm->head_node.start;
+ list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+ return 0;
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
- struct list_head *bnode = mm->free_stack.next;
- struct drm_mm_node *entry;
- struct drm_mm_node *next;
+ struct drm_mm_node *entry, *next;
- entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
- if (entry->node_list.next != &mm->node_list ||
- entry->free_stack.next != &mm->free_stack) {
+ if (!list_empty(&mm->head_node.node_list)) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->free_stack);
- list_del(&entry->node_list);
- kfree(entry);
-
spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
- list_del(&entry->free_stack);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+ list_del(&entry->node_list);
kfree(entry);
--mm->num_unused;
}
@@ -620,19 +643,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
prefix, entry->start, entry->start + entry->size,
- entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ entry->size);
+ total_used += entry->size;
+
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+ prefix, hole_start, hole_end,
+ hole_size);
+ total_free += hole_size;
+ }
}
- printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+ total = total_free + total_used;
+
+ printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +682,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
struct drm_mm_node *entry;
- int total_used = 0, total_free = 0, total = 0;
-
- list_for_each_entry(entry, &mm->node_list, node_list) {
- seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
- total += entry->size;
- if (entry->free)
- total_free += entry->size;
- else
- total_used += entry->size;
+ unsigned long total_used = 0, total_free = 0, total = 0;
+ unsigned long hole_start, hole_end, hole_size;
+
+ hole_start = drm_mm_hole_node_start(&mm->head_node);
+ hole_end = drm_mm_hole_node_end(&mm->head_node);
+ hole_size = hole_end - hole_start;
+ if (hole_size)
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+
+ drm_mm_for_each_node(entry, mm) {
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+ entry->start, entry->start + entry->size,
+ entry->size);
+ total_used += entry->size;
+ if (entry->hole_follows) {
+ hole_start = drm_mm_hole_node_start(entry);
+ hole_end = drm_mm_hole_node_end(entry);
+ hole_size = hole_end - hole_start;
+ seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+ hole_start, hole_end, hole_size);
+ total_free += hole_size;
+ }
}
- seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+ total = total_free + total_used;
+
+ seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 58e65f92c232..25bf87390f53 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -593,7 +593,7 @@ EXPORT_SYMBOL(drm_mode_height);
*
* Return @mode's hsync rate in kHz, rounded to the nearest int.
*/
-int drm_mode_hsync(struct drm_display_mode *mode)
+int drm_mode_hsync(const struct drm_display_mode *mode)
{
unsigned int calc_val;
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(drm_mode_hsync);
* If it is 70.288, it will return 70Hz.
* If it is 59.6, it will return 60Hz.
*/
-int drm_mode_vrefresh(struct drm_display_mode *mode)
+int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
unsigned int calc_val;
@@ -725,7 +725,7 @@ EXPORT_SYMBOL(drm_mode_set_crtcinfo);
* a pointer to it. Used to create new instances of established modes.
*/
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
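
Constifying these helpers is safe because they only read the mode's timing
fields. Roughly, the arithmetic drm_mode_vrefresh() performs looks like the
sketch below (the real helper also trusts a precomputed mode->vrefresh when
set and corrects for interlace, doublescan and vscan; sketch_vrefresh is an
illustrative name):

static int sketch_vrefresh(const struct drm_display_mode *mode)
{
        unsigned int num = mode->clock * 1000;  /* pixel clock, kHz -> Hz */
        unsigned int den = mode->htotal * mode->vtotal; /* pixels per frame */

        return (num + den / 2) / den;           /* round to nearest Hz */
}
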
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f5bd9e590c80..e1aee4f6a7c6 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -125,6 +125,176 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
#ifdef CONFIG_PCI
+
+static int drm_get_pci_domain(struct drm_device *dev)
+{
+#ifndef __alpha__
+ /* For historical reasons, drm_get_pci_domain() is broken
+ * on most archs and has to remain so for userspace interface
+ * versions < 1.4, except on alpha, which was correct from the
+ * beginning.
+ */
+ if (dev->if_version < 0x10004)
+ return 0;
+#endif /* __alpha__ */
+
+ return pci_domain_nr(dev->pdev->bus);
+}
+
+static int drm_pci_get_irq(struct drm_device *dev)
+{
+ return dev->pdev->irq;
+}
+
+static const char *drm_pci_get_name(struct drm_device *dev)
+{
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ return pdriver->name;
+}
+
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+ struct pci_driver *pdriver = dev->driver->kdriver.pci;
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+
+ if (len >= master->unique_len) {
+ DRM_ERROR("buffer overflow\n");
+ ret = -EINVAL;
+ goto err;
+ } else {
+ master->unique_len = len;
+ }
+
+ dev->devname =
+ kmalloc(strlen(pdriver->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", pdriver->name,
+ master->unique);
+
+ return 0;
+err:
+ return ret;
+}
+
+int drm_pci_set_unique(struct drm_device *dev,
+ struct drm_master *master,
+ struct drm_unique *u)
+{
+ int domain, bus, slot, func, ret;
+ const char *bus_name;
+
+ master->unique_len = u->unique_len;
+ master->unique_size = u->unique_len + 1;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ master->unique[master->unique_len] = '\0';
+
+ bus_name = dev->driver->bus->get_name(dev);
+ dev->devname = kmalloc(strlen(bus_name) +
+ strlen(master->unique) + 2, GFP_KERNEL);
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", bus_name,
+ master->unique);
+
+ /* Return error if the busid submitted doesn't match the device's actual
+ * busid.
+ */
+ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ domain = bus >> 8;
+ bus &= 0xff;
+
+ if ((domain != drm_get_pci_domain(dev)) ||
+ (bus != dev->pdev->bus->number) ||
+ (slot != PCI_SLOT(dev->pdev->devfn)) ||
+ (func != PCI_FUNC(dev->pdev->devfn))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ return 0;
+err:
+ return ret;
+}
+
+int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
+{
+ if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+ (p->busnum & 0xff) != dev->pdev->bus->number ||
+ p->devnum != PCI_SLOT(dev->pdev->devfn) ||
+ p->funcnum != PCI_FUNC(dev->pdev->devfn))
+ return -EINVAL;
+
+ p->irq = dev->pdev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
+ return 0;
+}
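
Note that p->busnum packs the PCI domain into the bits above the low byte,
which is what the comparison above decodes. A small sketch of that encoding
(the sketch_ name is illustrative):

static void sketch_decode_busnum(int busnum, int *domain, int *bus)
{
        *domain = busnum >> 8;          /* upper bits: PCI domain */
        *bus = busnum & 0xff;           /* low byte: bus number */
}
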
+
+int drm_pci_agp_init(struct drm_device *dev)
+{
+ if (drm_core_has_AGP(dev)) {
+ if (drm_pci_device_is_agp(dev))
+ dev->agp = drm_agp_init(dev);
+ if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+ && (dev->agp == NULL)) {
+ DRM_ERROR("Cannot initialize the agpgart module.\n");
+ return -EINVAL;
+ }
+ if (drm_core_has_MTRR(dev)) {
+ if (dev->agp)
+ dev->agp->agp_mtrr =
+ mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+ }
+ }
+ return 0;
+}
+
+static struct drm_bus drm_pci_bus = {
+ .bus_type = DRIVER_BUS_PCI,
+ .get_irq = drm_pci_get_irq,
+ .get_name = drm_pci_get_name,
+ .set_busid = drm_pci_set_busid,
+ .set_unique = drm_pci_set_unique,
+ .agp_init = drm_pci_agp_init,
+};
+
/**
* Register.
*
@@ -219,7 +389,7 @@ err_g1:
EXPORT_SYMBOL(drm_get_pci_dev);
/**
- * PCI device initialization. Called via drm_init at module load time,
+ * PCI device initialization. Called directly from modules at load time.
*
* \return zero on success or a negative number on failure.
*
@@ -229,18 +399,24 @@ EXPORT_SYMBOL(drm_get_pci_dev);
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
struct pci_dev *pdev = NULL;
const struct pci_device_id *pid;
int i;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.pci = pdriver;
+ driver->bus = &drm_pci_bus;
+
if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
+ return pci_register_driver(pdriver);
/* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
+ for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
+ pid = &pdriver->id_table[i];
/* Loop around setting up a DRM device for each PCI device
* matching our ID and device class. If we had the internal
@@ -265,10 +441,27 @@ int drm_pci_init(struct drm_driver *driver)
#else
-int drm_pci_init(struct drm_driver *driver)
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
{
return -1;
}
#endif
+
+EXPORT_SYMBOL(drm_pci_init);
+
/*@}*/
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ if (driver->driver_features & DRIVER_MODESET) {
+ pci_unregister_driver(pdriver);
+ } else {
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ }
+ DRM_INFO("Module unloaded\n");
+}
+EXPORT_SYMBOL(drm_pci_exit);
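
With drm_init()/drm_exit() gone, each PCI driver now owns its pci_driver
and hands it to the core, as the i810 conversion further down in this patch
does. A minimal sketch of the new registration pattern (the example_ names
are illustrative, not a real driver):

static struct pci_device_id example_pciidlist[] = {
        /* device IDs for the hardware this driver binds */
};

static struct drm_driver example_driver = {
        /* driver_features, fops, name, version, etc. */
};

static struct pci_driver example_pci_driver = {
        .name = "example",
        .id_table = example_pciidlist,
};

static int __init example_init(void)
{
        /* core stores the pci_driver in driver->kdriver.pci */
        return drm_pci_init(&example_driver, &example_pci_driver);
}

static void __exit example_exit(void)
{
        drm_pci_exit(&example_driver, &example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
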
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 92d1d0fb7b75..7223f06d8e58 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -109,8 +109,60 @@ err_g1:
}
EXPORT_SYMBOL(drm_get_platform_dev);
+static int drm_platform_get_irq(struct drm_device *dev)
+{
+ return platform_get_irq(dev->platformdev, 0);
+}
+
+static const char *drm_platform_get_name(struct drm_device *dev)
+{
+ return dev->platformdev->name;
+}
+
+static int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
+{
+ int len, ret;
+
+ master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "platform:%s", dev->platformdev->name);
+
+ if (len >= master->unique_len) {
+ DRM_ERROR("Unique buffer overflowed\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev->devname =
+ kmalloc(strlen(dev->platformdev->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+ master->unique);
+ return 0;
+err:
+ return ret;
+}
+
+static struct drm_bus drm_platform_bus = {
+ .bus_type = DRIVER_BUS_PLATFORM,
+ .get_irq = drm_platform_get_irq,
+ .get_name = drm_platform_get_name,
+ .set_busid = drm_platform_set_busid,
+};
+
/**
- * Platform device initialization. Called via drm_init at module load time,
+ * Platform device initialization. Called directly from modules.
*
* \return zero on success or a negative number on failure.
*
@@ -121,7 +173,24 @@ EXPORT_SYMBOL(drm_get_platform_dev);
* after the initialization for driver customization.
*/
-int drm_platform_init(struct drm_driver *driver)
+int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device)
{
- return drm_get_platform_dev(driver->platform_device, driver);
+ DRM_DEBUG("\n");
+
+ driver->kdriver.platform_device = platform_device;
+ driver->bus = &drm_platform_bus;
+ INIT_LIST_HEAD(&driver->device_list);
+ return drm_get_platform_dev(platform_device, driver);
+}
+EXPORT_SYMBOL(drm_platform_init);
+
+void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device)
+{
+ struct drm_device *dev, *tmp;
+ DRM_DEBUG("\n");
+
+ list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
+ drm_put_dev(dev);
+ DRM_INFO("Module unloaded\n");
}
+EXPORT_SYMBOL(drm_platform_exit);
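
The platform path follows the same shape: the driver supplies the
platform_device it already owns instead of stashing it in drm_driver. A
sketch, reusing the illustrative example_driver from the PCI sketch above:

static struct platform_device *example_pdev;

static int __init example_init(void)
{
        example_pdev = platform_device_register_simple("example-drm",
                                                       -1, NULL, 0);
        if (IS_ERR(example_pdev))
                return PTR_ERR(example_pdev);
        return drm_platform_init(&example_driver, example_pdev);
}

static void __exit example_exit(void)
{
        drm_platform_exit(&example_driver, example_pdev);
        platform_device_unregister(example_pdev);
}
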
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d59edc18301f..001273d57f2d 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -269,25 +269,14 @@ int drm_fill_in_dev(struct drm_device *dev,
dev->driver = driver;
- if (drm_core_has_AGP(dev)) {
- if (drm_device_is_agp(dev))
- dev->agp = drm_agp_init(dev);
- if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
- && (dev->agp == NULL)) {
- DRM_ERROR("Cannot initialize the agpgart module.\n");
- retcode = -EINVAL;
+ if (dev->driver->bus->agp_init) {
+ retcode = dev->driver->bus->agp_init(dev);
+ if (retcode)
goto error_out_unreg;
- }
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr =
- mtrr_add(dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024, MTRR_TYPE_WRCOMB, 1);
- }
}
+
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@@ -425,7 +414,6 @@ int drm_put_minor(struct drm_minor **minor_p)
*
* Cleans up all DRM device, calling drm_lastclose().
*
- * \sa drm_init
*/
void drm_put_dev(struct drm_device *dev)
{
@@ -475,6 +463,7 @@ void drm_put_dev(struct drm_device *dev)
drm_put_minor(&dev->primary);
+ list_del(&dev->driver_item);
if (dev->devname) {
kfree(dev->devname);
dev->devname = NULL;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 85da4c40694c..2eee8e016b38 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -158,8 +158,15 @@ static ssize_t status_show(struct device *device,
{
struct drm_connector *connector = to_drm_connector(device);
enum drm_connector_status status;
+ int ret;
+
+ ret = mutex_lock_interruptible(&connector->dev->mode_config.mutex);
+ if (ret)
+ return ret;
status = connector->funcs->detect(connector, true);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
return snprintf(buf, PAGE_SIZE, "%s\n",
drm_get_connector_status_name(status));
}
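
Taking mode_config.mutex interruptibly means a sysfs read that blocks
behind a slow detect() can be aborted by a signal; the error from
mutex_lock_interruptible() (normally -EINTR) propagates straight back to
the read() caller. A sketch of the pattern (some_lock is illustrative):

static ssize_t sketch_show(struct device *d,
                           struct device_attribute *attr, char *buf)
{
        int ret;

        ret = mutex_lock_interruptible(&some_lock);
        if (ret)
                return ret;     /* -EINTR reaches the read() caller */
        /* ... sample the protected state into buf ... */
        mutex_unlock(&some_lock);
        return 0;
}
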
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
new file mode 100644
index 000000000000..206d2300d873
--- /dev/null
+++ b/drivers/gpu/drm/drm_usb.c
@@ -0,0 +1,117 @@
+#include "drmP.h"
+#include <linux/usb.h>
+
+#ifdef CONFIG_USB
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ struct usb_device *usbdev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ usbdev = interface_to_usbdev(interface);
+ dev->usbdev = usbdev;
+ dev->dev = &usbdev->dev;
+
+ mutex_lock(&drm_global_mutex);
+
+ ret = drm_fill_in_dev(dev, NULL, driver);
+ if (ret) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g1;
+ }
+
+ usb_set_intfdata(interface, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g1;
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_g2;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, 0);
+ if (ret)
+ goto err_g3;
+ }
+
+ /* setup the grouping for the legacy output */
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g3;
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ mutex_unlock(&drm_global_mutex);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_g3:
+ drm_put_minor(&dev->primary);
+err_g2:
+ drm_put_minor(&dev->control);
+err_g1:
+ kfree(dev);
+ mutex_unlock(&drm_global_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_get_usb_dev);
+
+static int drm_usb_get_irq(struct drm_device *dev)
+{
+ return 0;
+}
+
+static const char *drm_usb_get_name(struct drm_device *dev)
+{
+ return "USB";
+}
+
+static int drm_usb_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return 0;
+}
+
+static struct drm_bus drm_usb_bus = {
+ .bus_type = DRIVER_BUS_USB,
+ .get_irq = drm_usb_get_irq,
+ .get_name = drm_usb_get_name,
+ .set_busid = drm_usb_set_busid,
+};
+
+int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver)
+{
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&driver->device_list);
+ driver->kdriver.usb = udriver;
+ driver->bus = &drm_usb_bus;
+
+ return usb_register(udriver);
+}
+EXPORT_SYMBOL(drm_usb_init);
+
+void drm_usb_exit(struct drm_driver *driver,
+ struct usb_driver *udriver)
+{
+ usb_deregister(udriver);
+}
+EXPORT_SYMBOL(drm_usb_exit);
+#endif
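
A USB DRM driver is expected to call drm_get_usb_dev() from its probe hook
and register through drm_usb_init(); since drm_get_usb_dev() stores the
drm_device as interface data, disconnect can recover it with
usb_get_intfdata(). A sketch with illustrative example_ names:

static int example_usb_probe(struct usb_interface *interface,
                             const struct usb_device_id *id)
{
        return drm_get_usb_dev(interface, id, &example_driver);
}

static void example_usb_disconnect(struct usb_interface *interface)
{
        struct drm_device *dev = usb_get_intfdata(interface);

        drm_put_dev(dev);
}

static struct usb_driver example_usb_driver = {
        .name = "example-drm",
        .probe = example_usb_probe,
        .disconnect = example_usb_disconnect,
        .id_table = example_id_table,   /* illustrative */
};

static int __init example_init(void)
{
        return drm_usb_init(&example_driver, &example_usb_driver);
}

static void __exit example_exit(void)
{
        drm_usb_exit(&example_driver, &example_usb_driver);
}
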
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ff33e53bbbf8..8f371e8d630f 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -37,7 +37,6 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@@ -94,7 +93,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
- lock_kernel();
dev = priv->minor->dev;
dev_priv = dev->dev_private;
buf = dev_priv->mmap_buffer;
@@ -104,7 +102,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
vma->vm_file = filp;
buf_priv->currently_mapped = I810_BUF_MAPPED;
- unlock_kernel();
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
@@ -116,7 +113,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
.llseek = noop_llseek,
@@ -1242,19 +1239,6 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
-/*
- * call the drm_ioctl under the big kernel lock because
- * to lock against the i810_mmap_buffers function.
- */
-long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 88bcd331e7c5..6f98d059f68a 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -57,18 +57,13 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = i810_ioctl,
+ .unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -77,15 +72,24 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i810_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init i810_init(void)
{
+ if (num_possible_cpus() > 1) {
+ pr_err("drm/i810 does not support SMP\n");
+ return -EINVAL;
+ }
driver.num_ioctls = i810_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i810_pci_driver);
}
static void __exit i810_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i810_pci_driver);
}
module_init(i810_init);
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
deleted file mode 100644
index c642ee0b238c..000000000000
--- a/drivers/gpu/drm/i830/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-ccflags-y := -Iinclude/drm
-i830-y := i830_drv.o i830_dma.o i830_irq.o
-
-obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
deleted file mode 100644
index ca6f31ff0eec..000000000000
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ /dev/null
@@ -1,1560 +0,0 @@
-/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/smp_lock.h>
-#include <linux/pagemap.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <asm/uaccess.h>
-
-#define I830_BUF_FREE 2
-#define I830_BUF_CLIENT 1
-#define I830_BUF_HARDWARE 0
-
-#define I830_BUF_UNMAPPED 0
-#define I830_BUF_MAPPED 1
-
-static struct drm_buf *i830_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int used;
-
- /* Linear search might not be the best solution */
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
- I830_BUF_CLIENT);
- if (used == I830_BUF_FREE)
- return buf;
- }
- return NULL;
-}
-
-/* This should only be called if the buffer is not sent to the hardware
- * yet, the hardware updates in use for us once its on the ring buffer.
- */
-
-static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int used;
-
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
- if (used != I830_BUF_CLIENT) {
- DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- drm_i830_private_t *dev_priv;
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
-
- lock_kernel();
- dev = priv->minor->dev;
- dev_priv = dev->dev_private;
- buf = dev_priv->mmap_buffer;
- buf_priv = buf->dev_private;
-
- vma->vm_flags |= (VM_IO | VM_DONTCOPY);
- vma->vm_file = filp;
-
- buf_priv->currently_mapped = I830_BUF_MAPPED;
- unlock_kernel();
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static const struct file_operations i830_buffer_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = i830_mmap_buffers,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
-};
-
-static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_private_t *dev_priv = dev->dev_private;
- const struct file_operations *old_fops;
- unsigned long virtual;
- int retcode = 0;
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- old_fops = file_priv->filp->f_op;
- file_priv->filp->f_op = &i830_buffer_fops;
- dev_priv->mmap_buffer = buf;
- virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
- MAP_SHARED, buf->bus_address);
- dev_priv->mmap_buffer = NULL;
- file_priv->filp->f_op = old_fops;
- if (IS_ERR((void *)virtual)) { /* ugh */
- /* Real error */
- DRM_ERROR("mmap error\n");
- retcode = PTR_ERR((void *)virtual);
- buf_priv->virtual = NULL;
- } else {
- buf_priv->virtual = (void __user *)virtual;
- }
- up_write(&current->mm->mmap_sem);
-
- return retcode;
-}
-
-static int i830_unmap_buffer(struct drm_buf *buf)
-{
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- int retcode = 0;
-
- if (buf_priv->currently_mapped != I830_BUF_MAPPED)
- return -EINVAL;
-
- down_write(&current->mm->mmap_sem);
- retcode = do_munmap(current->mm,
- (unsigned long)buf_priv->virtual,
- (size_t) buf->total);
- up_write(&current->mm->mmap_sem);
-
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
-
- return retcode;
-}
-
-static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
- struct drm_file *file_priv)
-{
- struct drm_buf *buf;
- drm_i830_buf_priv_t *buf_priv;
- int retcode = 0;
-
- buf = i830_freelist_get(dev);
- if (!buf) {
- retcode = -ENOMEM;
- DRM_DEBUG("retcode=%d\n", retcode);
- return retcode;
- }
-
- retcode = i830_map_buffer(buf, file_priv);
- if (retcode) {
- i830_freelist_put(dev, buf);
- DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
- return retcode;
- }
- buf->file_priv = file_priv;
- buf_priv = buf->dev_private;
- d->granted = 1;
- d->request_idx = buf->idx;
- d->request_size = buf->total;
- d->virtual = buf_priv->virtual;
-
- return retcode;
-}
-
-static int i830_dma_cleanup(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- if (dev->dev_private) {
- int i;
- drm_i830_private_t *dev_priv =
- (drm_i830_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start)
- drm_core_ioremapfree(&dev_priv->ring.map, dev);
- if (dev_priv->hw_status_page) {
- pci_free_consistent(dev->pdev, PAGE_SIZE,
- dev_priv->hw_status_page,
- dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I830_WRITE(0x02080, 0x1ffff000);
- }
-
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- if (buf_priv->kernel_virtual && buf->total)
- drm_core_ioremapfree(&buf_priv->map, dev);
- }
- }
- return 0;
-}
-
-int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
- int iters = 0;
- unsigned long end;
- unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-
- end = jiffies + (HZ * 3);
- while (ring->space < n) {
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = ring->head;
- }
-
- iters++;
- if (time_before(end, jiffies)) {
- DRM_ERROR("space: %d wanted %d\n", ring->space, n);
- DRM_ERROR("lockup\n");
- goto out_wait_ring;
- }
- udelay(1);
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
- }
-
-out_wait_ring:
- return iters;
-}
-
-static void i830_kernel_lost_context(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head == ring->tail)
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
-}
-
-static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int my_idx = 36;
- u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
- int i;
-
- if (dma->buf_count > 1019) {
- /* Not enough space in the status page for the freelist */
- return -EINVAL;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- buf_priv->in_use = hw_status++;
- buf_priv->my_use_idx = my_idx;
- my_idx += 4;
-
- *buf_priv->in_use = I830_BUF_FREE;
-
- buf_priv->map.offset = buf->bus_address;
- buf_priv->map.size = buf->total;
- buf_priv->map.type = _DRM_AGP;
- buf_priv->map.flags = 0;
- buf_priv->map.mtrr = 0;
-
- drm_core_ioremap(&buf_priv->map, dev);
- buf_priv->kernel_virtual = buf_priv->map.handle;
- }
- return 0;
-}
-
-static int i830_dma_initialize(struct drm_device *dev,
- drm_i830_private_t *dev_priv,
- drm_i830_init_t *init)
-{
- struct drm_map_list *r_list;
-
- memset(dev_priv, 0, sizeof(drm_i830_private_t));
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK) {
- dev_priv->sarea_map = r_list->map;
- break;
- }
- }
-
- if (!dev_priv->sarea_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
- dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not find dma buffer map!\n");
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i830_sarea_t *)
- ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
-
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = _DRM_AGP;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_core_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->w = init->w;
- dev_priv->h = init->h;
- dev_priv->pitch = init->pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->front_offset = init->front_offset;
-
- dev_priv->front_di1 = init->front_offset | init->pitch_bits;
- dev_priv->back_di1 = init->back_offset | init->pitch_bits;
- dev_priv->zi1 = init->depth_offset | init->pitch_bits;
-
- DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
- DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
- DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
- DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
-
- dev_priv->cpp = init->cpp;
- /* We are using separate values as placeholders for mechanisms for
- * private backbuffer/depthbuffer usage.
- */
-
- dev_priv->back_pitch = init->back_pitch;
- dev_priv->depth_pitch = init->depth_pitch;
- dev_priv->do_boxes = 0;
- dev_priv->use_mi_batchbuffer_start = 0;
-
- /* Program Hardware Status Page */
- dev_priv->hw_status_page =
- pci_alloc_consistent(dev->pdev, PAGE_SIZE,
- &dev_priv->dma_status_page);
- if (!dev_priv->hw_status_page) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- I830_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- /* Now we need to init our freelist */
- if (i830_freelist_init(dev, dev_priv) != 0) {
- dev->dev_private = (void *)dev_priv;
- i830_dma_cleanup(dev);
- DRM_ERROR("Not enough space in the status page for"
- " the freelist\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)dev_priv;
-
- return 0;
-}
-
-static int i830_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv;
- drm_i830_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case I830_INIT_DMA:
- dev_priv = kmalloc(sizeof(drm_i830_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i830_dma_initialize(dev, dev_priv, init);
- break;
- case I830_CLEANUP_DMA:
- retcode = i830_dma_cleanup(dev);
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define ST1_ENABLE (1<<16)
-#define ST1_MASK (0xffff)
-
-/* Most efficient way to verify state for the i830 is as it is
- * emitted. Non-conformant state is silently dropped.
- */
-static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);
-
- for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
- OUT_RING(code[I830_CTXREG_BLENDCOLR]);
- j += 2;
-
- for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
- tmp = code[i];
- if ((tmp & (7 << 29)) == CMD_3D &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else {
- DRM_ERROR("Skipping %d\n", i);
- }
- }
-
- OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
- OUT_RING(code[I830_CTXREG_MCSB1]);
- j += 2;
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
- (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
- (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
-
- BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
-
- OUT_RING(code[I830_TEXREG_MI0]); /* TM0LI */
- OUT_RING(code[I830_TEXREG_MI1]); /* TM0S0 */
- OUT_RING(code[I830_TEXREG_MI2]); /* TM0S1 */
- OUT_RING(code[I830_TEXREG_MI3]); /* TM0S2 */
- OUT_RING(code[I830_TEXREG_MI4]); /* TM0S3 */
- OUT_RING(code[I830_TEXREG_MI5]); /* TM0S4 */
-
- for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
- } else
- printk("rejected packet %x\n", code[0]);
-}
-
-static void i830EmitTexBlendVerified(struct drm_device *dev,
- unsigned int *code, unsigned int num)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- if (!num)
- return;
-
- BEGIN_LP_RING(num + 1);
-
- for (i = 0; i < num; i++) {
- tmp = code[i];
- OUT_RING(tmp);
- j++;
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitTexPalette(struct drm_device *dev,
- unsigned int *palette, int number, int is_shared)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
-
- return;
-
- BEGIN_LP_RING(258);
-
- if (is_shared == 1) {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
- MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
- } else {
- OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
- }
- for (i = 0; i < 256; i++)
- OUT_RING(palette[i]);
- OUT_RING(0);
- /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
- */
-}
-
-/* Need to do some additional checking when setting the dest buffer.
- */
-static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
-
- tmp = code[I830_DESTREG_CBUFADDR];
- if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
- if (((int)outring) & 8) {
- OUT_RING(0);
- OUT_RING(0);
- }
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_COLOR_BACK |
- BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
- BUF_3D_USE_FENCE);
- OUT_RING(tmp);
- OUT_RING(0);
-
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
- BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
- OUT_RING(dev_priv->zi1);
- OUT_RING(0);
- } else {
- DRM_ERROR("bad di1 %x (allow %x or %x)\n",
- tmp, dev_priv->front_di1, dev_priv->back_di1);
- }
-
- /* invarient:
- */
-
- OUT_RING(GFX_OP_DESTBUFFER_VARS);
- OUT_RING(code[I830_DESTREG_DV1]);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(code[I830_DESTREG_DR1]);
- OUT_RING(code[I830_DESTREG_DR2]);
- OUT_RING(code[I830_DESTREG_DR3]);
- OUT_RING(code[I830_DESTREG_DR4]);
-
- /* Need to verify this */
- tmp = code[I830_DESTREG_SENABLE];
- if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
- OUT_RING(tmp);
- } else {
- DRM_ERROR("bad scissor enable\n");
- OUT_RING(0);
- }
-
- OUT_RING(GFX_OP_SCISSOR_RECT);
- OUT_RING(code[I830_DESTREG_SR1]);
- OUT_RING(code[I830_DESTREG_SR2]);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- BEGIN_LP_RING(2);
- OUT_RING(GFX_OP_STIPPLE);
- OUT_RING(code[1]);
- ADVANCE_LP_RING();
-}
-
-static void i830EmitState(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("%s %x\n", __func__, dirty);
-
- if (dirty & I830_UPLOAD_BUFFERS) {
- i830EmitDestVerified(dev, sarea_priv->BufferState);
- sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
- }
-
- if (dirty & I830_UPLOAD_CTX) {
- i830EmitContextVerified(dev, sarea_priv->ContextState);
- sarea_priv->dirty &= ~I830_UPLOAD_CTX;
- }
-
- if (dirty & I830_UPLOAD_TEX0) {
- i830EmitTexVerified(dev, sarea_priv->TexState[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
- }
-
- if (dirty & I830_UPLOAD_TEX1) {
- i830EmitTexVerified(dev, sarea_priv->TexState[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND0) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
- sarea_priv->TexBlendStateWordsUsed[0]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND1) {
- i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
- sarea_priv->TexBlendStateWordsUsed[1]);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
- }
-
- if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
- } else {
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
- i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
- }
-
- /* 1.3:
- */
-#if 0
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
- if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
- i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
- }
-#endif
- }
-
- /* 1.3:
- */
- if (dirty & I830_UPLOAD_STIPPLE) {
- i830EmitStippleVerified(dev, sarea_priv->StippleState);
- sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
- }
-
- if (dirty & I830_UPLOAD_TEX2) {
- i830EmitTexVerified(dev, sarea_priv->TexState2);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
- }
-
- if (dirty & I830_UPLOAD_TEX3) {
- i830EmitTexVerified(dev, sarea_priv->TexState3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND2) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState2,
- sarea_priv->TexBlendStateWordsUsed2);
-
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
- }
-
- if (dirty & I830_UPLOAD_TEXBLEND3) {
- i830EmitTexBlendVerified(dev,
- sarea_priv->TexBlendState3,
- sarea_priv->TexBlendStateWordsUsed3);
- sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
- }
-}
-
-/* ================================================================
- * Performance monitoring functions
- */
-
-static void i830_fill_box(struct drm_device *dev,
- int x, int y, int w, int h, int r, int g, int b)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- u32 color;
- unsigned int BR13, CMD;
- RING_LOCALS;
-
- BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
- CMD = XY_COLOR_BLT_CMD;
- x += dev_priv->sarea_priv->boxes[0].x1;
- y += dev_priv->sarea_priv->boxes[0].y1;
-
- if (dev_priv->cpp == 4) {
- BR13 |= (1 << 25);
- CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
- color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
- } else {
- color = (((r & 0xf8) << 8) |
- ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
- }
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((y << 16) | x);
- OUT_RING(((y + h) << 16) | (x + w));
-
- if (dev_priv->current_page == 1)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING(color);
- ADVANCE_LP_RING();
-}
-
-static void i830_cp_performance_boxes(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- /* Purple box for page flipping
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
- i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
-
- /* Red box if we have to wait for idle at any point
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
- i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
-
- /* Blue box: lost context?
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
- i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
-
- /* Yellow box for texture swaps
- */
- if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
- i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
-
- /* Green box if hardware never idles (as far as we can tell)
- */
- if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
- i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
-
- /* Draw bars indicating number of buffers allocated
- * (not a great measure, easily confused)
- */
- if (dev_priv->dma_used) {
- int bar = dev_priv->dma_used / 10240;
- if (bar > 100)
- bar = 100;
- if (bar < 1)
- bar = 1;
- i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
- dev_priv->dma_used = 0;
- }
-
- dev_priv->sarea_priv->perf_boxes = 0;
-}
-
-static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval,
- unsigned int clear_depthmask)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int BR13, CMD, D_CMD;
- RING_LOCALS;
-
- if (dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(I830_FRONT | I830_BACK);
- if (tmp & I830_FRONT)
- flags |= I830_BACK;
- if (tmp & I830_BACK)
- flags |= I830_FRONT;
- }
-
- i830_kernel_lost_context(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- case 4:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
- CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
- XY_COLOR_BLT_WRITE_RGB);
- D_CMD = XY_COLOR_BLT_CMD;
- if (clear_depthmask & 0x00ffffff)
- D_CMD |= XY_COLOR_BLT_WRITE_RGB;
- if (clear_depthmask & 0xff000000)
- D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
- break;
- default:
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- D_CMD = CMD = XY_COLOR_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- if (flags & I830_FRONT) {
- DRM_DEBUG("clear front\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->front_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_BACK) {
- DRM_DEBUG("clear back\n");
- BEGIN_LP_RING(6);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->back_offset);
- OUT_RING(clear_color);
- ADVANCE_LP_RING();
- }
-
- if (flags & I830_DEPTH) {
- DRM_DEBUG("clear depth\n");
- BEGIN_LP_RING(6);
- OUT_RING(D_CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
- OUT_RING(dev_priv->depth_offset);
- OUT_RING(clear_zval);
- ADVANCE_LP_RING();
- }
- }
-}
-
-static void i830_dma_dispatch_swap(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = dev_priv->cpp;
- int i;
- unsigned int CMD, BR13;
- RING_LOCALS;
-
- DRM_DEBUG("swapbuffers\n");
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes)
- i830_cp_performance_boxes(dev);
-
- switch (cpp) {
- case 2:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- case 4:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
- CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
- XY_SRC_COPY_BLT_WRITE_RGB);
- break;
- default:
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- }
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
- pbox->x1, pbox->y1, pbox->x2, pbox->y2);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD);
- OUT_RING(BR13);
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING((pbox->y2 << 16) | pbox->x2);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->front_offset);
- else
- OUT_RING(dev_priv->back_offset);
-
- OUT_RING((pbox->y1 << 16) | pbox->x1);
- OUT_RING(BR13 & 0xffff);
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset);
- else
- OUT_RING(dev_priv->front_offset);
-
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_dispatch_flip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __func__,
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
-
- i830_kernel_lost_context(dev);
-
- if (dev_priv->do_boxes) {
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
- i830_cp_performance_boxes(dev);
- }
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(6);
- OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
- OUT_RING(0);
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static void i830_dma_dispatch_vertex(struct drm_device *dev,
- struct drm_buf *buf, int discard, int used)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
- drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
- struct drm_clip_rect *box = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int i = 0, u;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- if (nbox > I830_NR_SAREA_CLIPRECTS)
- nbox = I830_NR_SAREA_CLIPRECTS;
-
- if (discard) {
- u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
- if (u != I830_BUF_CLIENT)
- DRM_DEBUG("xxxx 2\n");
- }
-
- if (used > 4 * 1023)
- used = 0;
-
- if (sarea_priv->dirty)
- i830EmitState(dev);
-
- DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
- address, used, nbox);
-
- dev_priv->counter++;
- DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i830_dma_dispatch\n");
- DRM_DEBUG("start : %lx\n", start);
- DRM_DEBUG("used : %d\n", used);
- DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
-
- if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
- u32 *vp = buf_priv->kernel_virtual;
-
- vp[0] = (GFX_OP_PRIMITIVE |
- sarea_priv->vertex_prim | ((used / 4) - 2));
-
- if (dev_priv->use_mi_batchbuffer_start) {
- vp[used / 4] = MI_BATCH_BUFFER_END;
- used += 4;
- }
-
- if (used & 4) {
- vp[used / 4] = 0;
- used += 4;
- }
-
- i830_unmap_buffer(buf);
- }
-
- if (used) {
- do {
- if (i < nbox) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR1]);
- OUT_RING(box[i].x1 | (box[i].y1 << 16));
- OUT_RING(box[i].x2 | (box[i].y2 << 16));
- OUT_RING(sarea_priv->
- BufferState[I830_DESTREG_DR4]);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (dev_priv->use_mi_batchbuffer_start) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(start | MI_BATCH_NON_SECURE);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_BATCH_BUFFER);
- OUT_RING(start | MI_BATCH_NON_SECURE);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- } while (++i < nbox);
- }
-
- if (discard) {
- dev_priv->counter++;
-
- (void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_HARDWARE);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I830_BUF_FREE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static void i830_dma_quiescent(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-}
-
-static int i830_flush_queue(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- int i, ret = 0;
- RING_LOCALS;
-
- i830_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
- I830_BUF_FREE);
-
- if (used == I830_BUF_HARDWARE)
- DRM_DEBUG("reclaimed from HARDWARE\n");
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("still on client\n");
- }
-
- return ret;
-}
-
-/* Must be called with the lock held */
-static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- if (!dev->dev_private)
- return;
- if (!dma->buflist)
- return;
-
- i830_flush_queue(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i830_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv) {
- int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
- I830_BUF_FREE);
-
- if (used == I830_BUF_CLIENT)
- DRM_DEBUG("reclaimed from client\n");
- if (buf_priv->currently_mapped == I830_BUF_MAPPED)
- buf_priv->currently_mapped = I830_BUF_UNMAPPED;
- }
- }
-}
-
-static int i830_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_flush_queue(dev);
- return 0;
-}
-
-static int i830_dma_vertex(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
- drm_i830_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
- vertex->idx, vertex->used, vertex->discard);
-
- if (vertex->idx < 0 || vertex->idx > dma->buf_count)
- return -EINVAL;
-
- i830_dma_dispatch_vertex(dev,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i830_clear_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* GH: Someone's doing nasty things... */
- if (!dev->dev_private)
- return -EINVAL;
-
- i830_dma_dispatch_clear(dev, clear->flags,
- clear->clear_color,
- clear->clear_depth, clear->clear_depthmask);
- return 0;
-}
-
-static int i830_swap_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("i830_swap_bufs\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i830_dma_dispatch_swap(dev);
- return 0;
-}
-
-/* Not sure why this isn't set all the time:
- */
-static void i830_do_init_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static int i830_do_cleanup_pageflip(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
- if (dev_priv->current_page != 0)
- i830_dma_dispatch_flip(dev);
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-static int i830_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("%s\n", __func__);
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv->page_flipping)
- i830_do_init_pageflip(dev);
-
- i830_dma_dispatch_flip(dev);
- return 0;
-}
-
-static int i830_getage(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
-}
-
-static int i830_getbuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int retcode = 0;
- drm_i830_dma_t *d = data;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
- dev_priv->sarea_priv;
-
- DRM_DEBUG("getbuf\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- d->granted = 0;
-
- retcode = i830_dma_get_buffer(dev, d, file_priv);
-
- DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
- task_pid_nr(current), retcode, d->granted);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return retcode;
-}
-
-static int i830_copybuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static int i830_docopy(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-
-static int i830_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_getparam_t *param = data;
- int value;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_PARAM_IRQ_ACTIVE:
- value = dev->irq_enabled;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int i830_setparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_setparam_t *param = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- switch (param->param) {
- case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
- dev_priv->use_mi_batchbuffer_start = param->value;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-int i830_driver_load(struct drm_device *dev, unsigned long flags)
-{
- /* i830 has 4 more counters */
- dev->counters += 4;
- dev->types[6] = _DRM_STAT_IRQ;
- dev->types[7] = _DRM_STAT_PRIMARY;
- dev->types[8] = _DRM_STAT_SECONDARY;
- dev->types[9] = _DRM_STAT_DMA;
-
- return 0;
-}
-
-void i830_driver_lastclose(struct drm_device *dev)
-{
- i830_dma_cleanup(dev);
-}
-
-void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_i830_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping)
- i830_do_cleanup_pageflip(dev);
- }
-}
-
-void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
-{
- i830_reclaim_buffers(dev, file_priv);
-}
-
-int i830_driver_dma_quiescent(struct drm_device *dev)
-{
- i830_dma_quiescent(dev);
- return 0;
-}
-
-/*
- * call the drm_ioctl under the big kernel lock because
- * to lock against the i830_mmap_buffers function.
- */
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int ret;
- lock_kernel();
- ret = drm_ioctl(file, cmd, arg);
- unlock_kernel();
- return ret;
-}
-
-struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
-};
-
-int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
-
-/**
- * Determine if the device really is AGP or not.
- *
- * All Intel graphics chipsets are treated as AGP, even if they are really
- * PCI-e.
- *
- * \param dev The device to be tested.
- *
- * \returns
- * A value of 1 is always returned to indicate every i8xx is AGP.
- */
-int i830_driver_device_is_agp(struct drm_device *dev)
-{
- return 1;
-}
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
deleted file mode 100644
index f655ab7977da..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/* i830_drv.c -- I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- * Abraham vd Merwe <abraham@2d3d.co.za>
- * Keith Whitwell <keith@tungstengraphics.com>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-
-#include "drm_pciids.h"
-
-static struct pci_device_id pciidlist[] = {
- i830_PCI_IDS
-};
-
-static struct drm_driver driver = {
-	.driver_features =
-	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
-	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE
-#if USE_IRQS
-	    | DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ
-#endif
-	    ,
- .dev_priv_size = sizeof(drm_i830_buf_priv_t),
- .load = i830_driver_load,
- .lastclose = i830_driver_lastclose,
- .preclose = i830_driver_preclose,
- .device_is_agp = i830_driver_device_is_agp,
- .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
- .dma_quiescent = i830_driver_dma_quiescent,
-#if USE_IRQS
- .irq_preinstall = i830_driver_irq_preinstall,
- .irq_postinstall = i830_driver_irq_postinstall,
- .irq_uninstall = i830_driver_irq_uninstall,
- .irq_handler = i830_driver_irq_handler,
-#endif
- .ioctls = i830_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = i830_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static int __init i830_init(void)
-{
- driver.num_ioctls = i830_max_ioctl;
- return drm_init(&driver);
-}
-
-static void __exit i830_exit(void)
-{
- drm_exit(&driver);
-}
-
-module_init(i830_init);
-module_exit(i830_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
deleted file mode 100644
index 0df1c720560b..000000000000
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
- */
-
-#ifndef _I830_DRV_H_
-#define _I830_DRV_H_
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
-
-#define DRIVER_NAME "i830"
-#define DRIVER_DESC "Intel 830M"
-#define DRIVER_DATE "20021108"
-
-/* Interface history:
- *
- * 1.1: Original.
- * 1.2: ?
- * 1.3: New irq emit/wait ioctls.
- * New pageflip ioctl.
- * New getparam ioctl.
- * State for texunits 3&4 in sarea.
- * New (alternative) layout for texture state.
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 2
-
-/* The driver will work either way: IRQs save CPU time when waiting
- * for the card, but are subject to subtle interactions between the
- * BIOS, the hardware and the driver.
- */
-/* XXX: Add vblank support? */
-#define USE_IRQS 0
-
-typedef struct drm_i830_buf_priv {
- u32 *in_use;
- int my_use_idx;
- int currently_mapped;
- void __user *virtual;
- void *kernel_virtual;
- drm_local_map_t map;
-} drm_i830_buf_priv_t;
-
-typedef struct _drm_i830_ring_buffer {
- int tail_mask;
- unsigned long Start;
- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-} drm_i830_ring_buffer_t;
-
-typedef struct drm_i830_private {
- struct drm_local_map *sarea_map;
- struct drm_local_map *mmio_map;
-
- drm_i830_sarea_t *sarea_priv;
- drm_i830_ring_buffer_t ring;
-
- void *hw_status_page;
- unsigned long counter;
-
- dma_addr_t dma_status_page;
-
- struct drm_buf *mmap_buffer;
-
- u32 front_di1, back_di1, zi1;
-
- int back_offset;
- int depth_offset;
- int front_offset;
- int w, h;
- int pitch;
- int back_pitch;
- int depth_pitch;
- unsigned int cpp;
-
- int do_boxes;
- int dma_used;
-
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-
- int use_mi_batchbuffer_start;
-
-} drm_i830_private_t;
-
-long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i830_ioctls[];
-extern int i830_max_ioctl;
-
-/* i830_irq.c */
-extern int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(struct drm_device *dev);
-extern void i830_driver_irq_postinstall(struct drm_device *dev);
-extern void i830_driver_irq_uninstall(struct drm_device *dev);
-extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void i830_driver_lastclose(struct drm_device *dev);
-extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
-extern int i830_driver_dma_quiescent(struct drm_device *dev);
-extern int i830_driver_device_is_agp(struct drm_device *dev);
-
-#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
-
-#define I830_VERBOSE 0
-
-#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I830_VERBOSE) \
- printk("BEGIN_LP_RING(%d)\n", (n)); \
- if (dev_priv->ring.space < n*4) \
- i830_wait_ring(dev, n*4, __func__); \
- outcount = 0; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I830_VERBOSE) \
- printk(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outcount++; \
- outring += 4; \
- outring &= ringmask; \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I830_VERBOSE) \
- printk("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I830_WRITE(LP_RING + RING_TAIL, outring); \
-} while (0)
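
These three macros are the whole ring-emission protocol: RING_LOCALS
declares the cursor variables, BEGIN_LP_RING reserves space (falling back
to i830_wait_ring when the ring is full), OUT_RING writes one dword and
wraps the cursor with tail_mask, and ADVANCE_LP_RING publishes the new
tail to the hardware. A minimal sketch of a caller, assuming dev and
dev_priv in scope as in the dispatch functions elsewhere in this driver
(the command choice is illustrative only):

	static void i830_emit_flush_sketch(struct drm_device *dev)
	{
		drm_i830_private_t *dev_priv = dev->dev_private;
		RING_LOCALS;

		/* Reserve two dwords, write a map-cache flush plus a
		 * padding dword, then bump the hardware tail pointer. */
		BEGIN_LP_RING(2);
		OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH |
			 INST_FLUSH_MAP_CACHE);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}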
-
-extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define STATE3D_LOAD_STATE_IMMEDIATE_2 ((0x3<<29)|(0x1d<<24)|(0x03<<16))
-#define LOAD_TEXTURE_MAP0 (1<<11)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I830REG_HWSTAM 0x02098
-#define I830REG_INT_IDENTITY_R 0x020a4
-#define I830REG_INT_MASK_R 0x020a8
-#define I830REG_INT_ENABLE_R 0x020a0
-
-#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
-
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x001FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR		0xFFFFF000
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x001FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
-
-#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
-
-#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
-#define ASYNC_FLIP (1<<22)
-
-#define CMD_3D (0x3<<29)
-#define STATE3D_CONST_BLEND_COLOR_CMD (CMD_3D|(0x1d<<24)|(0x88<<16))
-#define STATE3D_MAP_COORD_SETBIND_CMD (CMD_3D|(0x1d<<24)|(0x02<<16))
-
-#define BR00_BITBLT_CLIENT 0x40000000
-#define BR00_OP_COLOR_BLT 0x10000000
-#define BR00_OP_SRC_COPY_BLT 0x10C00000
-#define BR13_SOLID_PATTERN 0x80000000
-
-#define BUF_3D_ID_COLOR_BACK (0x3<<24)
-#define BUF_3D_ID_DEPTH (0x7<<24)
-#define BUF_3D_USE_FENCE (1<<23)
-#define BUF_3D_PITCH(x) (((x)/4)<<2)
-
-#define CMD_OP_MAP_PALETTE_LOAD ((3<<29)|(0x1d<<24)|(0x82<<16)|255)
-#define MAP_PALETTE_NUM(x) ((x<<8) & (1<<8))
-#define MAP_PALETTE_BOTH (1<<11)
-
-#define XY_COLOR_BLT_CMD ((2<<29)|(0x50<<22)|0x4)
-#define XY_COLOR_BLT_WRITE_ALPHA (1<<21)
-#define XY_COLOR_BLT_WRITE_RGB (1<<20)
-
-#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
-#define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21)
-#define XY_SRC_COPY_BLT_WRITE_RGB (1<<20)
-
-#define MI_BATCH_BUFFER ((0x30<<23)|1)
-#define MI_BATCH_BUFFER_START (0x31<<23)
-#define MI_BATCH_BUFFER_END (0xA<<23)
-#define MI_BATCH_NON_SECURE (1)
-
-#define MI_WAIT_FOR_EVENT ((0x3<<23))
-#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-
-#define MI_LOAD_SCAN_LINES_INCL ((0x12<<23))
-
-#endif
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
deleted file mode 100644
index d1a6b95d631d..000000000000
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
- *
- * Copyright 2002 Tungsten Graphics, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Keith Whitwell <keith@tungstengraphics.com>
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "i830_drm.h"
-#include "i830_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/delay.h>
-
-irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- u16 temp;
-
- temp = I830_READ16(I830REG_INT_IDENTITY_R);
- DRM_DEBUG("%x\n", temp);
-
- if (!(temp & 2))
- return IRQ_NONE;
-
- I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
-
- atomic_inc(&dev_priv->irq_received);
- wake_up_interruptible(&dev_priv->irq_queue);
-
- return IRQ_HANDLED;
-}
-
-static int i830_emit_irq(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- DRM_DEBUG("%s\n", __func__);
-
- atomic_inc(&dev_priv->irq_emitted);
-
- BEGIN_LP_RING(2);
- OUT_RING(0);
- OUT_RING(GFX_OP_USER_INTERRUPT);
- ADVANCE_LP_RING();
-
- return atomic_read(&dev_priv->irq_emitted);
-}
-
-static int i830_wait_irq(struct drm_device *dev, int irq_nr)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- DECLARE_WAITQUEUE(entry, current);
- unsigned long end = jiffies + HZ * 3;
- int ret = 0;
-
- DRM_DEBUG("%s\n", __func__);
-
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- return 0;
-
- dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
-
- add_wait_queue(&dev_priv->irq_queue, &entry);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
- break;
- if ((signed)(end - jiffies) <= 0) {
- DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
- I830_READ16(I830REG_INT_IDENTITY_R),
- I830_READ16(I830REG_INT_MASK_R),
- I830_READ16(I830REG_INT_ENABLE_R),
- I830_READ16(I830REG_HWSTAM));
-
- ret = -EBUSY; /* Lockup? Missed irq? */
- break;
- }
- schedule_timeout(HZ * 3);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
-
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev_priv->irq_queue, &entry);
- return ret;
-}
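
Emit and wait together form a simple fence: i830_emit_irq stamps a
GFX_OP_USER_INTERRUPT into the ring and returns the new value of
irq_emitted, and i830_wait_irq sleeps until the interrupt handler has
advanced irq_received past that value, or three seconds have elapsed.
A sketch of the pairing as the ioctls below drive it (hypothetical
in-kernel caller):

	int seq, ret;

	seq = i830_emit_irq(dev);	/* queue the user interrupt */
	ret = i830_wait_irq(dev, seq);	/* sleep until it has fired */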
-
-/* Needs the lock as it touches the ring.
- */
-int i830_irq_emit(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_emit_t *emit = data;
- int result;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- result = i830_emit_irq(dev);
-
- if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-/* Doesn't need the hardware lock.
- */
-int i830_irq_wait(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i830_private_t *dev_priv = dev->dev_private;
- drm_i830_irq_wait_t *irqwait = data;
-
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __func__);
- return -EINVAL;
- }
-
- return i830_wait_irq(dev, irqwait->irq_seq);
-}
-
-/* drm_dma.h hooks */
-void i830_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_HWSTAM, 0xffff);
- I830_WRITE16(I830REG_INT_MASK_R, 0x0);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
- atomic_set(&dev_priv->irq_received, 0);
- atomic_set(&dev_priv->irq_emitted, 0);
- init_waitqueue_head(&dev_priv->irq_queue);
-}
-
-void i830_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
-
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
-}
-
-void i830_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
- if (!dev_priv)
- return;
-
- I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
- I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
-}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4ff9b6cc973f..09e0327fc6ce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -326,21 +326,21 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
struct intel_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
- const char *pipe = crtc->pipe ? "B" : "A";
- const char *plane = crtc->plane ? "B" : "A";
+ const char pipe = pipe_name(crtc->pipe);
+ const char plane = plane_name(crtc->plane);
struct intel_unpin_work *work;
spin_lock_irqsave(&dev->event_lock, flags);
work = crtc->unpin_work;
if (work == NULL) {
- seq_printf(m, "No flip due on pipe %s (plane %s)\n",
+ seq_printf(m, "No flip due on pipe %c (plane %c)\n",
pipe, plane);
} else {
if (!work->pending) {
- seq_printf(m, "Flip queued on pipe %s (plane %s)\n",
+ seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
pipe, plane);
} else {
- seq_printf(m, "Flip pending (waiting for vsync) on pipe %s (plane %s)\n",
+ seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
pipe, plane);
}
if (work->enable_stall_check)
@@ -458,7 +458,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret, i;
+ int ret, i, pipe;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -471,10 +471,10 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
- seq_printf(m, "Pipe A stat: %08x\n",
- I915_READ(PIPEASTAT));
- seq_printf(m, "Pipe B stat: %08x\n",
- I915_READ(PIPEBSTAT));
+ for_each_pipe(pipe)
+ seq_printf(m, "Pipe %c stat: %08x\n",
+ pipe_name(pipe),
+ I915_READ(PIPESTAT(pipe)));
} else {
seq_printf(m, "North Display Interrupt enable: %08x\n",
I915_READ(DEIER));
@@ -544,11 +544,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
- volatile u32 *hws;
+ const volatile u32 __iomem *hws;
int i;
ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
- hws = (volatile u32 *)ring->status_page.page_addr;
+ hws = (volatile u32 __iomem *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -615,7 +615,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
- u8 *virt = ring->virtual_start;
+ const u8 __iomem *virt = ring->virtual_start;
uint32_t off;
for (off = 0; off < ring->size; off += 4) {
@@ -805,15 +805,20 @@ static int i915_error_state(struct seq_file *m, void *unused)
}
}
- if (error->ringbuffer) {
- struct drm_i915_error_object *obj = error->ringbuffer;
-
- seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
- offset += 4;
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++) {
+ if (error->ringbuffer[i]) {
+ struct drm_i915_error_object *obj = error->ringbuffer[i];
+ seq_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n",
+ offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
}
}
}
@@ -862,19 +867,44 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ u32 rpstat;
+ u32 rpupei, rpcurup, rpprevup;
+ u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
/* RPSTAT1 is in the GT power well */
__gen6_gt_force_wake_get(dev_priv);
+ rpstat = I915_READ(GEN6_RPSTAT1);
+ rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
+ rpcurup = I915_READ(GEN6_RP_CUR_UP);
+ rpprevup = I915_READ(GEN6_RP_PREV_UP);
+ rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
+ rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
+ rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
+
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+ seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & 0xff00) >> 8);
seq_printf(m, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
+	seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
+						GEN6_CAGF_SHIFT) * 50);
+ seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
+ GEN6_CURICONT_MASK);
+ seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
+ GEN6_CURIAVG_MASK);
+ seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
+ GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
+ GEN6_CURBSYTAVG_MASK);
max_freq = (rp_state_cap & 0xff0000) >> 16;
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
@@ -1259,7 +1289,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_capabilities", i915_capabilities, 0, 0},
+ {"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
{"i915_gem_gtt", i915_gem_gtt_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e33d9be7df3b..72730377a01b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -43,6 +43,17 @@
#include <linux/slab.h>
#include <acpi/video.h>
+static void i915_write_hws_pga(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 addr;
+
+ addr = dev_priv->status_page_dmah->busaddr;
+ if (INTEL_INFO(dev)->gen >= 4)
+ addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
+ I915_WRITE(HWS_PGA, addr);
+}
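
A worked example of the gen4+ packing above, assuming a hypothetical
36-bit bus address: for busaddr = 0x234567000, truncation to u32 gives
addr = 0x34567000, and (busaddr >> 28) & 0xf0 extracts physical address
bits 35:32 (here 0x2) into register bits 7:4, so HWS_PGA is written as
0x34567020. The status page is 4 KiB aligned, so the low register bits
are otherwise unused.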
+
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -60,16 +71,13 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
- dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+ ring->status_page.page_addr =
+ (void __force __iomem *)dev_priv->status_page_dmah->vaddr;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
- if (INTEL_INFO(dev)->gen >= 4)
- dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
- 0xf0;
+ i915_write_hws_pga(dev);
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
@@ -216,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
- I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+ i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
@@ -771,6 +779,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_INFO(dev)->gen >= 4;
break;
+ case I915_PARAM_HAS_RELAXED_DELTA:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -859,8 +870,9 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
" G33 hw status page\n");
return -ENOMEM;
}
- ring->status_page.page_addr = dev_priv->hws_map.handle;
- memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+ ring->status_page.page_addr =
+ (void __force __iomem *)dev_priv->hws_map.handle;
+ memset_io(ring->status_page.page_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
@@ -2013,9 +2025,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
- dev_priv->trace_irq_seqno = 0;
- ret = drm_vblank_init(dev, I915_NUM_PIPE);
+ if (IS_MOBILE(dev) || !IS_GEN2(dev))
+ dev_priv->num_pipe = 2;
+ else
+ dev_priv->num_pipe = 1;
+
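+	/*
+	 * e.g. a mobile gen2 part such as the 830M keeps both pipes,
+	 * while a desktop gen2 part such as the 845G has only one;
+	 * all later generations report two here.
+	 */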
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
if (ret)
goto out_gem_unload;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 22ec066adae6..c34a8dd31d02 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,10 +43,13 @@ module_param_named(modeset, i915_modeset, int, 0400);
unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
+int i915_panel_ignore_lid = 0;
+module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
+
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);
-unsigned int i915_semaphores = 0;
+unsigned int i915_semaphores = 1;
module_param_named(semaphores, i915_semaphores, int, 0600);
unsigned int i915_enable_rc6 = 0;
@@ -58,7 +61,10 @@ module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
unsigned int i915_panel_use_ssc = 1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
-bool i915_try_reset = true;
+int i915_vbt_sdvo_panel_type = -1;
+module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
+
+static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
static struct drm_driver driver;
@@ -716,6 +722,9 @@ static struct drm_driver driver = {
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_mmap_gtt,
+ .dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
@@ -732,14 +741,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = i915_pci_probe,
- .remove = i915_pci_remove,
- .driver.pm = &i915_pm_ops,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -748,6 +749,14 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver i915_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = i915_pci_probe,
+ .remove = i915_pci_remove,
+ .driver.pm = &i915_pm_ops,
+};
+
static int __init i915_init(void)
{
if (!intel_agp_enabled) {
@@ -781,12 +790,12 @@ static int __init i915_init(void)
if (!(driver.driver_features & DRIVER_MODESET))
driver.get_vblank_timestamp = NULL;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 456f40484838..449650545bb4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,17 +49,22 @@
enum pipe {
PIPE_A = 0,
PIPE_B,
+ PIPE_C,
+ I915_MAX_PIPES
};
+#define pipe_name(p) ((p) + 'A')
enum plane {
PLANE_A = 0,
PLANE_B,
+ PLANE_C,
};
-
-#define I915_NUM_PIPE 2
+#define plane_name(p) ((p) + 'A')
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
+#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
+
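
A minimal sketch of the new helpers in use (any function where dev_priv
is in scope, which for_each_pipe requires):

	int pipe;

	for_each_pipe(pipe)
		DRM_DEBUG("pipe %c\n", pipe_name(pipe));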
/* Interface history:
*
* 1.1: Original.
@@ -75,10 +80,7 @@ enum plane {
#define DRIVER_PATCHLEVEL 0
#define WATCH_COHERENCY 0
-#define WATCH_EXEC 0
-#define WATCH_RELOC 0
#define WATCH_LISTS 0
-#define WATCH_PWRITE 0
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
@@ -111,6 +113,7 @@ struct intel_opregion {
struct opregion_swsci *swsci;
struct opregion_asle *asle;
void *vbt;
+ u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
@@ -144,8 +147,7 @@ struct intel_display_error_state;
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
- u32 pipeastat;
- u32 pipebstat;
+ u32 pipestat[I915_MAX_PIPES];
u32 ipeir;
u32 ipehr;
u32 instdone;
@@ -172,7 +174,7 @@ struct drm_i915_error_state {
int page_count;
u32 gtt_offset;
u32 *pages[0];
- } *ringbuffer, *batchbuffer[I915_NUM_RINGS];
+ } *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
@@ -200,9 +202,7 @@ struct drm_i915_display_funcs {
void (*disable_fbc)(struct drm_device *dev);
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
- void (*update_wm)(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size);
+ void (*update_wm)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -274,7 +274,6 @@ typedef struct drm_i915_private {
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
- dma_addr_t dma_status_page;
uint32_t counter;
drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
@@ -289,7 +288,6 @@ typedef struct drm_i915_private {
int page_flipping;
atomic_t irq_received;
- u32 trace_irq_seqno;
/* protects the irq masks */
spinlock_t irq_lock;
@@ -324,8 +322,6 @@ typedef struct drm_i915_private {
int cfb_plane;
int cfb_y;
- int irq_enabled;
-
struct intel_opregion opregion;
/* overlay */
@@ -387,7 +383,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
@@ -615,6 +610,12 @@ typedef struct drm_i915_private {
struct delayed_work retire_work;
/**
+ * Are we in a non-interruptible section of code like
+ * modesetting?
+ */
+ bool interruptible;
+
+ /**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
@@ -652,6 +653,7 @@ typedef struct drm_i915_private {
unsigned int lvds_border_bits;
/* Panel fitter placement and size for Ironlake+ */
u32 pch_pf_pos, pch_pf_size;
+ int panel_t3, panel_t12;
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
@@ -698,6 +700,8 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
+
+ struct drm_property *broadcast_rgb_property;
} drm_i915_private_t;
struct drm_i915_gem_object {
@@ -955,10 +959,12 @@ enum intel_chip_family {
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
+extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
+extern int i915_vbt_sdvo_panel_type;
extern unsigned int i915_enable_rc6;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
@@ -998,8 +1004,6 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-extern void i915_enable_interrupt (struct drm_device *dev);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
@@ -1051,7 +1055,6 @@ extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
-int i915_gem_check_is_wedged(struct drm_device *dev);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -1094,8 +1097,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
@@ -1110,12 +1112,18 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool interruptible);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
+int i915_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1126,16 +1134,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
}
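
The comparison is done modulo 2^32 so that sequence-number wraparound is
harmless; a sketch of the standard idiom, where the signed cast is the
load-bearing part:

	static inline bool seqno_passed_sketch(uint32_t seq1, uint32_t seq2)
	{
		/* True when seq1 is at or after seq2, even across a
		 * 32-bit wrap: e.g. seq1 = 2, seq2 = 0xfffffffe. */
		return (int32_t)(seq1 - seq2) >= 0;
	}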
static inline u32
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
return ring->outstanding_lazy_request = dev_priv->next_seqno;
}
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible);
+ struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
void i915_gem_retire_requests(struct drm_device *dev);
@@ -1144,8 +1150,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
@@ -1154,14 +1159,11 @@ void i915_gem_do_init(struct drm_device *dev,
unsigned long end);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
-int __must_check i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring);
-int __must_check i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno,
- bool interruptible,
- struct intel_ring_buffer *ring);
+int __must_check i915_add_request(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct drm_i915_gem_request *request);
+int __must_check i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
@@ -1313,7 +1315,7 @@ extern void intel_display_print_error_state(struct seq_file *m,
#define __i915_read(x, y) \
static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = read##y(dev_priv->regs + reg); \
- trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val)); \
return val; \
}
__i915_read(8, b)
@@ -1324,7 +1326,7 @@ __i915_read(64, q)
#define __i915_write(x, y) \
static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+ trace_i915_reg_rw(true, reg, val, sizeof(val)); \
write##y(val, dev_priv->regs + reg); \
}
__i915_write(8, b)
@@ -1382,47 +1384,4 @@ static inline void i915_gt_write(struct drm_i915_private *dev_priv,
__gen6_gt_wait_for_fifo(dev_priv);
I915_WRITE(reg, val);
}
-
-static inline void
-i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
-{
- /* Trace down the write operation before the real write */
- trace_i915_reg_rw('W', reg, val, len);
- switch (len) {
- case 8:
- writeq(val, dev_priv->regs + reg);
- break;
- case 4:
- writel(val, dev_priv->regs + reg);
- break;
- case 2:
- writew(val, dev_priv->regs + reg);
- break;
- case 1:
- writeb(val, dev_priv->regs + reg);
- break;
- }
-}
-
-/**
- * Reads a dword out of the status page, which is written to from the command
- * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
- * MI_STORE_DATA_IMM.
- *
- * The following dwords have a reserved meaning:
- * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
- * 0x04: ring 0 head pointer
- * 0x05: ring 1 head pointer (915-class)
- * 0x06: ring 2 head pointer (915-class)
- * 0x10-0x1b: Context status DWords (GM45)
- * 0x1f: Last written status offset. (GM45)
- *
- * The area from dword 0x20 to 0x3ff is available for driver usage.
- */
-#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
- (LP_RING(dev_priv)->status_page.page_addr))[reg])
-#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
-#define I915_GEM_HWS_INDEX 0x20
-#define I915_BREADCRUMB_INDEX 0x21
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 36e66cc5225e..c4c2855d002d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -75,8 +75,8 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
-int
-i915_gem_check_is_wedged(struct drm_device *dev)
+static int
+i915_gem_wait_for_error(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct completion *x = &dev_priv->error_completion;
@@ -90,27 +90,24 @@ i915_gem_check_is_wedged(struct drm_device *dev)
if (ret)
return ret;
- /* Success, we reset the GPU! */
- if (!atomic_read(&dev_priv->mm.wedged))
- return 0;
-
- /* GPU is hung, bump the completion count to account for
- * the token we just consumed so that we never hit zero and
- * end up waiting upon a subsequent completion event that
- * will never happen.
- */
- spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
- spin_unlock_irqrestore(&x->wait.lock, flags);
- return -EIO;
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ /* GPU is hung, bump the completion count to account for
+ * the token we just consumed so that we never hit zero and
+ * end up waiting upon a subsequent completion event that
+ * will never happen.
+ */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ x->done++;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+ }
+ return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ret = i915_gem_check_is_wedged(dev);
+ ret = i915_gem_wait_for_error(dev);
if (ret)
return ret;
@@ -118,11 +115,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
if (ret)
return ret;
- if (atomic_read(&dev_priv->mm.wedged)) {
- mutex_unlock(&dev->struct_mutex);
- return -EAGAIN;
- }
-
WARN_ON(i915_verify_lists(dev));
return 0;
}
@@ -193,22 +185,20 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
- * Creates a new mm object and returns a handle to it.
- */
-int
-i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int
+i915_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ uint64_t size,
+ uint32_t *handle_p)
{
- struct drm_i915_gem_create *args = data;
struct drm_i915_gem_object *obj;
int ret;
u32 handle;
- args->size = roundup(args->size, PAGE_SIZE);
+ size = roundup(size, PAGE_SIZE);
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, args->size);
+ obj = i915_gem_alloc_object(dev, size);
if (obj == NULL)
return -ENOMEM;
@@ -224,10 +214,41 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
- args->handle = handle;
+ *handle_p = handle;
return 0;
}
+int
+i915_gem_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /* have to work out size/pitch and return them */
+	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+ args->size = args->pitch * args->height;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
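
A worked example of the pitch math above: for a hypothetical 1366x768,
32 bpp dumb buffer, (32 + 7) / 8 gives 4 bytes per pixel, the raw pitch
is 1366 * 4 = 5464 bytes, ALIGN(5464, 64) rounds it up to 5504, and the
size becomes 5504 * 768 = 4227072 bytes, which i915_gem_create then
rounds up to a page multiple (already exact here).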
+
+int i915_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_create *args = data;
+ return i915_gem_create(file, dev,
+ args->size, &args->handle);
+}
+
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@@ -514,7 +535,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -526,6 +547,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ trace_i915_gem_object_pread(obj, args->offset, args->size);
+
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
@@ -955,7 +978,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -967,6 +990,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
+ trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -1049,7 +1074,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1092,7 +1117,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1121,7 +1146,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
- loff_t offset;
unsigned long addr;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1136,8 +1160,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -E2BIG;
}
- offset = args->offset;
-
down_write(&current->mm->mmap_sem);
addr = do_mmap(obj->filp, 0, args->size,
PROT_READ | PROT_WRITE, MAP_SHARED,
@@ -1182,9 +1204,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
- /* Now bind it into the GTT if needed */
- mutex_lock(&dev->struct_mutex);
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out;
+ trace_i915_gem_object_fault(obj, page_offset, true, write);
+
+ /* Now bind it into the GTT if needed */
if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj);
if (ret)
@@ -1203,7 +1229,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
else
- ret = i915_gem_object_get_fence(obj, NULL, true);
+ ret = i915_gem_object_get_fence(obj, NULL);
if (ret)
goto unlock;
@@ -1219,12 +1245,21 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
unlock:
mutex_unlock(&dev->struct_mutex);
-
+out:
switch (ret) {
+ case -EIO:
case -EAGAIN:
+ /* Give the error handler a chance to run and move the
+ * objects off the GPU active list. Next time we service the
+ * fault, we should be able to transition the page into the
+ * GTT without touching the GPU (and so avoid further
+	 * EIO/EAGAIN). If the GPU is wedged, then there is no issue
+ * with coherency, just lost writes.
+ */
set_need_resched();
case 0:
case -ERESTARTSYS:
+ case -EINTR:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
@@ -1425,27 +1460,13 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
return tile_height * obj->stride * 2;
}
-/**
- * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
- * @dev: DRM device
- * @data: GTT mapping ioctl data
- * @file: GEM object info
- *
- * Simply returns the fake offset to userspace so it can mmap it.
- * The mmap call will end up in drm_gem_mmap(), which will set things
- * up so we can get faults in the handler above.
- *
- * The fault handler will take care of binding the object into the GTT
- * (since it may have been evicted to make room for something), allocating
- * a fence register, and mapping the appropriate aperture address into
- * userspace.
- */
int
-i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_mmap_gtt(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle,
+ uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_mmap_gtt *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1456,8 +1477,8 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -1479,7 +1500,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
goto out;
}
- args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
drm_gem_object_unreference(&obj->base);
@@ -1488,6 +1509,34 @@ unlock:
return ret;
}
+/**
+ * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
+ * @dev: DRM device
+ * @data: GTT mapping ioctl data
+ * @file: GEM object info
+ *
+ * Simply returns the fake offset to userspace so it can mmap it.
+ * The mmap call will end up in drm_gem_mmap(), which will set things
+ * up so we can get faults in the handler above.
+ *
+ * The fault handler will take care of binding the object into the GTT
+ * (since it may have been evicted to make room for something), allocating
+ * a fence register, and mapping the appropriate aperture address into
+ * userspace.
+ */
+int
+i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_mmap_gtt *args = data;
+
+ if (!(dev->driver->driver_features & DRIVER_GEM))
+ return -ENODEV;
+
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+}
+
+
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
@@ -1669,9 +1718,8 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
}
static void
-i915_gem_process_flushing_list(struct drm_device *dev,
- uint32_t flush_domains,
- struct intel_ring_buffer *ring)
+i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
+ uint32_t flush_domains)
{
struct drm_i915_gem_object *obj, *next;
@@ -1684,7 +1732,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
obj->base.write_domain = 0;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_active(obj, ring,
- i915_gem_next_request_seqno(dev, ring));
+ i915_gem_next_request_seqno(ring));
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -1694,27 +1742,22 @@ i915_gem_process_flushing_list(struct drm_device *dev,
}
int
-i915_add_request(struct drm_device *dev,
+i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring)
+ struct drm_i915_gem_request *request)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_file_private *file_priv = NULL;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
int was_empty;
int ret;
BUG_ON(request == NULL);
- if (file != NULL)
- file_priv = file->driver_priv;
-
ret = ring->add_request(ring, &seqno);
if (ret)
return ret;
- ring->outstanding_lazy_request = false;
+ trace_i915_gem_request_add(ring, seqno);
request->seqno = seqno;
request->ring = ring;
@@ -1722,7 +1765,9 @@ i915_add_request(struct drm_device *dev,
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
- if (file_priv) {
+ if (file) {
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
spin_lock(&file_priv->mm.lock);
request->file_priv = file_priv;
list_add_tail(&request->client_list,
@@ -1730,6 +1775,8 @@ i915_add_request(struct drm_device *dev,
spin_unlock(&file_priv->mm.lock);
}
+ ring->outstanding_lazy_request = false;
+
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1846,18 +1893,15 @@ void i915_gem_reset(struct drm_device *dev)
* This function clears the request list as sequence numbers are passed.
*/
static void
-i915_gem_retire_requests_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
int i;
- if (!ring->status_page.page_addr ||
- list_empty(&ring->request_list))
+ if (list_empty(&ring->request_list))
return;
- WARN_ON(i915_verify_lists(dev));
+ WARN_ON(i915_verify_lists(ring->dev));
seqno = ring->get_seqno(ring);
@@ -1875,7 +1919,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (!i915_seqno_passed(seqno, request->seqno))
break;
- trace_i915_gem_request_retire(dev, request->seqno);
+ trace_i915_gem_request_retire(ring, request->seqno);
list_del(&request->list);
i915_gem_request_remove_from_client(request);
@@ -1901,13 +1945,13 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
i915_gem_object_move_to_inactive(obj);
}
- if (unlikely (dev_priv->trace_irq_seqno &&
- i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
+ if (unlikely(ring->trace_irq_seqno &&
+ i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
ring->irq_put(ring);
- dev_priv->trace_irq_seqno = 0;
+ ring->trace_irq_seqno = 0;
}
- WARN_ON(i915_verify_lists(dev));
+ WARN_ON(i915_verify_lists(ring->dev));
}
void
@@ -1931,7 +1975,7 @@ i915_gem_retire_requests(struct drm_device *dev)
}
for (i = 0; i < I915_NUM_RINGS; i++)
- i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
+ i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}
static void
@@ -1965,11 +2009,11 @@ i915_gem_retire_work_handler(struct work_struct *work)
struct drm_i915_gem_request *request;
int ret;
- ret = i915_gem_flush_ring(dev, ring, 0,
- I915_GEM_GPU_DOMAINS);
+ ret = i915_gem_flush_ring(ring,
+ 0, I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (ret || request == NULL ||
- i915_add_request(dev, NULL, request, ring))
+ i915_add_request(ring, NULL, request))
kfree(request);
}
@@ -1982,18 +2026,32 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
- bool interruptible, struct intel_ring_buffer *ring)
+i915_wait_request(struct intel_ring_buffer *ring,
+ uint32_t seqno)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
int ret = 0;
BUG_ON(seqno == 0);
- if (atomic_read(&dev_priv->mm.wedged))
- return -EAGAIN;
+ if (atomic_read(&dev_priv->mm.wedged)) {
+ struct completion *x = &dev_priv->error_completion;
+ bool recovery_complete;
+ unsigned long flags;
+
+ /* Give the error handler a chance to run. */
+ spin_lock_irqsave(&x->wait.lock, flags);
+ recovery_complete = x->done > 0;
+ spin_unlock_irqrestore(&x->wait.lock, flags);
+
+ return recovery_complete ? -EIO : -EAGAIN;
+ }
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
@@ -2002,7 +2060,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (request == NULL)
return -ENOMEM;
- ret = i915_add_request(dev, NULL, request, ring);
+ ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -2012,22 +2070,22 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
}
if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(ring->dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(dev);
- i915_driver_irq_postinstall(dev);
+ i915_driver_irq_preinstall(ring->dev);
+ i915_driver_irq_postinstall(ring->dev);
}
- trace_i915_gem_request_wait_begin(dev, seqno);
+ trace_i915_gem_request_wait_begin(ring, seqno);
ring->waiting_seqno = seqno;
if (ring->irq_get(ring)) {
- if (interruptible)
+ if (dev_priv->mm.interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
@@ -2043,7 +2101,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
ret = -EBUSY;
ring->waiting_seqno = 0;
- trace_i915_gem_request_wait_end(dev, seqno);
+ trace_i915_gem_request_wait_end(ring, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
@@ -2059,31 +2117,18 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests_ring(dev, ring);
+ i915_gem_retire_requests_ring(ring);
return ret;
}
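
[Editor's note: the wedged check at the top of i915_wait_request() peeks at dev_priv->error_completion without sleeping on it, returning -EIO once the reset handler has run and -EAGAIN while recovery is still pending. A minimal sketch of that idiom, assuming a struct completion that the error handler complete()s:

	static bool recovery_has_completed(struct completion *x)
	{
		unsigned long flags;
		bool done;

		spin_lock_irqsave(&x->wait.lock, flags);
		done = x->done > 0;	/* complete() increments ->done */
		spin_unlock_irqrestore(&x->wait.lock, flags);

		return done;
	}
]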
/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno,
- struct intel_ring_buffer *ring)
-{
- return i915_do_wait_request(dev, seqno, 1, ring);
-}
-
-/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
- bool interruptible)
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
@@ -2095,10 +2140,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
* it.
*/
if (obj->active) {
- ret = i915_do_wait_request(dev,
- obj->last_rendering_seqno,
- interruptible,
- obj->ring);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (ret)
return ret;
}
@@ -2148,6 +2190,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret == -ERESTARTSYS)
return ret;
+ trace_i915_gem_object_unbind(obj);
+
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
@@ -2163,29 +2207,27 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
- trace_i915_gem_object_unbind(obj);
-
return ret;
}
int
-i915_gem_flush_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;
+ trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
- i915_gem_process_flushing_list(dev, flush_domains, ring);
+ i915_gem_process_flushing_list(ring, flush_domains);
return 0;
}
-static int i915_ring_idle(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
@@ -2193,15 +2235,13 @@ static int i915_ring_idle(struct drm_device *dev,
return 0;
if (!list_empty(&ring->gpu_write_list)) {
- ret = i915_gem_flush_ring(dev, ring,
+ ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
}
- return i915_wait_request(dev,
- i915_gem_next_request_seqno(dev, ring),
- ring);
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
int
@@ -2218,7 +2258,7 @@ i915_gpu_idle(struct drm_device *dev)
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
- ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+ ret = i915_ring_idle(&dev_priv->ring[i]);
if (ret)
return ret;
}
@@ -2402,15 +2442,13 @@ static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible)
+ struct intel_ring_buffer *pipelined)
{
int ret;
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev,
- obj->last_fenced_ring,
+ ret = i915_gem_flush_ring(obj->last_fenced_ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2422,10 +2460,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
- ret = i915_do_wait_request(obj->base.dev,
- obj->last_fenced_seqno,
- interruptible,
- obj->last_fenced_ring);
+ ret = i915_wait_request(obj->last_fenced_ring,
+ obj->last_fenced_seqno);
if (ret)
return ret;
}
@@ -2451,7 +2487,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
- ret = i915_gem_object_flush_fence(obj, NULL, true);
+ ret = i915_gem_object_flush_fence(obj, NULL);
if (ret)
return ret;
@@ -2528,8 +2564,7 @@ i915_find_fence_reg(struct drm_device *dev,
*/
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined,
- bool interruptible)
+ struct intel_ring_buffer *pipelined)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2551,10 +2586,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (reg->setup_seqno) {
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
- ret = i915_do_wait_request(obj->base.dev,
- reg->setup_seqno,
- interruptible,
- obj->last_fenced_ring);
+ ret = i915_wait_request(obj->last_fenced_ring,
+ reg->setup_seqno);
if (ret)
return ret;
}
@@ -2563,15 +2596,13 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
}
} else if (obj->last_fenced_ring &&
obj->last_fenced_ring != pipelined) {
- ret = i915_gem_object_flush_fence(obj,
- pipelined,
- interruptible);
+ ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
} else if (obj->tiling_changed) {
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
if (ret)
return ret;
@@ -2588,7 +2619,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (obj->tiling_changed) {
if (pipelined) {
reg->setup_seqno =
- i915_gem_next_request_seqno(dev, pipelined);
+ i915_gem_next_request_seqno(pipelined);
obj->last_fenced_seqno = reg->setup_seqno;
obj->last_fenced_ring = pipelined;
}
@@ -2602,7 +2633,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (reg == NULL)
return -ENOSPC;
- ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ ret = i915_gem_object_flush_fence(obj, pipelined);
if (ret)
return ret;
@@ -2614,9 +2645,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (old->tiling_mode)
i915_gem_release_mmap(old);
- ret = i915_gem_object_flush_fence(old,
- pipelined,
- interruptible);
+ ret = i915_gem_object_flush_fence(old, pipelined);
if (ret) {
drm_gem_object_unreference(&old->base);
return ret;
@@ -2628,7 +2657,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
old->fence_reg = I915_FENCE_REG_NONE;
old->last_fenced_ring = pipelined;
old->last_fenced_seqno =
- pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
drm_gem_object_unreference(&old->base);
} else if (obj->last_fenced_seqno == 0)
@@ -2640,7 +2669,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
obj->last_fenced_ring = pipelined;
reg->setup_seqno =
- pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
obj->last_fenced_seqno = reg->setup_seqno;
update:
@@ -2837,7 +2866,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
obj->map_and_fenceable = mappable && fenceable;
- trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+ trace_i915_gem_object_bind(obj, map_and_fenceable);
return 0;
}
@@ -2860,13 +2889,11 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
-
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
/* Queue the GPU write cache flushing we need. */
- return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+ return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2933,12 +2960,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (obj->gtt_space == NULL)
return -EINVAL;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+ return 0;
+
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
if (obj->pending_gpu_write || write) {
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@@ -2988,7 +3018,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
/* Currently, we are always called from a non-interruptible context. */
if (pipelined != obj->ring) {
- ret = i915_gem_object_wait_rendering(obj, false);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
@@ -3006,8 +3036,7 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
}
int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible)
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
{
int ret;
@@ -3015,13 +3044,12 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
- 0, obj->base.write_domain);
+ ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
if (ret)
return ret;
}
- return i915_gem_object_wait_rendering(obj, interruptible);
+ return i915_gem_object_wait_rendering(obj);
}
/**
@@ -3036,11 +3064,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
uint32_t old_write_domain, old_read_domains;
int ret;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+ return 0;
+
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -3138,7 +3169,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- ret = i915_gem_object_wait_rendering(obj, true);
+ ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
@@ -3209,6 +3240,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
u32 seqno = 0;
int ret;
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EIO;
+
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
@@ -3324,7 +3358,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3375,7 +3409,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3412,7 +3446,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3430,7 +3464,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* flush earlier is beneficial.
*/
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
- ret = i915_gem_flush_ring(dev, obj->ring,
+ ret = i915_gem_flush_ring(obj->ring,
0, obj->base.write_domain);
} else if (obj->ring->outstanding_lazy_request ==
obj->last_rendering_seqno) {
@@ -3441,9 +3475,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
*/
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request)
- ret = i915_add_request(dev,
- NULL, request,
- obj->ring);
+ ret = i915_add_request(obj->ring, NULL, request);
else
ret = -ENOMEM;
}
@@ -3453,7 +3485,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* are actually unmasked, and our working set ends up being
* larger than required.
*/
- i915_gem_retire_requests_ring(dev, obj->ring);
+ i915_gem_retire_requests_ring(obj->ring);
args->busy = obj->active;
}
@@ -3492,7 +3524,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
@@ -3583,6 +3615,8 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
+
+ trace_i915_gem_object_destroy(obj);
}
void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -3590,8 +3624,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
- trace_i915_gem_object_destroy(obj);
-
while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
@@ -3837,6 +3869,8 @@ i915_gem_load(struct drm_device *dev)
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
+ dev_priv->mm.interruptible = true;
+
dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
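[Editor's note: initializing dev_priv->mm.interruptible to true completes the theme of this patch: the per-call `interruptible` parameters are gone, and waits consult the device-wide flag instead. A hypothetical call site that must not take signals would bracket the wait itself:

	/* Sketch only: callers that cannot cope with -ERESTARTSYS flip
	 * the flag around the wait (struct_mutex is held, so this does
	 * not race with other waiters). */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_wait_rendering(obj);
	dev_priv->mm.interruptible = true;
]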
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 29d014c48ca2..8da1899bd24f 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -134,51 +134,6 @@ i915_verify_lists(struct drm_device *dev)
}
#endif /* WATCH_INACTIVE */
-
-#if WATCH_EXEC | WATCH_PWRITE
-static void
-i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
- uint32_t bias, uint32_t mark)
-{
- uint32_t *mem = kmap_atomic(page, KM_USER0);
- int i;
- for (i = start; i < end; i += 4)
- DRM_INFO("%08x: %08x%s\n",
- (int) (bias + i), mem[i / 4],
- (bias + i == mark) ? " ********" : "");
- kunmap_atomic(mem, KM_USER0);
- /* give syslog time to catch up */
- msleep(1);
-}
-
-void
-i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
- const char *where, uint32_t mark)
-{
- int page;
-
- DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
- for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
- int page_len, chunk, chunk_len;
-
- page_len = len - page * PAGE_SIZE;
- if (page_len > PAGE_SIZE)
- page_len = PAGE_SIZE;
-
- for (chunk = 0; chunk < page_len; chunk += 128) {
- chunk_len = page_len - chunk;
- if (chunk_len > 128)
- chunk_len = 128;
- i915_gem_dump_page(obj->pages[page],
- chunk, chunk + chunk_len,
- obj->gtt_offset +
- page * PAGE_SIZE,
- mark);
- }
- }
-}
-#endif
-
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3d39005540aa..da05a2692a75 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -30,6 +30,7 @@
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
+#include "i915_trace.h"
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
@@ -63,6 +64,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
return 0;
}
+ trace_i915_gem_evict(dev, min_size, alignment, mappable);
+
/*
* The goal is to evict objects and amalgamate space in LRU order.
* The oldest idle objects reside on the inactive list, which is in
@@ -189,6 +192,8 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
if (lists_empty)
return -ENOSPC;
+ trace_i915_gem_evict_everything(dev, purgeable_only);
+
/* Flush everything (on to the inactive lists) and evict */
ret = i915_gpu_idle(dev);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 50ab1614571c..7ff7f933ddf1 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -37,6 +37,7 @@ struct change_domains {
uint32_t invalidate_domains;
uint32_t flush_domains;
uint32_t flush_rings;
+ uint32_t flips;
};
/*
@@ -190,6 +191,9 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
i915_gem_release_mmap(obj);
+ if (obj->base.pending_write_domain)
+ cd->flips |= atomic_read(&obj->pending_flip);
+
/* The actual obj->write_domain will be updated with
* pending_write_domain after we emit the accumulated flush for all
* of our domain changes in execbuffers (which clears objects'
@@ -282,21 +286,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
target_offset = to_intel_bo(target_obj)->gtt_offset;
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc->offset,
- (int) reloc->target_handle,
- (int) reloc->read_domains,
- (int) reloc->write_domain,
- (int) target_offset,
- (int) reloc->presumed_offset,
- reloc->delta);
-#endif
-
/* The target buffer should have appeared before us in the
* exec_object list, so it should have a GTT space bound by now.
*/
@@ -365,16 +354,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
}
- /* and points to somewhere within the target object. */
- if (unlikely(reloc->delta >= target_obj->size)) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc->target_handle,
- (int) reloc->delta,
- (int) target_obj->size);
- return ret;
- }
-
reloc->delta += target_offset;
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
@@ -575,7 +554,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (has_fenced_gpu_access) {
if (need_fence) {
- ret = i915_gem_object_get_fence(obj, ring, 1);
+ ret = i915_gem_object_get_fence(obj, ring);
if (ret)
break;
} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
@@ -690,11 +669,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj;
-
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
@@ -749,8 +726,7 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
- ret = i915_gem_flush_ring(dev,
- &dev_priv->ring[i],
+ ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
@@ -774,7 +750,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
- return i915_gem_object_wait_rendering(obj, true);
+ return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
@@ -789,7 +765,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
if (request == NULL)
return -ENOMEM;
- ret = i915_add_request(obj->base.dev, NULL, request, from);
+ ret = i915_add_request(from, NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -803,6 +779,39 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
}
static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
+{
+ u32 plane, flip_mask;
+ int ret;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+
+ return 0;
+}
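+/*
+ * Worked example of the loop above, with a hypothetical mask:
+ * flips == 0x3 means both planes have a flip queued, so two waits are
+ * emitted ahead of the batch:
+ *   plane 0 -> MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP
+ *   plane 1 -> MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_B_FLIP
+ * each paired with an MI_NOOP to fill the two dwords reserved by
+ * intel_ring_begin(ring, 2).
+ */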
+
+
+static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
@@ -810,19 +819,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct change_domains cd;
int ret;
- cd.invalidate_domains = 0;
- cd.flush_domains = 0;
- cd.flush_rings = 0;
+ memset(&cd, 0, sizeof(cd));
list_for_each_entry(obj, objects, exec_list)
i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
if (cd.invalidate_domains | cd.flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- cd.invalidate_domains,
- cd.flush_domains);
-#endif
ret = i915_gem_execbuffer_flush(ring->dev,
cd.invalidate_domains,
cd.flush_domains,
@@ -831,6 +832,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
}
+ if (cd.flips) {
+ ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
+ if (ret)
+ return ret;
+ }
+
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_sync_rings(obj, ring);
if (ret)
@@ -877,47 +884,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
return 0;
}
-static int
-i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
- struct list_head *objects)
-{
- struct drm_i915_gem_object *obj;
- int flips;
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
- flips = 0;
- list_for_each_entry(obj, objects, exec_list) {
- if (obj->base.write_domain)
- flips |= atomic_read(&obj->pending_flip);
- }
- if (flips) {
- int plane, flip_mask, ret;
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- ret = intel_ring_begin(ring, 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
- }
- }
-
- return 0;
-}
-
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct intel_ring_buffer *ring,
@@ -926,6 +892,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
struct drm_i915_gem_object *obj;
list_for_each_entry(obj, objects, exec_list) {
+ u32 old_read = obj->base.read_domains;
+ u32 old_write = obj->base.write_domain;
+

obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
@@ -939,9 +909,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
intel_mark_busy(ring->dev, obj);
}
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- obj->base.write_domain);
+ trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
@@ -963,14 +931,14 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 4)
invalidate |= I915_GEM_DOMAIN_SAMPLER;
if (ring->flush(ring, invalidate, 0)) {
- i915_gem_next_request_seqno(dev, ring);
+ i915_gem_next_request_seqno(ring);
return;
}
/* Add a breadcrumb for the completion of the batch buffer */
request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL || i915_add_request(dev, file, request, ring)) {
- i915_gem_next_request_seqno(dev, ring);
+ if (request == NULL || i915_add_request(ring, file, request)) {
+ i915_gem_next_request_seqno(ring);
kfree(request);
}
}
@@ -1000,10 +968,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@@ -1113,7 +1077,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
- if (obj == NULL) {
+ if (&obj->base == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
@@ -1170,11 +1134,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto err;
- ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
- if (ret)
- goto err;
-
- seqno = i915_gem_next_request_seqno(dev, ring);
+ seqno = i915_gem_next_request_seqno(ring);
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
if (seqno < ring->sync_seqno[i]) {
/* The GPU can not handle its semaphore value wrapping,
@@ -1189,6 +1149,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ trace_i915_gem_ring_dispatch(ring, seqno);
+
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
@@ -1245,11 +1207,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret, i;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
@@ -1330,17 +1287,16 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
if (args->buffer_count < 1) {
DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (exec2_list == NULL)
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list),
+ args->buffer_count);
if (exec2_list == NULL) {
DRM_ERROR("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
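[Editor's note: the new allocation strategy for exec2_list tries a cheap physically contiguous kmalloc first, with warnings and reclaim retries suppressed, and only then falls back to drm_malloc_ab(), which resorts to vmalloc for large arrays. The generic shape of the pattern, sketched with a hypothetical buffer:

	void *buf = kmalloc(count * size,
			    GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (buf == NULL)
		buf = vmalloc(count * size);	/* roughly what drm_malloc_ab() does */
]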
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d64843e18df2..281ad3d6115d 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -284,14 +284,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
- int ret;
-
- ret = i915_gem_check_is_wedged(dev);
- if (ret)
- return ret;
+ int ret = 0;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL)
+ if (&obj->base == NULL)
return -ENOENT;
if (!i915_tiling_ok(dev,
@@ -384,7 +380,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (obj == NULL)
+ if (&obj->base == NULL)
return -ENOENT;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8a9e08bf1cf7..188b497e5076 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,21 +85,11 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
-static inline u32
-i915_pipestat(int pipe)
-{
- if (pipe == 0)
- return PIPEASTAT;
- if (pipe == 1)
- return PIPEBSTAT;
- BUG();
-}
-
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
- u32 reg = i915_pipestat(pipe);
+ u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
@@ -112,7 +102,7 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
- u32 reg = i915_pipestat(pipe);
+ u32 reg = PIPESTAT(pipe);
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
@@ -171,12 +161,12 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
- high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
- low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+ high_frame = PIPEFRAME(pipe);
+ low_frame = PIPEFRAMEPIXEL(pipe);
/*
* High & low register fields aren't synchronized, so make sure
@@ -197,11 +187,11 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
+ int reg = PIPE_FRMCOUNT_GM45(pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
@@ -219,7 +209,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
- "pipe %d\n", pipe);
+ "pipe %c\n", pipe_name(pipe));
return 0;
}
@@ -367,7 +357,7 @@ static void notify_ring(struct drm_device *dev,
return;
seqno = ring->get_seqno(ring);
- trace_i915_gem_request_complete(dev, seqno);
+ trace_i915_gem_request_complete(ring, seqno);
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
@@ -419,6 +409,7 @@ static void pch_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 pch_iir;
+ int pipe;
pch_iir = I915_READ(SDEIIR);
@@ -439,13 +430,11 @@ static void pch_irq_handler(struct drm_device *dev)
if (pch_iir & SDE_POISON)
DRM_ERROR("PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK) {
- u32 fdia, fdib;
-
- fdia = I915_READ(FDI_RXA_IIR);
- fdib = I915_READ(FDI_RXB_IIR);
- DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
- }
+ if (pch_iir & SDE_FDI_MASK)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
@@ -650,9 +639,14 @@ static void
i915_error_state_free(struct drm_device *dev,
struct drm_i915_error_state *error)
{
- i915_error_object_free(error->batchbuffer[0]);
- i915_error_object_free(error->batchbuffer[1]);
- i915_error_object_free(error->ringbuffer);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++)
+ i915_error_object_free(error->batchbuffer[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->ringbuffer); i++)
+ i915_error_object_free(error->ringbuffer[i]);
+
kfree(error->active_bo);
kfree(error->overlay);
kfree(error);
@@ -767,7 +761,7 @@ static void i915_capture_error_state(struct drm_device *dev)
struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
unsigned long flags;
- int i;
+ int i, pipe;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
@@ -775,19 +769,21 @@ static void i915_capture_error_state(struct drm_device *dev)
if (error)
return;
+ /* Account for pipe specific data like PIPE*STAT */
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
- DRM_DEBUG_DRIVER("generating error event\n");
+ DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
+ dev->primary->index);
error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
- error->pipeastat = I915_READ(PIPEASTAT);
- error->pipebstat = I915_READ(PIPEBSTAT);
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
error->instpm = I915_READ(INSTPM);
error->error = 0;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -826,15 +822,16 @@ static void i915_capture_error_state(struct drm_device *dev)
}
i915_gem_record_fences(dev, error);
- /* Record the active batchbuffers */
- for (i = 0; i < I915_NUM_RINGS; i++)
+ /* Record the active batch and ring buffers */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
error->batchbuffer[i] =
i915_error_first_batchbuffer(dev_priv,
&dev_priv->ring[i]);
- /* Record the ringbuffer */
- error->ringbuffer = i915_error_object_create(dev_priv,
- dev_priv->ring[RCS].obj);
+ error->ringbuffer[i] =
+ i915_error_object_create(dev_priv,
+ dev_priv->ring[i].obj);
+ }
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
@@ -907,6 +904,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 eir = I915_READ(EIR);
+ int pipe;
if (!eir)
return;
@@ -955,14 +953,10 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
}
if (eir & I915_ERROR_MEMORY_REFRESH) {
- u32 pipea_stats = I915_READ(PIPEASTAT);
- u32 pipeb_stats = I915_READ(PIPEBSTAT);
-
- printk(KERN_ERR "memory refresh error\n");
- printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
- pipea_stats);
- printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
- pipeb_stats);
+ printk(KERN_ERR "memory refresh error:\n");
+ for_each_pipe(pipe)
+ printk(KERN_ERR "pipe %c stat: 0x%08x\n",
+ pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
@@ -1076,10 +1070,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
- int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
+ int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
- int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
+ int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
@@ -1099,12 +1093,13 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv;
u32 iir, new_iir;
- u32 pipea_stats, pipeb_stats;
+ u32 pipe_stats[I915_MAX_PIPES];
u32 vblank_status;
int vblank = 0;
unsigned long irqflags;
int irq_received;
- int ret = IRQ_NONE;
+ int ret = IRQ_NONE, pipe;
+ bool blc_event = false;
atomic_inc(&dev_priv->irq_received);
@@ -1127,27 +1122,23 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- pipea_stats = I915_READ(PIPEASTAT);
- pipeb_stats = I915_READ(PIPEBSTAT);
-
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
- /*
- * Clear the PIPE(A|B)STAT regs before the IIR
- */
- if (pipea_stats & 0x8000ffff) {
- if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe a underrun\n");
- I915_WRITE(PIPEASTAT, pipea_stats);
- irq_received = 1;
- }
-
- if (pipeb_stats & 0x8000ffff) {
- if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_DEBUG_DRIVER("pipe b underrun\n");
- I915_WRITE(PIPEBSTAT, pipeb_stats);
- irq_received = 1;
+ for_each_pipe(pipe) {
+ int reg = PIPESTAT(pipe);
+ pipe_stats[pipe] = I915_READ(reg);
+
+ /*
+ * Clear the PIPE*STAT regs before the IIR
+ */
+ if (pipe_stats[pipe] & 0x8000ffff) {
+ if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
+ DRM_DEBUG_DRIVER("pipe %c underrun\n",
+ pipe_name(pipe));
+ I915_WRITE(reg, pipe_stats[pipe]);
+ irq_received = 1;
+ }
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -1198,27 +1189,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}
- if (pipea_stats & vblank_status &&
- drm_handle_vblank(dev, 0)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 0);
- intel_finish_page_flip(dev, 0);
+ for_each_pipe(pipe) {
+ if (pipe_stats[pipe] & vblank_status &&
+ drm_handle_vblank(dev, pipe)) {
+ vblank++;
+ if (!dev_priv->flip_pending_is_done) {
+ i915_pageflip_stall_check(dev, pipe);
+ intel_finish_page_flip(dev, pipe);
+ }
}
- }
- if (pipeb_stats & vblank_status &&
- drm_handle_vblank(dev, 1)) {
- vblank++;
- if (!dev_priv->flip_pending_is_done) {
- i915_pageflip_stall_check(dev, 1);
- intel_finish_page_flip(dev, 1);
- }
+ if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
+ blc_event = true;
}
- if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
- (iir & I915_ASLE_INTERRUPT))
+
+ if (blc_event || (iir & I915_ASLE_INTERRUPT))
intel_opregion_asle_intr(dev);
/* With MSI, interrupts are only generated when iir
@@ -1268,16 +1254,6 @@ static int i915_emit_irq(struct drm_device * dev)
return dev_priv->counter;
}
-void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *ring = LP_RING(dev_priv);
-
- if (dev_priv->trace_irq_seqno == 0 &&
- ring->irq_get(ring))
- dev_priv->trace_irq_seqno = seqno;
-}
-
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1377,7 +1353,12 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
+
+ /* maintain vblank delivery even in deep C-states */
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
return 0;
}
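[Editor's note: INSTPM is a masked register: the high 16 bits of a write select which of the low 16 bits take effect. Writing INSTPM_AGPBUSY_DIS << 16 therefore arms the mask while supplying a 0 value, i.e. it clears the bit so AGPBUSY# keeps asserting and vblank interrupts keep arriving in deep C-states; the disable path below sets the bit again. A sketch of the convention (later kernels name these helpers _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE):

	static inline u32 masked_bit_set(u32 bit)   { return (bit << 16) | bit; }
	static inline u32 masked_bit_clear(u32 bit) { return bit << 16; }
]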
@@ -1390,6 +1371,10 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ if (dev_priv->info->gen == 3)
+ I915_WRITE(INSTPM,
+ INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);
+
if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
@@ -1400,16 +1385,6 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void i915_enable_interrupt (struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (!HAS_PCH_SPLIT(dev))
- intel_opregion_enable_asle(dev);
- dev_priv->irq_enabled = 1;
-}
-
-
/* Set the vblank monitor pipe
*/
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
@@ -1646,12 +1621,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
- hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
- SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+ hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
+ SDE_PORTB_HOTPLUG_CPT |
+ SDE_PORTC_HOTPLUG_CPT |
+ SDE_PORTD_HOTPLUG_CPT);
} else {
- hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
- SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
- hotplug_mask |= SDE_AUX_MASK;
+ hotplug_mask = (SDE_CRT_HOTPLUG |
+ SDE_PORTB_HOTPLUG |
+ SDE_PORTC_HOTPLUG |
+ SDE_PORTD_HOTPLUG |
+ SDE_AUX_MASK);
}
dev_priv->pch_irq_mask = ~hotplug_mask;
@@ -1674,6 +1653,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
atomic_set(&dev_priv->irq_received, 0);
@@ -1691,8 +1671,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xeffe);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
POSTING_READ(IER);
@@ -1804,6 +1784,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
if (!dev_priv)
return;
@@ -1821,12 +1802,13 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
}
I915_WRITE(HWSTAM, 0xffffffff);
- I915_WRITE(PIPEASTAT, 0);
- I915_WRITE(PIPEBSTAT, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
- I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
+ for_each_pipe(pipe)
+ I915_WRITE(PIPESTAT(pipe),
+ I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
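[Editor's note: the final read-modify-write acks any status bits still pending at teardown. The 0x8000ffff mask reflects the PIPE*STAT layout this code assumes:

	/* bit 31      : FIFO underrun status (write 1 to clear)
	 * bits 30..16 : interrupt enable bits (deliberately left at 0)
	 * bits 15..0  : per-event status bits (write 1 to clear)
	 */
	I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
]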
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2abe240dae58..f39ac3a0fa93 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -405,9 +405,12 @@
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
#define INSTPM_SELF_EN (1<<12) /* 915GM only */
+#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
-#define FW_BLC2 0x020dc
+#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
#define FW_BLC_SELF_EN_MASK (1<<31)
#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
@@ -706,9 +709,9 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
-#define DPLL_A 0x06014
-#define DPLL_B 0x06018
-#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
+#define _DPLL_A 0x06014
+#define _DPLL_B 0x06018
+#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
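[Editor's note: all of the register renames in this file exist to feed the _PIPE() helper: the raw pipe A/B offsets gain a leading underscore to mark them private, and the public name becomes a macro over the pipe index. _PIPE() itself is defined near the top of i915_reg.h; a sketch of it:

	/* Linear interpolation between the pipe A and pipe B mmio
	 * offsets; works because pipe B registers sit at a constant
	 * stride from their pipe A counterparts. */
	#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
]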
@@ -779,7 +782,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD 0x0601c /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@@ -816,14 +819,14 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
-#define DPLL_B_MD 0x06020 /* 965+ only */
-#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
-#define FPA0 0x06040
-#define FPA1 0x06044
-#define FPB0 0x06048
-#define FPB1 0x0604c
-#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
-#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
+#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
+#define _FPA0 0x06040
+#define _FPA1 0x06044
+#define _FPB0 0x06048
+#define _FPB1 0x0604c
+#define FP0(pipe) _PIPE(pipe, _FPA0, _FPB0)
+#define FP1(pipe) _PIPE(pipe, _FPA1, _FPB1)
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
@@ -962,8 +965,9 @@
* Palette regs
*/
-#define PALETTE_A 0x0a000
-#define PALETTE_B 0x0a800
+#define _PALETTE_A 0x0a000
+#define _PALETTE_B 0x0a800
+#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
/* MCH MMIO space */
@@ -1267,32 +1271,32 @@
*/
/* Pipe A timing regs */
-#define HTOTAL_A 0x60000
-#define HBLANK_A 0x60004
-#define HSYNC_A 0x60008
-#define VTOTAL_A 0x6000c
-#define VBLANK_A 0x60010
-#define VSYNC_A 0x60014
-#define PIPEASRC 0x6001c
-#define BCLRPAT_A 0x60020
+#define _HTOTAL_A 0x60000
+#define _HBLANK_A 0x60004
+#define _HSYNC_A 0x60008
+#define _VTOTAL_A 0x6000c
+#define _VBLANK_A 0x60010
+#define _VSYNC_A 0x60014
+#define _PIPEASRC 0x6001c
+#define _BCLRPAT_A 0x60020
/* Pipe B timing regs */
-#define HTOTAL_B 0x61000
-#define HBLANK_B 0x61004
-#define HSYNC_B 0x61008
-#define VTOTAL_B 0x6100c
-#define VBLANK_B 0x61010
-#define VSYNC_B 0x61014
-#define PIPEBSRC 0x6101c
-#define BCLRPAT_B 0x61020
-
-#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
-#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
-#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
-#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
-#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
-#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+#define _HTOTAL_B 0x61000
+#define _HBLANK_B 0x61004
+#define _HSYNC_B 0x61008
+#define _VTOTAL_B 0x6100c
+#define _VBLANK_B 0x61010
+#define _VSYNC_B 0x61014
+#define _PIPEBSRC 0x6101c
+#define _BCLRPAT_B 0x61020
+
+#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
+#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
/* VGA port control */
#define ADPA 0x61100
@@ -1386,6 +1390,7 @@
#define SDVO_ENCODING_HDMI (0x2 << 10)
/** Required for HDMI operation */
#define SDVO_NULL_PACKETS_DURING_VSYNC (1 << 9)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
#define SDVO_BORDER_ENABLE (1 << 7)
#define SDVO_AUDIO_ENABLE (1 << 6)
/** New with 965, default is to be set */
@@ -1441,8 +1446,13 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+#define LVDS_PIPE_MASK (1 << 30)
/* LVDS dithering flag on 965/g4x platform */
#define LVDS_ENABLE_DITHER (1 << 25)
+/* LVDS sync polarity flags. Set to invert (i.e. negative) */
+#define LVDS_VSYNC_POLARITY (1 << 21)
+#define LVDS_HSYNC_POLARITY (1 << 20)
+
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1476,6 +1486,9 @@
#define LVDS_B0B3_POWER_DOWN (0 << 2)
#define LVDS_B0B3_POWER_UP (3 << 2)
+#define LVDS_PIPE_ENABLED(V, P) \
+ (((V) & (LVDS_PIPE_MASK | LVDS_PORT_EN)) == ((P) << 30 | LVDS_PORT_EN))
+
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
#define VIDEO_DIP_CTL 0x61170
@@ -2064,6 +2077,10 @@
#define DP_PORT_EN (1 << 31)
#define DP_PIPEB_SELECT (1 << 30)
+#define DP_PIPE_MASK (1 << 30)
+
+#define DP_PIPE_ENABLED(V, P) \
+ (((V) & (DP_PIPE_MASK | DP_PORT_EN)) == ((P) << 30 | DP_PORT_EN))
/* Link training mode - select a suitable mode for each stage */
#define DP_LINK_TRAIN_PAT_1 (0 << 28)
@@ -2206,8 +2223,8 @@
* which is after the LUTs, so we want the bytes for our color format.
* For our current usage, this is always 3, one byte for R, G and B.
*/
-#define PIPEA_GMCH_DATA_M 0x70050
-#define PIPEB_GMCH_DATA_M 0x71050
+#define _PIPEA_GMCH_DATA_M 0x70050
+#define _PIPEB_GMCH_DATA_M 0x71050
/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
@@ -2215,8 +2232,8 @@
#define PIPE_GMCH_DATA_M_MASK (0xffffff)
-#define PIPEA_GMCH_DATA_N 0x70054
-#define PIPEB_GMCH_DATA_N 0x71054
+#define _PIPEA_GMCH_DATA_N 0x70054
+#define _PIPEB_GMCH_DATA_N 0x71054
#define PIPE_GMCH_DATA_N_MASK (0xffffff)
/*
@@ -2230,20 +2247,25 @@
* Attributes and VB-ID.
*/
-#define PIPEA_DP_LINK_M 0x70060
-#define PIPEB_DP_LINK_M 0x71060
+#define _PIPEA_DP_LINK_M 0x70060
+#define _PIPEB_DP_LINK_M 0x71060
#define PIPEA_DP_LINK_M_MASK (0xffffff)
-#define PIPEA_DP_LINK_N 0x70064
-#define PIPEB_DP_LINK_N 0x71064
+#define _PIPEA_DP_LINK_N 0x70064
+#define _PIPEB_DP_LINK_N 0x71064
#define PIPEA_DP_LINK_N_MASK (0xffffff)
+#define PIPE_GMCH_DATA_M(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_M, _PIPEB_GMCH_DATA_M)
+#define PIPE_GMCH_DATA_N(pipe) _PIPE(pipe, _PIPEA_GMCH_DATA_N, _PIPEB_GMCH_DATA_N)
+#define PIPE_DP_LINK_M(pipe) _PIPE(pipe, _PIPEA_DP_LINK_M, _PIPEB_DP_LINK_M)
+#define PIPE_DP_LINK_N(pipe) _PIPE(pipe, _PIPEA_DP_LINK_N, _PIPEB_DP_LINK_N)
+
/* Display & cursor control */
/* Pipe A */
-#define PIPEADSL 0x70000
+#define _PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff
-#define PIPEACONF 0x70008
+#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@@ -2269,7 +2291,7 @@
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
-#define PIPEASTAT 0x70024
+#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
@@ -2305,10 +2327,12 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
-#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
-#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
-#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
+#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
+#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
+#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
+#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
+#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2470,20 +2494,21 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
-#define PIPEAFRAMEHIGH 0x70040
+#define _PIPEAFRAMEHIGH 0x70040
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
-#define PIPEAFRAMEPIXEL 0x70044
+#define _PIPEAFRAMEPIXEL 0x70044
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
#define PIPE_PIXEL_SHIFT 0
/* GM45+ just has to be different */
-#define PIPEA_FRMCOUNT_GM45 0x70040
-#define PIPEA_FLIPCOUNT_GM45 0x70044
+#define _PIPEA_FRMCOUNT_GM45 0x70040
+#define _PIPEA_FLIPCOUNT_GM45 0x70044
+#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
-#define CURACNTR 0x70080
+#define _CURACNTR 0x70080
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
@@ -2504,23 +2529,23 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define CURABASE 0x70084
-#define CURAPOS 0x70088
+#define _CURABASE 0x70084
+#define _CURAPOS 0x70088
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
-#define CURBCNTR 0x700c0
-#define CURBBASE 0x700c4
-#define CURBPOS 0x700c8
+#define _CURBCNTR 0x700c0
+#define _CURBBASE 0x700c4
+#define _CURBPOS 0x700c8
-#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
-#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
-#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+#define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
/* Display A control */
-#define DSPACNTR 0x70180
+#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@@ -2534,9 +2559,10 @@
#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+#define DISPPLANE_SEL_PIPE_SHIFT 24
+#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
-#define DISPPLANE_SEL_PIPE_B (1<<24)
+#define DISPPLANE_SEL_PIPE_B (1<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SRC_KEY_ENABLE (1<<22)
#define DISPPLANE_SRC_KEY_DISABLE 0
#define DISPPLANE_LINE_DOUBLE (1<<20)
@@ -2545,20 +2571,20 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
-#define DSPAADDR 0x70184
-#define DSPASTRIDE 0x70188
-#define DSPAPOS 0x7018C /* reserved */
-#define DSPASIZE 0x70190
-#define DSPASURF 0x7019C /* 965+ only */
-#define DSPATILEOFF 0x701A4 /* 965+ only */
-
-#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
-#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
-#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
-#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
-#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
-#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
-#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+#define _DSPAADDR 0x70184
+#define _DSPASTRIDE 0x70188
+#define _DSPAPOS 0x7018C /* reserved */
+#define _DSPASIZE 0x70190
+#define _DSPASURF 0x7019C /* 965+ only */
+#define _DSPATILEOFF 0x701A4 /* 965+ only */
+
+#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, _DSPASTRIDE, _DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, _DSPAPOS, _DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
/* VBIOS flags */
#define SWF00 0x71410
@@ -2576,27 +2602,27 @@
#define SWF32 0x7241c
/* Pipe B */
-#define PIPEBDSL 0x71000
-#define PIPEBCONF 0x71008
-#define PIPEBSTAT 0x71024
-#define PIPEBFRAMEHIGH 0x71040
-#define PIPEBFRAMEPIXEL 0x71044
-#define PIPEB_FRMCOUNT_GM45 0x71040
-#define PIPEB_FLIPCOUNT_GM45 0x71044
+#define _PIPEBDSL 0x71000
+#define _PIPEBCONF 0x71008
+#define _PIPEBSTAT 0x71024
+#define _PIPEBFRAMEHIGH 0x71040
+#define _PIPEBFRAMEPIXEL 0x71044
+#define _PIPEB_FRMCOUNT_GM45 0x71040
+#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
-#define DSPBCNTR 0x71180
+#define _DSPBCNTR 0x71180
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define DSPBADDR 0x71184
-#define DSPBSTRIDE 0x71188
-#define DSPBPOS 0x7118C
-#define DSPBSIZE 0x71190
-#define DSPBSURF 0x7119C
-#define DSPBTILEOFF 0x711A4
+#define _DSPBADDR 0x71184
+#define _DSPBSTRIDE 0x71188
+#define _DSPBPOS 0x7118C
+#define _DSPBSIZE 0x71190
+#define _DSPBSURF 0x7119C
+#define _DSPBTILEOFF 0x711A4
/* VBIOS regs */
#define VGACNTRL 0x71400
@@ -2650,68 +2676,80 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
-#define PIPEA_DATA_M1 0x60030
+#define _PIPEA_DATA_M1 0x60030
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
-#define PIPEA_DATA_N1 0x60034
+#define _PIPEA_DATA_N1 0x60034
#define PIPE_DATA_N1_OFFSET 0
-#define PIPEA_DATA_M2 0x60038
+#define _PIPEA_DATA_M2 0x60038
#define PIPE_DATA_M2_OFFSET 0
-#define PIPEA_DATA_N2 0x6003c
+#define _PIPEA_DATA_N2 0x6003c
#define PIPE_DATA_N2_OFFSET 0
-#define PIPEA_LINK_M1 0x60040
+#define _PIPEA_LINK_M1 0x60040
#define PIPE_LINK_M1_OFFSET 0
-#define PIPEA_LINK_N1 0x60044
+#define _PIPEA_LINK_N1 0x60044
#define PIPE_LINK_N1_OFFSET 0
-#define PIPEA_LINK_M2 0x60048
+#define _PIPEA_LINK_M2 0x60048
#define PIPE_LINK_M2_OFFSET 0
-#define PIPEA_LINK_N2 0x6004c
+#define _PIPEA_LINK_N2 0x6004c
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs are same start from 0x61000 */
-#define PIPEB_DATA_M1 0x61030
-#define PIPEB_DATA_N1 0x61034
+#define _PIPEB_DATA_M1 0x61030
+#define _PIPEB_DATA_N1 0x61034
-#define PIPEB_DATA_M2 0x61038
-#define PIPEB_DATA_N2 0x6103c
+#define _PIPEB_DATA_M2 0x61038
+#define _PIPEB_DATA_N2 0x6103c
-#define PIPEB_LINK_M1 0x61040
-#define PIPEB_LINK_N1 0x61044
+#define _PIPEB_LINK_M1 0x61040
+#define _PIPEB_LINK_N1 0x61044
-#define PIPEB_LINK_M2 0x61048
-#define PIPEB_LINK_N2 0x6104c
+#define _PIPEB_LINK_M2 0x61048
+#define _PIPEB_LINK_N2 0x6104c
-#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
-#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
-#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
-#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
-#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
-#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
-#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
-#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
-#define PFA_CTL_1 0x68080
-#define PFB_CTL_1 0x68880
+/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
+#define _PFA_CTL_1 0x68080
+#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
#define PF_FILTER_EDGE_ENHANCE (2<<23)
#define PF_FILTER_EDGE_SOFTEN (3<<23)
-#define PFA_WIN_SZ 0x68074
-#define PFB_WIN_SZ 0x68874
-#define PFA_WIN_POS 0x68070
-#define PFB_WIN_POS 0x68870
+#define _PFA_WIN_SZ 0x68074
+#define _PFB_WIN_SZ 0x68874
+#define _PFA_WIN_POS 0x68070
+#define _PFB_WIN_POS 0x68870
+#define _PFA_VSCALE 0x68084
+#define _PFB_VSCALE 0x68884
+#define _PFA_HSCALE 0x68090
+#define _PFB_HSCALE 0x68890
+
+#define PF_CTL(pipe) _PIPE(pipe, _PFA_CTL_1, _PFB_CTL_1)
+#define PF_WIN_SZ(pipe) _PIPE(pipe, _PFA_WIN_SZ, _PFB_WIN_SZ)
+#define PF_WIN_POS(pipe) _PIPE(pipe, _PFA_WIN_POS, _PFB_WIN_POS)
+#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
+#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
/* legacy palette */
-#define LGC_PALETTE_A 0x4a000
-#define LGC_PALETTE_B 0x4a800
+#define _LGC_PALETTE_A 0x4a000
+#define _LGC_PALETTE_B 0x4a800
+#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
/* interrupts */
#define DE_MASTER_IRQ_CONTROL (1 << 31)
@@ -2877,17 +2915,17 @@
#define PCH_GMBUS4 0xc5110
#define PCH_GMBUS5 0xc5120
-#define PCH_DPLL_A 0xc6014
-#define PCH_DPLL_B 0xc6018
-#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
+#define _PCH_DPLL_A 0xc6014
+#define _PCH_DPLL_B 0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
-#define PCH_FPA0 0xc6040
+#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
-#define PCH_FPA1 0xc6044
-#define PCH_FPB0 0xc6048
-#define PCH_FPB1 0xc604c
-#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
-#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
+#define _PCH_FPA1 0xc6044
+#define _PCH_FPB0 0xc6048
+#define _PCH_FPB1 0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
#define PCH_DPLL_TEST 0xc606c
@@ -2906,6 +2944,7 @@
#define DREF_NONSPREAD_SOURCE_MASK (3<<9)
#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
+#define DREF_SUPERSPREAD_SOURCE_MASK (3<<7)
#define DREF_SSC4_DOWNSPREAD (0<<6)
#define DREF_SSC4_CENTERSPREAD (1<<6)
#define DREF_SSC1_DISABLE (0<<1)
@@ -2938,60 +2977,69 @@
/* transcoder */
-#define TRANS_HTOTAL_A 0xe0000
+#define _TRANS_HTOTAL_A 0xe0000
#define TRANS_HTOTAL_SHIFT 16
#define TRANS_HACTIVE_SHIFT 0
-#define TRANS_HBLANK_A 0xe0004
+#define _TRANS_HBLANK_A 0xe0004
#define TRANS_HBLANK_END_SHIFT 16
#define TRANS_HBLANK_START_SHIFT 0
-#define TRANS_HSYNC_A 0xe0008
+#define _TRANS_HSYNC_A 0xe0008
#define TRANS_HSYNC_END_SHIFT 16
#define TRANS_HSYNC_START_SHIFT 0
-#define TRANS_VTOTAL_A 0xe000c
+#define _TRANS_VTOTAL_A 0xe000c
#define TRANS_VTOTAL_SHIFT 16
#define TRANS_VACTIVE_SHIFT 0
-#define TRANS_VBLANK_A 0xe0010
+#define _TRANS_VBLANK_A 0xe0010
#define TRANS_VBLANK_END_SHIFT 16
#define TRANS_VBLANK_START_SHIFT 0
-#define TRANS_VSYNC_A 0xe0014
+#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
-#define TRANSA_DATA_M1 0xe0030
-#define TRANSA_DATA_N1 0xe0034
-#define TRANSA_DATA_M2 0xe0038
-#define TRANSA_DATA_N2 0xe003c
-#define TRANSA_DP_LINK_M1 0xe0040
-#define TRANSA_DP_LINK_N1 0xe0044
-#define TRANSA_DP_LINK_M2 0xe0048
-#define TRANSA_DP_LINK_N2 0xe004c
-
-#define TRANS_HTOTAL_B 0xe1000
-#define TRANS_HBLANK_B 0xe1004
-#define TRANS_HSYNC_B 0xe1008
-#define TRANS_VTOTAL_B 0xe100c
-#define TRANS_VBLANK_B 0xe1010
-#define TRANS_VSYNC_B 0xe1014
-
-#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
-#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
-#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
-#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
-#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
-#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
-
-#define TRANSB_DATA_M1 0xe1030
-#define TRANSB_DATA_N1 0xe1034
-#define TRANSB_DATA_M2 0xe1038
-#define TRANSB_DATA_N2 0xe103c
-#define TRANSB_DP_LINK_M1 0xe1040
-#define TRANSB_DP_LINK_N1 0xe1044
-#define TRANSB_DP_LINK_M2 0xe1048
-#define TRANSB_DP_LINK_N2 0xe104c
-
-#define TRANSACONF 0xf0008
-#define TRANSBCONF 0xf1008
-#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
+#define _TRANSA_DATA_M1 0xe0030
+#define _TRANSA_DATA_N1 0xe0034
+#define _TRANSA_DATA_M2 0xe0038
+#define _TRANSA_DATA_N2 0xe003c
+#define _TRANSA_DP_LINK_M1 0xe0040
+#define _TRANSA_DP_LINK_N1 0xe0044
+#define _TRANSA_DP_LINK_M2 0xe0048
+#define _TRANSA_DP_LINK_N2 0xe004c
+
+#define _TRANS_HTOTAL_B 0xe1000
+#define _TRANS_HBLANK_B 0xe1004
+#define _TRANS_HSYNC_B 0xe1008
+#define _TRANS_VTOTAL_B 0xe100c
+#define _TRANS_VBLANK_B 0xe1010
+#define _TRANS_VSYNC_B 0xe1014
+
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, _TRANS_HSYNC_A, _TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
+
+#define _TRANSB_DATA_M1 0xe1030
+#define _TRANSB_DATA_N1 0xe1034
+#define _TRANSB_DATA_M2 0xe1038
+#define _TRANSB_DATA_N2 0xe103c
+#define _TRANSB_DP_LINK_M1 0xe1040
+#define _TRANSB_DP_LINK_N1 0xe1044
+#define _TRANSB_DP_LINK_M2 0xe1048
+#define _TRANSB_DP_LINK_N2 0xe104c
+
+#define TRANSDATA_M1(pipe) _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)
+#define TRANSDATA_N1(pipe) _PIPE(pipe, _TRANSA_DATA_N1, _TRANSB_DATA_N1)
+#define TRANSDATA_M2(pipe) _PIPE(pipe, _TRANSA_DATA_M2, _TRANSB_DATA_M2)
+#define TRANSDATA_N2(pipe) _PIPE(pipe, _TRANSA_DATA_N2, _TRANSB_DATA_N2)
+#define TRANSDPLINK_M1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M1, _TRANSB_DP_LINK_M1)
+#define TRANSDPLINK_N1(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N1, _TRANSB_DP_LINK_N1)
+#define TRANSDPLINK_M2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_M2, _TRANSB_DP_LINK_M2)
+#define TRANSDPLINK_N2(pipe) _PIPE(pipe, _TRANSA_DP_LINK_N2, _TRANSB_DP_LINK_N2)
+
+#define _TRANSACONF 0xf0008
+#define _TRANSBCONF 0xf1008
+#define TRANSCONF(plane) _PIPE(plane, _TRANSACONF, _TRANSBCONF)
#define TRANS_DISABLE (0<<31)
#define TRANS_ENABLE (1<<31)
#define TRANS_STATE_MASK (1<<30)
@@ -3009,18 +3057,19 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
-#define FDI_RXA_CHICKEN 0xc200c
-#define FDI_RXB_CHICKEN 0xc2010
-#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
-#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+#define _FDI_RXA_CHICKEN 0xc200c
+#define _FDI_RXB_CHICKEN 0xc2010
+#define FDI_RX_PHASE_SYNC_POINTER_OVR (1<<1)
+#define FDI_RX_PHASE_SYNC_POINTER_EN (1<<0)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
/* CPU: FDI_TX */
-#define FDI_TXA_CTL 0x60100
-#define FDI_TXB_CTL 0x61100
-#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
+#define _FDI_TXA_CTL 0x60100
+#define _FDI_TXB_CTL 0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, _FDI_TXA_CTL, _FDI_TXB_CTL)
#define FDI_TX_DISABLE (0<<31)
#define FDI_TX_ENABLE (1<<31)
#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
@@ -3060,9 +3109,9 @@
#define FDI_SCRAMBLING_DISABLE (1<<7)
/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
-#define FDI_RXA_CTL 0xf000c
-#define FDI_RXB_CTL 0xf100c
-#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
+#define _FDI_RXA_CTL 0xf000c
+#define _FDI_RXB_CTL 0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, _FDI_RXA_CTL, _FDI_RXB_CTL)
#define FDI_RX_ENABLE (1<<31)
/* train, dp width same as FDI_TX */
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
@@ -3087,15 +3136,15 @@
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
-#define FDI_RXA_MISC 0xf0010
-#define FDI_RXB_MISC 0xf1010
-#define FDI_RXA_TUSIZE1 0xf0030
-#define FDI_RXA_TUSIZE2 0xf0038
-#define FDI_RXB_TUSIZE1 0xf1030
-#define FDI_RXB_TUSIZE2 0xf1038
-#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
-#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
-#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
+#define _FDI_RXA_MISC 0xf0010
+#define _FDI_RXB_MISC 0xf1010
+#define _FDI_RXA_TUSIZE1 0xf0030
+#define _FDI_RXA_TUSIZE2 0xf0038
+#define _FDI_RXB_TUSIZE1 0xf1030
+#define _FDI_RXB_TUSIZE2 0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
/* FDI_RX interrupt register format */
#define FDI_RX_INTER_LANE_ALIGN (1<<10)
@@ -3110,12 +3159,12 @@
#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
-#define FDI_RXA_IIR 0xf0014
-#define FDI_RXA_IMR 0xf0018
-#define FDI_RXB_IIR 0xf1014
-#define FDI_RXB_IMR 0xf1018
-#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
-#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
+#define _FDI_RXA_IIR 0xf0014
+#define _FDI_RXA_IMR 0xf0018
+#define _FDI_RXB_IIR 0xf1014
+#define _FDI_RXB_IMR 0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, _FDI_RXA_IIR, _FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, _FDI_RXA_IMR, _FDI_RXB_IMR)
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
@@ -3145,11 +3194,15 @@
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
+#define ADPA_PIPE_ENABLED(V, P) \
+ (((V) & (ADPA_TRANS_SELECT_MASK | ADPA_DAC_ENABLE)) == ((P) << 30 | ADPA_DAC_ENABLE))
+
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
+#define TRANSCODER_MASK (1 << 30)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@@ -3165,6 +3218,9 @@
#define HSYNC_ACTIVE_HIGH (1 << 3)
#define PORT_DETECTED (1 << 2)
+#define HDMI_PIPE_ENABLED(V, P) \
+ (((V) & (TRANSCODER_MASK | PORT_ENABLE)) == ((P) << 30 | PORT_ENABLE))
+
/* PCH SDVOB multiplex with HDMIB */
#define PCH_SDVOB HDMIB
@@ -3240,6 +3296,7 @@
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
#define TRANS_DP_PORT_SEL_D (2<<29)
+#define TRANS_DP_PORT_SEL_NONE (3<<29)
#define TRANS_DP_PORT_SEL_MASK (3<<29)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_ENH_FRAMING (1<<18)
@@ -3290,15 +3347,28 @@
#define GEN6_RP_DOWN_TIMEOUT 0xA010
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
+#define GEN6_CAGF_SHIFT 8
+#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
-#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
-#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
+#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
+#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
+#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
+#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_CUR_UP_EI 0xA050
+#define GEN6_CURICONT_MASK 0xffffff
+#define GEN6_RP_CUR_UP 0xA054
+#define GEN6_CURBSYTAVG_MASK 0xffffff
+#define GEN6_RP_PREV_UP 0xA058
+#define GEN6_RP_CUR_DOWN_EI 0xA05C
+#define GEN6_CURIAVG_MASK 0xffffff
+#define GEN6_RP_CUR_DOWN 0xA060
+#define GEN6_RP_PREV_DOWN 0xA064
#define GEN6_RP_UP_EI 0xA068
#define GEN6_RP_DOWN_EI 0xA06C
#define GEN6_RP_IDLE_HYSTERSIS 0xA070
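
The new GEN6_RPSTAT1 fields expose the current actual graphics frequency
(CAGF) alongside the RPS up/down event counters. A minimal sketch of
decoding it, assuming dev_priv is in scope and that CAGF counts in 50 MHz
steps (the step size is an assumption for illustration, not stated by this
patch):

	/* Hedged sketch: read the current actual GPU frequency on gen 6. */
	u32 rpstat = I915_READ(GEN6_RPSTAT1);
	u32 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
	u32 cur_freq_mhz = cagf * 50; /* 50 MHz per step assumed */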
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0521ecf26017..7e992a8e9098 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -34,11 +34,10 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
- if (HAS_PCH_SPLIT(dev)) {
- dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
- } else {
- dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
- }
+ if (HAS_PCH_SPLIT(dev))
+ dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+ else
+ dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
@@ -46,7 +45,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -54,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -68,7 +67,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+ unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
@@ -76,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
return;
if (HAS_PCH_SPLIT(dev))
- reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
+ reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
@@ -241,12 +240,12 @@ static void i915_save_modeset_reg(struct drm_device *dev)
return;
/* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ dev_priv->saveCURACNTR = I915_READ(_CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(_CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(_CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(_CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->saveCURSIZE = I915_READ(CURSIZE);
@@ -256,118 +255,118 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
/* Pipe & plane A info */
- dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
- dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+ dev_priv->savePIPEACONF = I915_READ(_PIPEACONF);
+ dev_priv->savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
- dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
- dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_PCH_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_PCH_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
- dev_priv->saveFPA0 = I915_READ(FPA0);
- dev_priv->saveFPA1 = I915_READ(FPA1);
- dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+ dev_priv->saveFPA0 = I915_READ(_FPA0);
+ dev_priv->saveFPA1 = I915_READ(_FPA1);
+ dev_priv->saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
- dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
- dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
- dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
- dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
- dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
- dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+ dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
+ dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A);
+ dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A);
+ dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A);
+ dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A);
+ dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A);
+ dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+ dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
- dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
- dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
- dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
-
- dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
- dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
-
- dev_priv->savePFA_CTL_1 = I915_READ(PFA_CTL_1);
- dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
- dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
-
- dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
- dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
- dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
- dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
- dev_priv->saveTRANS_VTOTAL_A = I915_READ(TRANS_VTOTAL_A);
- dev_priv->saveTRANS_VBLANK_A = I915_READ(TRANS_VBLANK_A);
- dev_priv->saveTRANS_VSYNC_A = I915_READ(TRANS_VSYNC_A);
- }
-
- dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
- dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
- dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
- dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
- dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+ dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
+ dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
+ dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
+ dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
+
+ dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
+ dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
+
+ dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
+ dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
+ dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
+
+ dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF);
+ dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
+ dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
+ dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
+ dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
+ dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
+ dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
+ }
+
+ dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR);
+ dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
+ dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE);
+ dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS);
+ dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPASURF = I915_READ(DSPASURF);
- dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+ dev_priv->saveDSPASURF = I915_READ(_DSPASURF);
+ dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
- dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+ dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
- dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
- dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+ dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF);
+ dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
- dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
- dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_PCH_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_PCH_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
- dev_priv->saveFPB0 = I915_READ(FPB0);
- dev_priv->saveFPB1 = I915_READ(FPB1);
- dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+ dev_priv->saveFPB0 = I915_READ(_FPB0);
+ dev_priv->saveFPB1 = I915_READ(_FPB1);
+ dev_priv->saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
- dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
- dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
- dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
- dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
- dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
- dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
- dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+ dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
+ dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B);
+ dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B);
+ dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B);
+ dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B);
+ dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B);
+ dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+ dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
- dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
- dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
- dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
-
- dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
- dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
-
- dev_priv->savePFB_CTL_1 = I915_READ(PFB_CTL_1);
- dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
- dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
-
- dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
- dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
- dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
- dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
- dev_priv->saveTRANS_VTOTAL_B = I915_READ(TRANS_VTOTAL_B);
- dev_priv->saveTRANS_VBLANK_B = I915_READ(TRANS_VBLANK_B);
- dev_priv->saveTRANS_VSYNC_B = I915_READ(TRANS_VSYNC_B);
- }
-
- dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
- dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
- dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
- dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
- dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+ dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
+ dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
+ dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
+ dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
+
+ dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
+ dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
+
+ dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
+ dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
+ dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
+
+ dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF);
+ dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
+ dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
+ dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
+ dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
+ dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
+ dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
+ }
+
+ dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR);
+ dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
+ dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE);
+ dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS);
+ dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
- dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
- dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+ dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF);
+ dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
- dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+ dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
@@ -426,19 +425,19 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
- dpll_a_reg = PCH_DPLL_A;
- dpll_b_reg = PCH_DPLL_B;
- fpa0_reg = PCH_FPA0;
- fpb0_reg = PCH_FPB0;
- fpa1_reg = PCH_FPA1;
- fpb1_reg = PCH_FPB1;
+ dpll_a_reg = _PCH_DPLL_A;
+ dpll_b_reg = _PCH_DPLL_B;
+ fpa0_reg = _PCH_FPA0;
+ fpb0_reg = _PCH_FPB0;
+ fpa1_reg = _PCH_FPA1;
+ fpb1_reg = _PCH_FPB1;
} else {
- dpll_a_reg = DPLL_A;
- dpll_b_reg = DPLL_B;
- fpa0_reg = FPA0;
- fpb0_reg = FPB0;
- fpa1_reg = FPA1;
- fpb1_reg = FPB1;
+ dpll_a_reg = _DPLL_A;
+ dpll_b_reg = _DPLL_B;
+ fpa0_reg = _FPA0;
+ fpb0_reg = _FPB0;
+ fpa1_reg = _FPA1;
+ fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
@@ -461,60 +460,60 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
- POSTING_READ(DPLL_A_MD);
+ I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+ POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
- I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
- I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
- I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
- I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
- I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+ I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A);
+ I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A);
+ I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A);
+ I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A);
+ I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A);
+ I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+ I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
- I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
- I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
- I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+ I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+ I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+ I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+ I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
- I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
- I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
+ I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
+ I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
- I915_WRITE(PFA_CTL_1, dev_priv->savePFA_CTL_1);
- I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
- I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
+ I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1);
+ I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
+ I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
- I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
- I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
- I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
- I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
- I915_WRITE(TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
- I915_WRITE(TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
- I915_WRITE(TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
+ I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF);
+ I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
+ I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
+ I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
+ I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A);
+ I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A);
+ I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A);
}
/* Restore plane info */
- I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
- I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
- I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
- I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
- I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+ I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE);
+ I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS);
+ I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC);
+ I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR);
+ I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
- I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+ I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF);
+ I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+ I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
- I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
- I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+ I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR);
+ I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
@@ -530,68 +529,68 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
- I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
- POSTING_READ(DPLL_B_MD);
+ I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+ POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
- I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
- I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
- I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
- I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
- I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
- I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+ I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B);
+ I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B);
+ I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B);
+ I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B);
+ I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B);
+ I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+ I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
- I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
- I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
- I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
- I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+ I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+ I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+ I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+ I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
- I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
- I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
+ I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
+ I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
- I915_WRITE(PFB_CTL_1, dev_priv->savePFB_CTL_1);
- I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
- I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
+ I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1);
+ I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
+ I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
- I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
- I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
- I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
- I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
- I915_WRITE(TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
- I915_WRITE(TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
- I915_WRITE(TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
+ I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF);
+ I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
+ I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
+ I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
+ I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B);
+ I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B);
+ I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B);
}
/* Restore plane info */
- I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
- I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
- I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
- I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
- I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+ I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE);
+ I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS);
+ I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC);
+ I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR);
+ I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
- I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
- I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+ I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF);
+ I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+ I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
- I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
- I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR);
+ I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(_CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
@@ -653,14 +652,14 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveDP_B = I915_READ(DP_B);
dev_priv->saveDP_C = I915_READ(DP_C);
dev_priv->saveDP_D = I915_READ(DP_D);
- dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(PIPEA_GMCH_DATA_M);
- dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(PIPEB_GMCH_DATA_M);
- dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(PIPEA_GMCH_DATA_N);
- dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(PIPEB_GMCH_DATA_N);
- dev_priv->savePIPEA_DP_LINK_M = I915_READ(PIPEA_DP_LINK_M);
- dev_priv->savePIPEB_DP_LINK_M = I915_READ(PIPEB_DP_LINK_M);
- dev_priv->savePIPEA_DP_LINK_N = I915_READ(PIPEA_DP_LINK_N);
- dev_priv->savePIPEB_DP_LINK_N = I915_READ(PIPEB_DP_LINK_N);
+ dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
+ dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
+ dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
+ dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
+ dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
+ dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
+ dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
+ dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
}
/* FIXME: save TV & SDVO state */
@@ -699,14 +698,14 @@ void i915_restore_display(struct drm_device *dev)
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
- I915_WRITE(PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
- I915_WRITE(PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
- I915_WRITE(PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
- I915_WRITE(PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
- I915_WRITE(PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
- I915_WRITE(PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
- I915_WRITE(PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
- I915_WRITE(PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
+ I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M);
+ I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M);
+ I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N);
+ I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N);
+ I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M);
+ I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M);
+ I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N);
+ I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N);
}
/* This is only meaningful in non-KMS mode */
@@ -797,9 +796,6 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
- /* Hardware status page */
- dev_priv->saveHWS = I915_READ(HWS_PGA);
-
i915_save_display(dev);
/* Interrupt state */
@@ -808,8 +804,8 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveDEIMR = I915_READ(DEIMR);
dev_priv->saveGTIER = I915_READ(GTIER);
dev_priv->saveGTIMR = I915_READ(GTIMR);
- dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
- dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
+ dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
dev_priv->saveMCHBAR_RENDER_STANDBY =
I915_READ(RSTDBYCTL);
} else {
@@ -846,9 +842,6 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
- /* Hardware status page */
- I915_WRITE(HWS_PGA, dev_priv->saveHWS);
-
i915_restore_display(dev);
/* Interrupt state */
@@ -857,11 +850,11 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(DEIMR, dev_priv->saveDEIMR);
I915_WRITE(GTIER, dev_priv->saveGTIER);
I915_WRITE(GTIMR, dev_priv->saveGTIMR);
- I915_WRITE(FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
- I915_WRITE(FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
+ I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR);
+ I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR);
} else {
- I915_WRITE (IER, dev_priv->saveIER);
- I915_WRITE (IMR, dev_priv->saveIMR);
+ I915_WRITE(IER, dev_priv->saveIER);
+ I915_WRITE(IMR, dev_priv->saveIMR);
}
/* Clock gating state */
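
This file keeps using the underscore-prefixed names directly because the
suspend/resume paths enumerate pipe A and pipe B explicitly instead of
looping over a pipe index. In sketch form, the convention the rename
establishes (_FOO_A, _FOO_B and FOO() are hypothetical names):

	/* Raw per-pipe addresses gain a leading underscore and are treated
	 * as private; new code indexes them through a parameterized macro. */
	#define _FOO_A		0x60000
	#define _FOO_B		0x61000
	#define FOO(pipe)	_PIPE(pipe, _FOO_A, _FOO_B)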
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 7f0fc3ed61aa..d623fefbfaca 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -7,6 +7,7 @@
#include <drm/drmP.h>
#include "i915_drv.h"
+#include "intel_ringbuffer.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,9 +17,7 @@
/* object tracking */
TRACE_EVENT(i915_gem_object_create,
-
TP_PROTO(struct drm_i915_gem_object *obj),
-
TP_ARGS(obj),
TP_STRUCT__entry(
@@ -35,33 +34,51 @@ TRACE_EVENT(i915_gem_object_create,
);
TRACE_EVENT(i915_gem_object_bind,
-
- TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
-
- TP_ARGS(obj, gtt_offset, mappable),
+ TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
+ TP_ARGS(obj, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
- __field(u32, gtt_offset)
+ __field(u32, offset)
+ __field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->gtt_offset = gtt_offset;
+ __entry->offset = obj->gtt_space->start;
+ __entry->size = obj->gtt_space->size;
__entry->mappable = mappable;
),
- TP_printk("obj=%p, gtt_offset=%08x%s",
- __entry->obj, __entry->gtt_offset,
+ TP_printk("obj=%p, offset=%08x size=%x%s",
+ __entry->obj, __entry->offset, __entry->size,
__entry->mappable ? ", mappable" : "")
);
-TRACE_EVENT(i915_gem_object_change_domain,
+TRACE_EVENT(i915_gem_object_unbind,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj),
+
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, size)
+ ),
- TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->offset = obj->gtt_space->start;
+ __entry->size = obj->gtt_space->size;
+ ),
- TP_ARGS(obj, old_read_domains, old_write_domain),
+ TP_printk("obj=%p, offset=%08x size=%x",
+ __entry->obj, __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(i915_gem_object_change_domain,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
+ TP_ARGS(obj, old_read, old_write),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
@@ -71,177 +88,264 @@ TRACE_EVENT(i915_gem_object_change_domain,
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write << 16);
),
- TP_printk("obj=%p, read=%04x, write=%04x",
+ TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
__entry->obj,
- __entry->read_domains, __entry->write_domain)
+ __entry->read_domains >> 16,
+ __entry->read_domains & 0xffff,
+ __entry->write_domain >> 16,
+ __entry->write_domain & 0xffff)
);
-DECLARE_EVENT_CLASS(i915_gem_object,
+TRACE_EVENT(i915_gem_object_pwrite,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
- TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
+ ),
- TP_ARGS(obj),
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->offset = offset;
+ __entry->len = len;
+ ),
+
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
+);
+
+TRACE_EVENT(i915_gem_object_pread,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
+ TP_ARGS(obj, offset, len),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(u32, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->obj = obj;
+ __entry->offset = offset;
+ __entry->len = len;
),
- TP_printk("obj=%p", __entry->obj)
+ TP_printk("obj=%p, offset=%u, len=%u",
+ __entry->obj, __entry->offset, __entry->len)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+TRACE_EVENT(i915_gem_object_fault,
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 index, bool gtt, bool write),
+ TP_ARGS(obj, index, gtt, write),
+
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ __field(u32, index)
+ __field(bool, gtt)
+ __field(bool, write)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ __entry->index = index;
+ __entry->gtt = gtt;
+ __entry->write = write;
+ ),
+ TP_printk("obj=%p, %s index=%u %s",
+ __entry->obj,
+ __entry->gtt ? "GTT" : "CPU",
+ __entry->index,
+ __entry->write ? ", writable" : "")
+);
+
+DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj),
- TP_ARGS(obj)
+ TP_STRUCT__entry(
+ __field(struct drm_i915_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj;
+ ),
+
+ TP_printk("obj=%p", __entry->obj)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
+DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
+ TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_ARGS(obj)
+);
+DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_PROTO(struct drm_i915_gem_object *obj),
-
TP_ARGS(obj)
);
-DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
+TRACE_EVENT(i915_gem_evict,
+ TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
+ TP_ARGS(dev, size, align, mappable),
- TP_PROTO(struct drm_i915_gem_object *obj),
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, size)
+ __field(u32, align)
+ __field(bool, mappable)
+ ),
- TP_ARGS(obj)
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->size = size;
+ __entry->align = align;
+ __entry->mappable = mappable;
+ ),
+
+ TP_printk("dev=%d, size=%d, align=%d %s",
+ __entry->dev, __entry->size, __entry->align,
+ __entry->mappable ? ", mappable" : "")
);
-/* batch tracing */
+TRACE_EVENT(i915_gem_evict_everything,
+ TP_PROTO(struct drm_device *dev, bool purgeable),
+ TP_ARGS(dev, purgeable),
-TRACE_EVENT(i915_gem_request_submit,
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(bool, purgeable)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->purgeable = purgeable;
+ ),
- TP_PROTO(struct drm_device *dev, u32 seqno),
+ TP_printk("dev=%d%s",
+ __entry->dev,
+ __entry->purgeable ? ", purgeable only" : "")
+);
- TP_ARGS(dev, seqno),
+TRACE_EVENT(i915_gem_ring_dispatch,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
- i915_trace_irq_get(dev, seqno);
+ i915_trace_irq_get(ring, seqno);
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-TRACE_EVENT(i915_gem_request_flush,
-
- TP_PROTO(struct drm_device *dev, u32 seqno,
- u32 flush_domains, u32 invalidate_domains),
-
- TP_ARGS(dev, seqno, flush_domains, invalidate_domains),
+TRACE_EVENT(i915_gem_ring_flush,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 invalidate, u32 flush),
+ TP_ARGS(ring, invalidate, flush),
TP_STRUCT__entry(
__field(u32, dev)
- __field(u32, seqno)
- __field(u32, flush_domains)
- __field(u32, invalidate_domains)
+ __field(u32, ring)
+ __field(u32, invalidate)
+ __field(u32, flush)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
- __entry->seqno = seqno;
- __entry->flush_domains = flush_domains;
- __entry->invalidate_domains = invalidate_domains;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
+ __entry->invalidate = invalidate;
+ __entry->flush = flush;
),
- TP_printk("dev=%u, seqno=%u, flush=%04x, invalidate=%04x",
- __entry->dev, __entry->seqno,
- __entry->flush_domains, __entry->invalidate_domains)
+ TP_printk("dev=%u, ring=%x, invalidate=%04x, flush=%04x",
+ __entry->dev, __entry->ring,
+ __entry->invalidate, __entry->flush)
);
DECLARE_EVENT_CLASS(i915_gem_request,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno),
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
__field(u32, seqno)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
__entry->seqno = seqno;
),
- TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u",
+ __entry->dev, __entry->ring, __entry->seqno)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
+DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
+);
- TP_ARGS(dev, seqno)
+DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_begin,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-
- TP_PROTO(struct drm_device *dev, u32 seqno),
-
- TP_ARGS(dev, seqno)
+ TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
+ TP_ARGS(ring, seqno)
);
DECLARE_EVENT_CLASS(i915_ring,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev),
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring),
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ring)
),
TP_fast_assign(
- __entry->dev = dev->primary->index;
+ __entry->dev = ring->dev->primary->index;
+ __entry->ring = ring->id;
),
- TP_printk("dev=%u", __entry->dev)
+ TP_printk("dev=%u, ring=%u", __entry->dev, __entry->ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_begin,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev)
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring)
);
DEFINE_EVENT(i915_ring, i915_ring_wait_end,
-
- TP_PROTO(struct drm_device *dev),
-
- TP_ARGS(dev)
+ TP_PROTO(struct intel_ring_buffer *ring),
+ TP_ARGS(ring)
);
TRACE_EVENT(i915_flip_request,
@@ -281,26 +385,29 @@ TRACE_EVENT(i915_flip_complete,
);
TRACE_EVENT(i915_reg_rw,
- TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+ TP_PROTO(bool write, u32 reg, u64 val, int len),
- TP_ARGS(cmd, reg, val, len),
+ TP_ARGS(write, reg, val, len),
TP_STRUCT__entry(
- __field(int, cmd)
- __field(uint32_t, reg)
- __field(uint64_t, val)
- __field(int, len)
+ __field(u64, val)
+ __field(u32, reg)
+ __field(u16, write)
+ __field(u16, len)
),
TP_fast_assign(
- __entry->cmd = cmd;
+ __entry->val = (u64)val;
__entry->reg = reg;
- __entry->val = (uint64_t)val;
+ __entry->write = write;
__entry->len = len;
),
- TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
- __entry->cmd, __entry->reg, __entry->val, __entry->len)
+ TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+ __entry->write ? "write" : "read",
+ __entry->reg, __entry->len,
+ (u32)(__entry->val & 0xffffffff),
+ (u32)(__entry->val >> 32))
);
#endif /* _I915_TRACE_H_ */
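
The reworked i915_reg_rw event takes an explicit write/read flag plus the
access width. A hedged sketch of how a register-read helper could emit it;
traced_read32() is a hypothetical name, not the driver's literal I915_READ
implementation, and dev_priv->regs is assumed to be the ioremapped MMIO base:

	static inline u32 traced_read32(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val = readl(dev_priv->regs + reg); /* readl from <linux/io.h> */
		trace_i915_reg_rw(false, reg, val, sizeof(val)); /* false => read */
		return val;
	}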
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0b44956c336b..fb5b4d426ae0 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -226,29 +226,49 @@ static void
parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
- struct bdb_sdvo_lvds_options *sdvo_lvds_options;
struct lvds_dvo_timing *dvo_timing;
struct drm_display_mode *panel_fixed_mode;
+ int index;
- sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
- if (!sdvo_lvds_options)
- return;
+ index = i915_vbt_sdvo_panel_type;
+ if (index == -1) {
+ struct bdb_sdvo_lvds_options *sdvo_lvds_options;
+
+ sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
+ if (!sdvo_lvds_options)
+ return;
+
+ index = sdvo_lvds_options->panel_type;
+ }
dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
if (!dvo_timing)
return;
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
-
if (!panel_fixed_mode)
return;
- fill_detail_timing_data(panel_fixed_mode,
- dvo_timing + sdvo_lvds_options->panel_type);
+ fill_detail_timing_data(panel_fixed_mode, dvo_timing + index);
dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
- return;
+ DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_mode_debug_printmodeline(panel_fixed_mode);
+}
+
+static int intel_bios_ssc_frequency(struct drm_device *dev,
+ bool alternate)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ return alternate ? 66 : 48;
+ case 3:
+ case 4:
+ return alternate ? 100 : 96;
+ default:
+ return alternate ? 100 : 120;
+ }
}
static void
@@ -263,13 +283,8 @@ parse_general_features(struct drm_i915_private *dev_priv,
dev_priv->int_tv_support = general->int_tv_support;
dev_priv->int_crt_support = general->int_crt_support;
dev_priv->lvds_use_ssc = general->enable_ssc;
-
- if (IS_I85X(dev))
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
- else if (IS_GEN5(dev) || IS_GEN6(dev))
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 120;
- else
- dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
+ dev_priv->lvds_ssc_freq =
+ intel_bios_ssc_frequency(dev, general->ssc_freq);
}
}
@@ -553,6 +568,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = dev_priv->dev;
+
dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
/* LFP panel data */
@@ -565,7 +582,11 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
/* general features */
dev_priv->int_tv_support = 1;
dev_priv->int_crt_support = 1;
- dev_priv->lvds_use_ssc = 0;
+
+ /* Default to using SSC */
+ dev_priv->lvds_use_ssc = 1;
+ dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
+ DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
/* eDP data */
dev_priv->edp.bpp = 18;
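
The i915_vbt_sdvo_panel_type knob consumed in parse_sdvo_panel_data() above
is presumably a module parameter, with -1 meaning "use the panel type from
the VBT". A hedged sketch of the corresponding declaration (assumed to live
in i915_drv.c; the exact name and permissions are illustrative):

	int i915_vbt_sdvo_panel_type = -1; /* -1 = trust the VBT */
	module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
	MODULE_PARM_DESC(vbt_sdvo_panel_type,
			 "Override selection of SDVO panel mode in the VBT (-1=auto)");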
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8a77ff4a7237..8342259f3160 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -129,10 +129,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
u32 adpa, dpll_md;
u32 adpa_reg;
- if (intel_crtc->pipe == 0)
- dpll_md_reg = DPLL_A_MD;
- else
- dpll_md_reg = DPLL_B_MD;
+ dpll_md_reg = DPLL_MD(intel_crtc->pipe);
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
@@ -160,17 +157,16 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
adpa |= PORT_TRANS_A_SEL_CPT;
else
adpa |= ADPA_PIPE_A_SELECT;
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_A, 0);
} else {
if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_B_SEL_CPT;
else
adpa |= ADPA_PIPE_B_SELECT;
- if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT_B, 0);
}
+ if (!HAS_PCH_SPLIT(dev))
+ I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+
I915_WRITE(adpa_reg, adpa);
}
@@ -353,21 +349,12 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
DRM_DEBUG_KMS("starting load-detect on CRT\n");
- if (pipe == 0) {
- bclrpat_reg = BCLRPAT_A;
- vtotal_reg = VTOTAL_A;
- vblank_reg = VBLANK_A;
- vsync_reg = VSYNC_A;
- pipeconf_reg = PIPEACONF;
- pipe_dsl_reg = PIPEADSL;
- } else {
- bclrpat_reg = BCLRPAT_B;
- vtotal_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipeconf_reg = PIPEBCONF;
- pipe_dsl_reg = PIPEBDSL;
- }
+ bclrpat_reg = BCLRPAT(pipe);
+ vtotal_reg = VTOTAL(pipe);
+ vblank_reg = VBLANK(pipe);
+ vsync_reg = VSYNC(pipe);
+ pipeconf_reg = PIPECONF(pipe);
+ pipe_dsl_reg = PIPEDSL(pipe);
save_bclrpat = I915_READ(bclrpat_reg);
save_vtotal = I915_READ(vtotal_reg);
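
The per-register if/else ladders above collapse into per-pipe macro lookups.
DPLL_MD(), BCLRPAT(), VTOTAL() and friends are assumed to be defined in
i915_reg.h in the same _PIPE style as the display-plane macros earlier in
this patch, e.g.:

	/* Assumed definitions backing the conversions above: */
	#define DPLL_MD(pipe)	_PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
	#define BCLRPAT(pipe)	_PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
	#define PIPEDSL(pipe)	_PIPE(pipe, _PIPEADSL, _PIPEBDSL)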
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 49fb54fd9a18..3106c0dc8389 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -989,7 +989,7 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int pipestat_reg = (pipe == 0 ? PIPEASTAT : PIPEBSTAT);
+ int pipestat_reg = PIPESTAT(pipe);
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
@@ -1058,6 +1058,612 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
}
}
+static const char *state_string(bool enabled)
+{
+ return enabled ? "on" : "off";
+}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+
+/* For ILK+ */
+static void assert_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PCH PLL state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
+#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_TX_ENABLE);
+ WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
+#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & FDI_RX_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
+}
+#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
+#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
+
+static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* ILK FDI PLL is always enabled */
+ if (dev_priv->info->gen == 5)
+ return;
+
+ reg = FDI_TX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = FDI_RX_CTL(pipe);
+ val = I915_READ(reg);
+ WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int pp_reg, lvds_reg;
+ u32 val;
+ enum pipe panel_pipe = PIPE_A;
+ bool locked = true;
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ pp_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ pp_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
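+ /* regs are writable when the panel is powered off or the unlock key is set */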
+ val = I915_READ(pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
+ panel_pipe = PIPE_B;
+
+ WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
+
+static void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ int reg;
+ u32 val;
+ bool cur_state;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ cur_state = !!(val & PIPECONF_ENABLE);
+ WARN(cur_state != state,
+ "pipe %c assertion failure (expected %s, current %s)\n",
+ pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
+static void assert_plane_enabled(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ WARN(!(val & DISPLAY_PLANE_ENABLE),
+ "plane %c assertion failure, should be active but is disabled\n",
+ plane_name(plane));
+}
+
+static void assert_planes_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg, i;
+ u32 val;
+ int cur_pipe;
+
+ /* Planes are fixed to pipes on ILK+ */
+ if (HAS_PCH_SPLIT(dev_priv->dev))
+ return;
+
+ /* Need to check both planes against the pipe */
+ for (i = 0; i < 2; i++) {
+ reg = DSPCNTR(i);
+ val = I915_READ(reg);
+ cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+ DISPPLANE_SEL_PIPE_SHIFT;
+ WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
+ "plane %c assertion failure, should be off on pipe %c but is still active\n",
+ plane_name(i), pipe_name(pipe));
+ }
+}
+
+static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+ bool enabled;
+
+ val = I915_READ(PCH_DREF_CONTROL);
+ enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
+ DREF_SUPERSPREAD_SOURCE_MASK));
+ WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
+}
+
+static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+ bool enabled;
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ enabled = !!(val & TRANS_ENABLE);
+ WARN(enabled,
+ "transcoder assertion failed, should be off on pipe %c but is still active\n",
+ pipe_name(pipe));
+}
+
+static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ WARN(DP_PIPE_ENABLED(val, pipe),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ WARN(HDMI_PIPE_ENABLED(val, pipe),
+ "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
+ reg, pipe_name(pipe));
+}
+
+static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C);
+ assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ WARN(ADPA_PIPE_ENABLED(val, pipe),
+ "PCH VGA enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ WARN(LVDS_PIPE_ENABLED(val, pipe),
+ "PCH LVDS enabled on transcoder %c, should be disabled\n",
+ pipe_name(pipe));
+
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
+ assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
+}
+
+/**
+ * intel_enable_pll - enable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
+ * make sure the PLL reg is writable first though, since the panel write
+ * protect mechanism may be enabled.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* No really, not for ILK+ */
+ BUG_ON(dev_priv->info->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
+ assert_panel_unlocked(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+}
+
+/**
+ * intel_disable_pll - disable a PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to disable
+ *
+ * Disable the PLL for @pipe, making sure the pipe is off first.
+ *
+ * Note! This is for pre-ILK only.
+ */
+static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* Don't disable the pipe A PLL if the pipe A quirk needs it running */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Make sure the pipe isn't still relying on us */
+ assert_pipe_disabled(dev_priv, pipe);
+
+ reg = DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+}
+
+/**
+ * intel_enable_pch_pll - enable PCH PLL
+ * @dev_priv: i915 private structure
+ * @pipe: pipe PLL to enable
+ *
+ * The PCH PLL needs to be enabled before the PCH transcoder, since it
+ * drives the transcoder clock.
+ */
+static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* PCH refclock must be enabled first */
+ assert_pch_refclk_enabled(dev_priv);
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure transcoder isn't still depending on us */
+ assert_transcoder_disabled(dev_priv, pipe);
+
+ reg = PCH_DPLL(pipe);
+ val = I915_READ(reg);
+ val &= ~DPLL_VCO_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ udelay(200);
+}
+
+static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* PCH only available on ILK+ */
+ BUG_ON(dev_priv->info->gen < 5);
+
+ /* Make sure PCH DPLL is enabled */
+ assert_pch_pll_enabled(dev_priv, pipe);
+
+ /* FDI must be feeding us bits for PCH ports */
+ assert_fdi_tx_enabled(dev_priv, pipe);
+ assert_fdi_rx_enabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ I915_WRITE(reg, val | TRANS_ENABLE);
+ if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
+ DRM_ERROR("failed to enable transcoder %d\n", pipe);
+}
+
+static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* FDI relies on the transcoder */
+ assert_fdi_tx_disabled(dev_priv, pipe);
+ assert_fdi_rx_disabled(dev_priv, pipe);
+
+ /* Ports must be off as well */
+ assert_pch_ports_disabled(dev_priv, pipe);
+
+ reg = TRANSCONF(pipe);
+ val = I915_READ(reg);
+ val &= ~TRANS_ENABLE;
+ I915_WRITE(reg, val);
+ /* wait for PCH transcoder off, transcoder state */
+ if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
+ DRM_ERROR("failed to disable transcoder\n");
+}
+
+/**
+ * intel_enable_pipe - enable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to enable
+ * @pch_port: on ILK+, is this pipe driving a PCH port or not
+ *
+ * Enable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe is actually running (i.e. first vblank) before
+ * returning.
+ */
+static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool pch_port)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * A pipe without a PLL won't actually be able to drive bits from
+ * a plane. On ILK+ the pipe PLLs are integrated, so we don't
+ * need the check.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv->dev))
+ assert_pll_enabled(dev_priv, pipe);
+ else {
+ if (pch_port) {
+ /* if driving the PCH, we need FDI enabled */
+ assert_fdi_rx_pll_enabled(dev_priv, pipe);
+ assert_fdi_tx_pll_enabled(dev_priv, pipe);
+ }
+ /* FIXME: assert CPU port conditions for SNB+ */
+ }
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ val |= PIPECONF_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_disable_pipe - disable a pipe, asserting requirements
+ * @dev_priv: i915 private structure
+ * @pipe: pipe to disable
+ *
+ * Disable @pipe, making sure that various hardware specific requirements
+ * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
+ *
+ * @pipe should be %PIPE_A or %PIPE_B.
+ *
+ * Will wait until the pipe has shut down before returning.
+ */
+static void intel_disable_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /*
+ * Make sure planes won't keep trying to pump pixels to us,
+ * or we might hang the display.
+ */
+ assert_planes_disabled(dev_priv, pipe);
+
+ /* Don't disable pipe A if the pipe A quirk needs it running */
+ if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ val &= ~PIPECONF_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_wait_for_pipe_off(dev_priv->dev, pipe);
+}
+
+/**
+ * intel_enable_plane - enable a display plane on a given pipe
+ * @dev_priv: i915 private structure
+ * @plane: plane to enable
+ * @pipe: pipe being fed
+ *
+ * Enable @plane on @pipe, making sure that @pipe is running first.
+ */
+static void intel_enable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ /* If the pipe isn't enabled, we can't pump pixels and may hang */
+ assert_pipe_enabled(dev_priv, pipe);
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ val |= DISPLAY_PLANE_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+/*
+ * Plane regs are double buffered, going from enabled->disabled needs a
+ * trigger in order to latch. The display address reg provides this.
+ */
+static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+ enum plane plane)
+{
+ u32 reg = DSPADDR(plane);
+ I915_WRITE(reg, I915_READ(reg));
+}
+
+/**
+ * intel_disable_plane - disable a display plane
+ * @dev_priv: i915 private structure
+ * @plane: plane to disable
+ * @pipe: pipe consuming the data
+ *
+ * Disable @plane; should be an independent operation.
+ */
+static void intel_disable_plane(struct drm_i915_private *dev_priv,
+ enum plane plane, enum pipe pipe)
+{
+ int reg;
+ u32 val;
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+ val &= ~DISPLAY_PLANE_ENABLE;
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
+ intel_flush_display_plane(dev_priv, plane);
+ intel_wait_for_vblank(dev_priv->dev, pipe);
+}
+
+static void disable_pch_dp(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (DP_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~DP_PORT_EN);
+}
+
+static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int reg)
+{
+ u32 val = I915_READ(reg);
+ if (HDMI_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~PORT_ENABLE);
+}
+
+/* Disable any ports connected to this transcoder */
+static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ u32 reg, val;
+
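+ /* write the unlock key so the panel-protected port registers accept writes */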
+ val = I915_READ(PCH_PP_CONTROL);
+ I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
+
+ disable_pch_dp(dev_priv, pipe, PCH_DP_B);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_C);
+ disable_pch_dp(dev_priv, pipe, PCH_DP_D);
+
+ reg = PCH_ADPA;
+ val = I915_READ(reg);
+ if (ADPA_PIPE_ENABLED(val, pipe))
+ I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
+
+ reg = PCH_LVDS;
+ val = I915_READ(reg);
+ if (LVDS_PIPE_ENABLED(val, pipe)) {
+ I915_WRITE(reg, val & ~LVDS_PORT_EN);
+ POSTING_READ(reg);
+ udelay(100);
+ }
+
+ disable_pch_hdmi(dev_priv, pipe, HDMIB);
+ disable_pch_hdmi(dev_priv, pipe, HDMIC);
+ disable_pch_hdmi(dev_priv, pipe, HDMID);
+}
+
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
@@ -1390,7 +1996,7 @@ static void intel_update_fbc(struct drm_device *dev)
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
- if (tmp_crtc->enabled) {
+ if (tmp_crtc->enabled && tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -1461,6 +2067,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 alignment;
int ret;
@@ -1485,9 +2092,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
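+ /* pinning and fencing for scanout must not be aborted by signals */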
+ dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
- return ret;
+ goto err_interruptible;
ret = i915_gem_object_set_to_display_plane(obj, pipelined);
if (ret)
@@ -1499,15 +2107,18 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* a fence as the cost is not that onerous.
*/
if (obj->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence(obj, pipelined, false);
+ ret = i915_gem_object_get_fence(obj, pipelined);
if (ret)
goto err_unpin;
}
+ dev_priv->mm.interruptible = true;
return 0;
err_unpin:
i915_gem_object_unpin(obj);
+err_interruptible:
+ dev_priv->mm.interruptible = true;
return ret;
}
@@ -1641,7 +2252,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- ret = i915_gem_object_flush_gpu(obj, false);
+ ret = i915_gem_object_flush_gpu(obj);
(void) ret;
}
@@ -1753,8 +2364,13 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
u32 reg, temp, tries;
+ /* FDI needs bits from pipe & plane first */
+ assert_pipe_enabled(dev_priv, pipe);
+ assert_plane_enabled(dev_priv, plane);
+
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
@@ -1784,7 +2400,11 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable*/
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE);
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+ FDI_RX_PHASE_SYNC_POINTER_EN);
+ }
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
@@ -1834,7 +2454,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
}
-static const int const snb_b_fdi_train_param [] = {
+static const int snb_b_fdi_train_param[] = {
FDI_LINK_TRAIN_400MV_0DB_SNB_B,
FDI_LINK_TRAIN_400MV_6DB_SNB_B,
FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
@@ -2003,12 +2623,60 @@ static void ironlake_fdi_enable(struct drm_crtc *crtc)
}
}
-static void intel_flush_display_plane(struct drm_device *dev,
- int plane)
+static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 reg = DSPADDR(plane);
- I915_WRITE(reg, I915_READ(reg));
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* disable CPU FDI tx and PCH FDI rx */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
+ POSTING_READ(reg);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
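+ /* keep the FDI RX BPC field (bits 18:16) in sync with PIPECONF (bits 7:5) */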
+ temp &= ~(0x7 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+
+ POSTING_READ(reg);
+ udelay(100);
+
+ /* Ironlake workaround, disable clock pointer after downing FDI */
+ if (HAS_PCH_IBX(dev)) {
+ I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe)) &
+ ~FDI_RX_PHASE_SYNC_POINTER_EN);
+ }
+
+ /* still set train pattern 1 */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_PATTERN_1;
+ }
+ /* BPC in FDI rx is consistent with that in PIPECONF */
+ temp &= ~(0x07 << 16);
+ temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
+ I915_WRITE(reg, temp);
+
+ POSTING_READ(reg);
+ udelay(100);
}
/*
@@ -2070,114 +2738,21 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
return true;
}
-static void ironlake_crtc_enable(struct drm_crtc *crtc)
+/*
+ * Enable PCH resources required for PCH ports:
+ * - PCH PLLs
+ * - FDI training & RX/TX
+ * - update transcoder timings
+ * - DP transcoding bits
+ * - transcoder
+ */
+static void ironlake_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
u32 reg, temp;
- bool is_pch_port = false;
-
- if (intel_crtc->active)
- return;
-
- intel_crtc->active = true;
- intel_update_watermarks(dev);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0)
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- }
-
- is_pch_port = intel_crtc_driving_pch(crtc);
-
- if (is_pch_port)
- ironlake_fdi_enable(crtc);
- else {
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
-
- POSTING_READ(reg);
- udelay(100);
- }
-
- /* Enable panel fitting for LVDS */
- if (dev_priv->pch_pf_size &&
- (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
- /* Force use of hard-coded filter coefficients
- * as some pre-programmed values are broken,
- * e.g. x201.
- */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1,
- PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(pipe ? PFB_WIN_POS : PFA_WIN_POS,
- dev_priv->pch_pf_pos);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ,
- dev_priv->pch_pf_size);
- }
-
- /* Enable CPU pipe */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if ((temp & PIPECONF_ENABLE) == 0) {
- I915_WRITE(reg, temp | PIPECONF_ENABLE);
- POSTING_READ(reg);
- intel_wait_for_vblank(dev, intel_crtc->pipe);
- }
-
- /* configure and enable CPU plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
-
- /* Skip the PCH stuff if possible */
- if (!is_pch_port)
- goto done;
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
@@ -2185,14 +2760,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
else
ironlake_fdi_link_train(crtc);
- /* enable PCH DPLL */
- reg = PCH_DPLL(pipe);
- temp = I915_READ(reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
- POSTING_READ(reg);
- udelay(200);
- }
+ intel_enable_pch_pll(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* Be sure PCH DPLL SEL is set */
@@ -2204,7 +2772,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
- /* set transcoder timing */
+ /* set transcoder timing, panel must allow it */
+ assert_panel_unlocked(dev_priv, pipe);
I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
@@ -2251,19 +2820,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp);
}
- /* enable PCH transcoder */
- reg = TRANSCONF(pipe);
- temp = I915_READ(reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- temp &= ~PIPE_BPC_MASK;
- temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
- I915_WRITE(reg, temp | TRANS_ENABLE);
- if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
- DRM_ERROR("failed to enable transcoder %d\n", pipe);
-done:
+ intel_enable_transcoder(dev_priv, pipe);
+}
+
+static void ironlake_crtc_enable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int plane = intel_crtc->plane;
+ u32 temp;
+ bool is_pch_port;
+
+ if (intel_crtc->active)
+ return;
+
+ intel_crtc->active = true;
+ intel_update_watermarks(dev);
+
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ temp = I915_READ(PCH_LVDS);
+ if ((temp & LVDS_PORT_EN) == 0)
+ I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
+ }
+
+ is_pch_port = intel_crtc_driving_pch(crtc);
+
+ if (is_pch_port)
+ ironlake_fdi_enable(crtc);
+ else
+ ironlake_fdi_disable(crtc);
+
+ /* Enable panel fitting for LVDS */
+ if (dev_priv->pch_pf_size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+ /* Force use of hard-coded filter coefficients
+ * as some pre-programmed values are broken,
+ * e.g. x201.
+ */
+ I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+ I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+ I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+ }
+
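+ /* enable the pipe before the plane; the PCH side follows once both are up */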
+ intel_enable_pipe(dev_priv, pipe, is_pch_port);
+ intel_enable_plane(dev_priv, plane, pipe);
+
+ if (is_pch_port)
+ ironlake_pch_enable(crtc);
+
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@@ -2285,116 +2891,58 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_vblank_off(dev, pipe);
intel_crtc_update_cursor(crtc, false);
- /* Disable display plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if (temp & DISPLAY_PLANE_ENABLE) {
- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
if (dev_priv->cfb_plane == plane &&
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* disable cpu pipe, disable after all planes disabled */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if (temp & PIPECONF_ENABLE) {
- I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
- POSTING_READ(reg);
- /* wait for cpu pipe off, pipe state */
- intel_wait_for_pipe_off(dev, intel_crtc->pipe);
- }
+ intel_disable_pipe(dev_priv, pipe);
/* Disable PF */
- I915_WRITE(pipe ? PFB_CTL_1 : PFA_CTL_1, 0);
- I915_WRITE(pipe ? PFB_WIN_SZ : PFA_WIN_SZ, 0);
-
- /* disable CPU FDI tx and PCH FDI rx */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
-
- POSTING_READ(reg);
- udelay(100);
-
- /* Ironlake workaround, disable clock pointer after downing FDI */
- if (HAS_PCH_IBX(dev))
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
-
- /* still set train pattern 1 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_PATTERN_1;
- }
- /* BPC in FDI rx is consistent with that in PIPECONF */
- temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
+ I915_WRITE(PF_CTL(pipe), 0);
+ I915_WRITE(PF_WIN_SZ(pipe), 0);
- POSTING_READ(reg);
- udelay(100);
+ ironlake_fdi_disable(crtc);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if (temp & LVDS_PORT_EN) {
- I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
- POSTING_READ(PCH_LVDS);
- udelay(100);
- }
- }
+ /* This is a horrible layering violation; we should be doing this in
+ * the connector/encoder ->prepare instead, but we don't always have
+ * enough information there about the config to know whether it will
+ * actually be necessary or just cause undesired flicker.
+ */
+ intel_disable_pch_ports(dev_priv, pipe);
- /* disable PCH transcoder */
- reg = TRANSCONF(plane);
- temp = I915_READ(reg);
- if (temp & TRANS_ENABLE) {
- I915_WRITE(reg, temp & ~TRANS_ENABLE);
- /* wait for PCH transcoder off, transcoder state */
- if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
- DRM_ERROR("failed to disable transcoder\n");
- }
+ intel_disable_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
+ temp |= TRANS_DP_PORT_SEL_NONE;
I915_WRITE(reg, temp);
/* disable DPLL_SEL */
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0)
- temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
- else
+ switch (pipe) {
+ case 0:
+ temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
+ break;
+ case 1:
temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
+ break;
+ case 2:
+ /* FIXME: manage transcoder PLLs? */
+ temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
+ break;
+ default:
+ BUG(); /* wtf */
+ }
I915_WRITE(PCH_DPLL_SEL, temp);
}
/* disable PCH DPLL */
- reg = PCH_DPLL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
+ intel_disable_pch_pll(dev_priv, pipe);
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
@@ -2451,9 +2999,12 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- (void) intel_overlay_switch_off(intel_crtc->overlay, false);
+ dev_priv->mm.interruptible = false;
+ (void) intel_overlay_switch_off(intel_crtc->overlay);
+ dev_priv->mm.interruptible = true;
mutex_unlock(&dev->struct_mutex);
}
@@ -2469,7 +3020,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 reg, temp;
if (intel_crtc->active)
return;
@@ -2477,42 +3027,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- /* Enable the DPLL */
- reg = DPLL(pipe);
- temp = I915_READ(reg);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- I915_WRITE(reg, temp);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
-
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
-
- I915_WRITE(reg, temp | DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
- udelay(150);
- }
-
- /* Enable the pipe */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if ((temp & PIPECONF_ENABLE) == 0)
- I915_WRITE(reg, temp | PIPECONF_ENABLE);
-
- /* Enable the plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- I915_WRITE(reg, temp | DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
- }
+ intel_enable_pll(dev_priv, pipe);
+ intel_enable_pipe(dev_priv, pipe, false);
+ intel_enable_plane(dev_priv, plane, pipe);
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
@@ -2529,7 +3046,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 reg, temp;
if (!intel_crtc->active)
return;
@@ -2544,45 +3060,10 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- /* Disable display plane */
- reg = DSPCNTR(plane);
- temp = I915_READ(reg);
- if (temp & DISPLAY_PLANE_ENABLE) {
- I915_WRITE(reg, temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- intel_flush_display_plane(dev, plane);
-
- /* Wait for vblank for the disable to take effect */
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
- }
-
- /* Don't disable pipe A or pipe A PLLs if needed */
- if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
- goto done;
-
- /* Next, disable display pipes */
- reg = PIPECONF(pipe);
- temp = I915_READ(reg);
- if (temp & PIPECONF_ENABLE) {
- I915_WRITE(reg, temp & ~PIPECONF_ENABLE);
-
- /* Wait for the pipe to turn off */
- POSTING_READ(reg);
- intel_wait_for_pipe_off(dev, pipe);
- }
-
- reg = DPLL(pipe);
- temp = I915_READ(reg);
- if (temp & DPLL_VCO_ENABLE) {
- I915_WRITE(reg, temp & ~DPLL_VCO_ENABLE);
-
- /* Wait for the clocks to turn off. */
- POSTING_READ(reg);
- udelay(150);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
+ intel_disable_pll(dev_priv, pipe);
-done:
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
@@ -2644,7 +3125,7 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
- DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
+ DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
break;
}
}
@@ -2841,77 +3322,77 @@ struct intel_watermark_params {
};
/* Pineview has different values for various configs */
-static struct intel_watermark_params pineview_display_wm = {
+static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params pineview_display_hplloff_wm = {
+static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params pineview_cursor_wm = {
+static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params pineview_cursor_hplloff_wm = {
+static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
-static struct intel_watermark_params g4x_wm_info = {
+static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params g4x_cursor_wm_info = {
+static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params i965_cursor_wm_info = {
+static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
};
-static struct intel_watermark_params i945_wm_info = {
+static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i915_wm_info = {
+static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i855_wm_info = {
+static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
-static struct intel_watermark_params i830_wm_info = {
+static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
@@ -2919,31 +3400,28 @@ static struct intel_watermark_params i830_wm_info = {
I830_FIFO_LINE_SIZE
};
-static struct intel_watermark_params ironlake_display_wm_info = {
+static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_cursor_wm_info = {
+static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_display_srwm_info = {
+static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params ironlake_cursor_srwm_info = {
+static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
@@ -2951,31 +3429,28 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_FIFO_LINE_SIZE
};
-static struct intel_watermark_params sandybridge_display_wm_info = {
+static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_cursor_wm_info = {
+static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_display_srwm_info = {
+static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
-
-static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
@@ -3003,7 +3478,8 @@ static struct intel_watermark_params sandybridge_cursor_srwm_info = {
* will occur, and a display engine hang could result.
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
- struct intel_watermark_params *wm,
+ const struct intel_watermark_params *wm,
+ int fifo_size,
int pixel_size,
unsigned long latency_ns)
{
@@ -3021,7 +3497,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
- wm_size = wm->fifo_size - (entries_required + wm->guard_size);
+ wm_size = fifo_size - (entries_required + wm->guard_size);
DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
@@ -3194,15 +3670,28 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
return size;
}
-static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int unused,
- int pixel_size)
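+/* Returns the single enabled crtc, or NULL if none or more than one is enabled */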
+static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
+{
+ struct drm_crtc *crtc, *enabled = NULL;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled && crtc->fb) {
+ if (enabled)
+ return NULL;
+ enabled = crtc;
+ }
+ }
+
+ return enabled;
+}
+
+static void pineview_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
- int sr_clock;
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
@@ -3212,11 +3701,14 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
return;
}
- if (!planea_clock || !planeb_clock) {
- sr_clock = planea_clock ? planea_clock : planeb_clock;
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
+ int clock = crtc->mode.clock;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
/* Display SR */
- wm = intel_calculate_wm(sr_clock, &pineview_display_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_wm,
+ pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
@@ -3225,7 +3717,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
/* cursor SR */
- wm = intel_calculate_wm(sr_clock, &pineview_cursor_wm,
+ wm = intel_calculate_wm(clock, &pineview_cursor_wm,
+ pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
@@ -3233,7 +3726,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW3, reg);
/* Display HPLL off SR */
- wm = intel_calculate_wm(sr_clock, &pineview_display_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
@@ -3241,7 +3735,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW3, reg);
/* cursor HPLL off SR */
- wm = intel_calculate_wm(sr_clock, &pineview_cursor_hplloff_wm,
+ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
+ pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
@@ -3259,125 +3754,229 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
}
}
-static void g4x_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static bool g4x_compute_wm0(struct drm_device *dev,
+ int plane,
+ const struct intel_watermark_params *display,
+ int display_latency_ns,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency_ns,
+ int *plane_wm,
+ int *cursor_wm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int total_size, cacheline_size;
- int planea_wm, planeb_wm, cursora_wm, cursorb_wm, cursor_sr;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0, entries_required;
+ struct drm_crtc *crtc;
+ int htotal, hdisplay, clock, pixel_size;
+ int line_time_us, line_count;
+ int entries, tlb_miss;
- /* Create copies of the base settings for each pipe */
- planea_params = planeb_params = g4x_wm_info;
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled)
+ return false;
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
+ htotal = crtc->mode.htotal;
+ hdisplay = crtc->mode.hdisplay;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
- /*
- * Note: we need to make sure we don't overflow for various clock &
- * latency values.
- * clocks go from a few thousand to several hundred thousand.
- * latency is usually a few thousand
- */
- entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
- planea_wm = entries_required + planea_params.guard_size;
+ /* Use the small buffer method to calculate plane watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
- entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
- 1000;
- entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
- planeb_wm = entries_required + planeb_params.guard_size;
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = ((htotal * 1000) / clock);
+ line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
+ entries = line_count * 64 * pixel_size;
+ tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
- cursora_wm = cursorb_wm = 16;
- cursor_sr = 32;
+ return true;
+}
- DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark values is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool g4x_check_srwm(struct drm_device *dev,
+ int display_wm, int cursor_wm,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor)
+{
+ DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
+ display_wm, cursor_wm);
- /* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
- /* self-refresh has much higher latency */
- static const int sr_latency_ns = 12000;
+ if (display_wm > display->max_wm) {
+ DRM_DEBUG_KMS("display watermark is too large(%d), disabling\n",
+ display_wm, display->max_wm);
+ return false;
+ }
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ if (cursor_wm > cursor->max_wm) {
+ DRM_DEBUG_KMS("cursor watermark is too large(%d), disabling\n",
+ cursor_wm, cursor->max_wm);
+ return false;
+ }
- /* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
-
- entries_required = (((sr_latency_ns / line_time_us) +
- 1000) / 1000) * pixel_size * 64;
- entries_required = DIV_ROUND_UP(entries_required,
- g4x_cursor_wm_info.cacheline_size);
- cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
-
- if (cursor_sr > g4x_cursor_wm_info.max_wm)
- cursor_sr = g4x_cursor_wm_info.max_wm;
- DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
- "cursor %d\n", sr_entries, cursor_sr);
+ if (!(display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("SR latency is 0, disabling\n");
+ return false;
+ }
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
- } else {
- /* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ return true;
+}
+
+static bool g4x_compute_srwm(struct drm_device *dev,
+ int plane,
+ int latency_ns,
+ const struct intel_watermark_params *display,
+ const struct intel_watermark_params *cursor,
+ int *display_wm, int *cursor_wm)
+{
+ struct drm_crtc *crtc;
+ int hdisplay, htotal, pixel_size, clock;
+ unsigned long line_time_us;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *display_wm = *cursor_wm = 0;
+ return false;
}
- DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, SR %d\n",
- planea_wm, planeb_wm, sr_entries);
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
- planea_wm &= 0x3f;
- planeb_wm &= 0x3f;
+ line_time_us = (htotal * 1000) / clock;
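+ /* number of lines scanned out during the latency window, rounded up */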
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
- I915_WRITE(DSPFW1, (sr_entries << DSPFW_SR_SHIFT) |
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *display_wm = entries + display->guard_size;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+
+ return g4x_check_srwm(dev,
+ *display_wm, *cursor_wm,
+ display, cursor);
+}
+
+static inline bool single_plane_enabled(unsigned int mask)
+{
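+ /* true iff exactly one bit is set: clearing the lowest set bit leaves zero */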
+ return mask && (mask & -mask) == 0;
+}
+
+static void g4x_update_wm(struct drm_device *dev)
+{
+ static const int sr_latency_ns = 12000;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
+ int plane_sr, cursor_sr;
+ unsigned int enabled = 0;
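+ /* bitmask of active planes: bit 0 = plane A, bit 1 = plane B */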
+
+ if (g4x_compute_wm0(dev, 0,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planea_wm, &cursora_wm))
+ enabled |= 1;
+
+ if (g4x_compute_wm0(dev, 1,
+ &g4x_wm_info, latency_ns,
+ &g4x_cursor_wm_info, latency_ns,
+ &planeb_wm, &cursorb_wm))
+ enabled |= 2;
+
+ plane_sr = cursor_sr = 0;
+ if (single_plane_enabled(enabled) &&
+ g4x_compute_srwm(dev, ffs(enabled) - 1,
+ sr_latency_ns,
+ &g4x_wm_info,
+ &g4x_cursor_wm_info,
+ &plane_sr, &cursor_sr))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+ else
+ I915_WRITE(FW_BLC_SELF,
+ I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+
+ DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
+ planea_wm, cursora_wm,
+ planeb_wm, cursorb_wm,
+ plane_sr, cursor_sr);
+
+ I915_WRITE(DSPFW1,
+ (plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
- (planeb_wm << DSPFW_PLANEB_SHIFT) | planea_wm);
- I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
+ (planeb_wm << DSPFW_PLANEB_SHIFT) |
+ planea_wm);
+ I915_WRITE(DSPFW2,
+ (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
- I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
+ I915_WRITE(DSPFW3,
+ (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i965_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static void i965_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned long line_time_us;
- int sr_clock, sr_entries, srwm = 1;
+ struct drm_crtc *crtc;
+ int srwm = 1;
int cursor_sr = 16;
/* Calc sr entries for one plane configs */
- if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
+ crtc = single_enabled_crtc(dev);
+ if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
+ int clock = crtc->mode.clock;
+ int htotal = crtc->mode.htotal;
+ int hdisplay = crtc->mode.hdisplay;
+ int pixel_size = crtc->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = ((htotal * 1000) / clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- srwm = I965_FIFO_SIZE - sr_entries;
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
+ srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
+ DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
+ entries, srwm);
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * 64;
- sr_entries = DIV_ROUND_UP(sr_entries,
+ entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
- (sr_entries + i965_cursor_wm_info.guard_size);
+ (entries + i965_cursor_wm_info.guard_size);
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
@@ -3398,46 +3997,56 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
srwm);
/* 965 has limitations... */
- I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
- (8 << 0));
+ I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
+ (8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
-static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int sr_htotal,
- int pixel_size)
+static void i9xx_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
- int total_size, cacheline_size, cwm, srwm = 1;
+ int cwm, srwm = 1;
+ int fifo_size;
int planea_wm, planeb_wm;
- struct intel_watermark_params planea_params, planeb_params;
- unsigned long line_time_us;
- int sr_clock, sr_entries = 0;
+ struct drm_crtc *crtc, *enabled = NULL;
- /* Create copies of the base settings for each pipe */
- if (IS_CRESTLINE(dev) || IS_I945GM(dev))
- planea_params = planeb_params = i945_wm_info;
+ if (IS_I945GM(dev))
+ wm_info = &i945_wm_info;
else if (!IS_GEN2(dev))
- planea_params = planeb_params = i915_wm_info;
+ wm_info = &i915_wm_info;
else
- planea_params = planeb_params = i855_wm_info;
-
- /* Grab a couple of global values before we overwrite them */
- total_size = planea_params.fifo_size;
- cacheline_size = planea_params.cacheline_size;
-
- /* Update per-plane FIFO sizes */
- planea_params.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
- planeb_params.fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ wm_info = &i855_wm_info;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = intel_get_crtc_for_plane(dev, 0);
+ if (crtc->enabled && crtc->fb) {
+ planea_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ enabled = crtc;
+ } else
+ planea_wm = fifo_size - wm_info->guard_size;
+
+ fifo_size = dev_priv->display.get_fifo_size(dev, 1);
+ crtc = intel_get_crtc_for_plane(dev, 1);
+ if (crtc->enabled && crtc->fb) {
+ planeb_wm = intel_calculate_wm(crtc->mode.clock,
+ wm_info, fifo_size,
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
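+ /* a second active plane rules out self-refresh */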
+ if (enabled == NULL)
+ enabled = crtc;
+ else
+ enabled = NULL;
+ } else
+ planeb_wm = fifo_size - wm_info->guard_size;
- planea_wm = intel_calculate_wm(planea_clock, &planea_params,
- pixel_size, latency_ns);
- planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
- pixel_size, latency_ns);
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
/*
@@ -3445,39 +4054,39 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
*/
cwm = 2;
+ /* Play safe and disable self-refresh before adjusting watermarks. */
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+
/* Calc sr entries for one plane configs */
- if (HAS_FW_BLC(dev) && sr_hdisplay &&
- (!planea_clock || !planeb_clock)) {
+ if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
+ int clock = enabled->mode.clock;
+ int htotal = enabled->mode.htotal;
+ int hdisplay = enabled->mode.hdisplay;
+ int pixel_size = enabled->fb->bits_per_pixel / 8;
+ unsigned long line_time_us;
+ int entries;
- sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_htotal * 1000) / sr_clock);
+ line_time_us = (htotal * 1000) / clock;
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
- pixel_size * sr_hdisplay;
- sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
- DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
- srwm = total_size - sr_entries;
+ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * hdisplay;
+ entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
+ DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
+ srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
if (IS_I945G(dev) || IS_I945GM(dev))
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
- else if (IS_I915GM(dev)) {
- /* 915M has a smaller SRWM field */
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev))
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
- I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
- }
- } else {
- /* Turn off self refresh if both pipes are enabled */
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
- } else if (IS_I915GM(dev)) {
- I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
- }
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -3492,19 +4101,36 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
+
+ if (HAS_FW_BLC(dev)) {
+ if (enabled) {
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF,
+ FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ else if (IS_I915GM(dev))
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ DRM_DEBUG_KMS("memory self refresh enabled\n");
+ } else
+ DRM_DEBUG_KMS("memory self refresh disabled\n");
+ }
}
-static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
- int unused2, int unused3, int pixel_size)
+static void i830_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
+ struct drm_crtc *crtc;
+ uint32_t fwater_lo;
int planea_wm;
- i830_wm_info.fifo_size = dev_priv->display.get_fifo_size(dev, 0);
+ crtc = single_enabled_crtc(dev);
+ if (crtc == NULL)
+ return;
- planea_wm = intel_calculate_wm(planea_clock, &i830_wm_info,
- pixel_size, latency_ns);
+ planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+ dev_priv->display.get_fifo_size(dev, 0),
+ crtc->fb->bits_per_pixel / 8,
+ latency_ns);
+ fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
@@ -3613,15 +4239,15 @@ static bool ironlake_check_srwm(struct drm_device *dev, int level,
/*
* Compute watermark values of WM[1-3],
*/
-static bool ironlake_compute_srwm(struct drm_device *dev, int level,
- int hdisplay, int htotal,
- int pixel_size, int clock, int latency_ns,
+static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
+ int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
-
+ struct drm_crtc *crtc;
unsigned long line_time_us;
+ int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
int small, large;
int entries;
@@ -3631,6 +4257,12 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
return false;
}
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ hdisplay = crtc->mode.hdisplay;
+ htotal = crtc->mode.htotal;
+ clock = crtc->mode.clock;
+ pixel_size = crtc->fb->bits_per_pixel / 8;
+
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
@@ -3658,14 +4290,11 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level,
display, cursor);
}
-static void ironlake_update_wm(struct drm_device *dev,
- int planea_clock, int planeb_clock,
- int hdisplay, int htotal,
- int pixel_size)
+static void ironlake_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int fbc_wm, plane_wm, cursor_wm, enabled;
- int clock;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
enabled = 0;
if (ironlake_compute_wm0(dev, 0,
@@ -3679,7 +4308,7 @@ static void ironlake_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 1;
}
if (ironlake_compute_wm0(dev, 1,
@@ -3693,7 +4322,7 @@ static void ironlake_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 2;
}
/*
@@ -3704,14 +4333,13 @@ static void ironlake_update_wm(struct drm_device *dev,
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (enabled != 1)
+ if (!single_plane_enabled(enabled))
return;
-
- clock = planea_clock ? planea_clock : planeb_clock;
+ enabled = ffs(enabled) - 1;
/* WM1 */
- if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
- clock, ILK_READ_WM1_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ ILK_READ_WM1_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3725,8 +4353,8 @@ static void ironlake_update_wm(struct drm_device *dev,
cursor_wm);
/* WM2 */
- if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
- clock, ILK_READ_WM2_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ ILK_READ_WM2_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3745,15 +4373,12 @@ static void ironlake_update_wm(struct drm_device *dev,
*/
}
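
Both the ironlake and sandybridge paths now track active planes as a bitmask rather than a counter: enabled |= 1 or 2 records each pipe, single_plane_enabled() gates the LP watermarks, and ffs() recovers the plane index. single_plane_enabled() itself is not shown in this hunk; the one-bit-set test below is the obvious implementation, offered as an assumption:

/* Sketch of the bitmask idiom used above. */
#include <stdio.h>
#include <strings.h>    /* ffs() */

static int single_plane_enabled(unsigned int mask)
{
        return mask != 0 && (mask & (mask - 1)) == 0;
}

int main(void)
{
        unsigned int enabled = 0;

        enabled |= 1;           /* plane A active */
        /* enabled |= 2;           plane B would disqualify self-refresh */

        if (!single_plane_enabled(enabled))
                return 0;       /* LP watermarks stay disabled */

        printf("self-refresh plane: %d\n", ffs(enabled) - 1);
        return 0;
}
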
-static void sandybridge_update_wm(struct drm_device *dev,
- int planea_clock, int planeb_clock,
- int hdisplay, int htotal,
- int pixel_size)
+static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
- int fbc_wm, plane_wm, cursor_wm, enabled;
- int clock;
+ int fbc_wm, plane_wm, cursor_wm;
+ unsigned int enabled;
enabled = 0;
if (ironlake_compute_wm0(dev, 0,
@@ -3765,7 +4390,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 1;
}
if (ironlake_compute_wm0(dev, 1,
@@ -3777,7 +4402,7 @@ static void sandybridge_update_wm(struct drm_device *dev,
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
- enabled++;
+ enabled |= 2;
}
/*
@@ -3794,14 +4419,13 @@ static void sandybridge_update_wm(struct drm_device *dev,
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (enabled != 1)
+ if (!single_plane_enabled(enabled))
return;
-
- clock = planea_clock ? planea_clock : planeb_clock;
+ enabled = ffs(enabled) - 1;
/* WM1 */
- if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM1_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 1, enabled,
+ SNB_READ_WM1_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3815,9 +4439,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
cursor_wm);
/* WM2 */
- if (!ironlake_compute_srwm(dev, 2,
- hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM2_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 2, enabled,
+ SNB_READ_WM2_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3831,9 +4454,8 @@ static void sandybridge_update_wm(struct drm_device *dev,
cursor_wm);
/* WM3 */
- if (!ironlake_compute_srwm(dev, 3,
- hdisplay, htotal, pixel_size,
- clock, SNB_READ_WM3_LATENCY() * 500,
+ if (!ironlake_compute_srwm(dev, 3, enabled,
+ SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -3882,44 +4504,9 @@ static void sandybridge_update_wm(struct drm_device *dev,
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc;
- int sr_hdisplay = 0;
- unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
- int enabled = 0, pixel_size = 0;
- int sr_htotal = 0;
-
- if (!dev_priv->display.update_wm)
- return;
- /* Get the clock config from both planes */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->active) {
- enabled++;
- if (intel_crtc->plane == 0) {
- DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planea_clock = crtc->mode.clock;
- } else {
- DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
- intel_crtc->pipe, crtc->mode.clock);
- planeb_clock = crtc->mode.clock;
- }
- sr_hdisplay = crtc->mode.hdisplay;
- sr_clock = crtc->mode.clock;
- sr_htotal = crtc->mode.htotal;
- if (crtc->fb)
- pixel_size = crtc->fb->bits_per_pixel / 8;
- else
- pixel_size = 4; /* by default */
- }
- }
-
- if (enabled <= 0)
- return;
-
- dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
- sr_hdisplay, sr_htotal, pixel_size);
+ if (dev_priv->display.update_wm)
+ dev_priv->display.update_wm(dev);
}
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -3951,6 +4538,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int ret;
struct fdi_m_n m_n = {0};
u32 reg, temp;
+ u32 lvds_sync = 0;
int target_clock;
drm_vblank_pre_modeset(dev, pipe);
@@ -4322,9 +4910,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
- dspcntr |= DISPLAY_PLANE_ENABLE;
- pipeconf |= PIPECONF_ENABLE;
- dpll |= DPLL_VCO_ENABLE;
+ if (!HAS_PCH_SPLIT(dev))
+ dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
@@ -4350,10 +4937,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* enable transcoder DPLL */
if (HAS_PCH_CPT(dev)) {
temp = I915_READ(PCH_DPLL_SEL);
- if (pipe == 0)
+ switch (pipe) {
+ case 0:
temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL;
- else
+ break;
+ case 1:
temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL;
+ break;
+ case 2:
+ /* FIXME: manage transcoder PLLs? */
+ temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL;
+ break;
+ default:
+ BUG();
+ }
I915_WRITE(PCH_DPLL_SEL, temp);
POSTING_READ(PCH_DPLL_SEL);
@@ -4403,6 +5000,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else
temp &= ~LVDS_ENABLE_DITHER;
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ lvds_sync |= LVDS_HSYNC_POLARITY;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ lvds_sync |= LVDS_VSYNC_POLARITY;
+ if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
+ != lvds_sync) {
+ char flags[2] = "-+";
+ DRM_INFO("Changing LVDS panel from "
+ "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
+ flags[!(temp & LVDS_HSYNC_POLARITY)],
+ flags[!(temp & LVDS_VSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
+ flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
+ temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+ temp |= lvds_sync;
+ }
I915_WRITE(reg, temp);
}
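
The "-+" lookup in the LVDS hunk indexes with !(reg & bit), so a set polarity bit prints '-' and a clear one '+'. A tiny standalone version (the bit positions are stand-ins, not the real LVDS register layout):

/* Illustration of the flags[!(reg & bit)] idiom above. */
#include <stdio.h>

#define LVDS_HSYNC_POLARITY (1 << 21)   /* illustrative bit positions */
#define LVDS_VSYNC_POLARITY (1 << 20)

int main(void)
{
        const char flags[2] = "-+";     /* [0] = bit set, [1] = bit clear */
        unsigned int temp = LVDS_HSYNC_POLARITY;        /* -hsync, +vsync */

        printf("(%chsync, %cvsync)\n",
               flags[!(temp & LVDS_HSYNC_POLARITY)],
               flags[!(temp & LVDS_VSYNC_POLARITY)]);
        return 0;
}
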
@@ -4420,17 +5033,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else if (HAS_PCH_SPLIT(dev)) {
/* For non-DP output, clear any trans DP clock recovery setting.*/
- if (pipe == 0) {
- I915_WRITE(TRANSA_DATA_M1, 0);
- I915_WRITE(TRANSA_DATA_N1, 0);
- I915_WRITE(TRANSA_DP_LINK_M1, 0);
- I915_WRITE(TRANSA_DP_LINK_N1, 0);
- } else {
- I915_WRITE(TRANSB_DATA_M1, 0);
- I915_WRITE(TRANSB_DATA_N1, 0);
- I915_WRITE(TRANSB_DP_LINK_M1, 0);
- I915_WRITE(TRANSB_DP_LINK_N1, 0);
- }
+ I915_WRITE(TRANSDATA_M1(pipe), 0);
+ I915_WRITE(TRANSDATA_N1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+ I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
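
Most of this patch swaps (pipe == 0) ? REG_A : REG_B chains for parameterized macros such as TRANSDATA_M1(pipe). Per-pipe registers sit at a fixed stride, so the instances can be derived by linear interpolation. The macro definition is not part of this section; the sketch below shows the likely shape, with invented offsets:

/* Probable pattern behind TRANSDATA_M1(pipe) and friends; only the
 * macro shape matters, the offsets here are made up. */
#include <stdio.h>

#define _PIPE(pipe, a, b)       ((a) + (pipe) * ((b) - (a)))

#define _TRANSA_DATA_M1         0xe0030 /* illustrative offsets */
#define _TRANSB_DATA_M1         0xe1030
#define TRANSDATA_M1(pipe)      _PIPE(pipe, _TRANSA_DATA_M1, _TRANSB_DATA_M1)

int main(void)
{
        int pipe;

        for (pipe = 0; pipe < 3; pipe++)
                printf("pipe %d: TRANSDATA_M1 = 0x%x\n",
                       pipe, TRANSDATA_M1(pipe));
        return 0;
}
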
@@ -4533,6 +5139,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
+ if (!HAS_PCH_SPLIT(dev))
+ intel_enable_pipe(dev_priv, pipe, false);
intel_wait_for_vblank(dev, pipe);
@@ -4543,6 +5151,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
I915_WRITE(DSPCNTR(plane), dspcntr);
+ POSTING_READ(DSPCNTR(plane));
+ if (!HAS_PCH_SPLIT(dev))
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -4559,7 +5170,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
+ int palreg = PALETTE(intel_crtc->pipe);
int i;
/* The clocks have to be on to load the palette. */
@@ -4568,8 +5179,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
/* use legacy palette for Ironlake */
if (HAS_PCH_SPLIT(dev))
- palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
- LGC_PALETTE_B;
+ palreg = LGC_PALETTE(intel_crtc->pipe);
for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
@@ -4590,12 +5200,12 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
if (intel_crtc->cursor_visible == visible)
return;
- cntl = I915_READ(CURACNTR);
+ cntl = I915_READ(_CURACNTR);
if (visible) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
- I915_WRITE(CURABASE, base);
+ I915_WRITE(_CURABASE, base);
cntl &= ~(CURSOR_FORMAT_MASK);
/* XXX width must be 64, stride 256 => 0x00 << 28 */
@@ -4604,7 +5214,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
CURSOR_FORMAT_ARGB;
} else
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- I915_WRITE(CURACNTR, cntl);
+ I915_WRITE(_CURACNTR, cntl);
intel_crtc->cursor_visible = visible;
}
@@ -4618,7 +5228,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
bool visible = base != 0;
if (intel_crtc->cursor_visible != visible) {
- uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ uint32_t cntl = I915_READ(CURCNTR(pipe));
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
@@ -4627,12 +5237,12 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
- I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+ I915_WRITE(CURCNTR(pipe), cntl);
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
- I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+ I915_WRITE(CURBASE(pipe), base);
}
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
@@ -4682,7 +5292,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (!visible && !intel_crtc->cursor_visible)
return;
- I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+ I915_WRITE(CURPOS(pipe), pos);
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
@@ -4722,7 +5332,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
}
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
- if (!obj)
+ if (&obj->base == NULL)
return -ENOENT;
if (obj->base.size < width * height * 4) {
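
The lookup-failure test changes from !obj to &obj->base == NULL because to_intel_bo() is a container_of()-style cast: with base as the first member the offset is zero, so a NULL gem pointer maps back to an object whose &obj->base is still NULL, and nothing is ever dereferenced. A toy demonstration (the struct layout is a stand-in for drm_i915_gem_object; pointer arithmetic on NULL is strictly undefined, but it is exactly what the driver relies on):

#include <stddef.h>
#include <stdio.h>

struct drm_gem_object { size_t size; };
struct intel_bo { struct drm_gem_object base; int tiling_mode; };

/* container_of()-style downcast; offsetof(base) is 0 here */
#define to_intel_bo(p) \
        ((struct intel_bo *)((char *)(p) - offsetof(struct intel_bo, base)))

int main(void)
{
        struct drm_gem_object *lookup = NULL;   /* simulated failed lookup */
        struct intel_bo *obj = to_intel_bo(lookup);

        printf("&obj->base == NULL: %d\n", &obj->base == NULL);
        return 0;
}
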
@@ -4988,14 +5598,14 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
+ u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = I915_READ((pipe == 0) ? FPA0 : FPB0);
+ fp = I915_READ(FP0(pipe));
else
- fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
+ fp = I915_READ(FP1(pipe));
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
@@ -5077,10 +5687,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_display_mode *mode;
- int htot = I915_READ((pipe == 0) ? HTOTAL_A : HTOTAL_B);
- int hsync = I915_READ((pipe == 0) ? HSYNC_A : HSYNC_B);
- int vtot = I915_READ((pipe == 0) ? VTOTAL_A : VTOTAL_B);
- int vsync = I915_READ((pipe == 0) ? VSYNC_A : VSYNC_B);
+ int htot = I915_READ(HTOTAL(pipe));
+ int hsync = I915_READ(HSYNC(pipe));
+ int vtot = I915_READ(VTOTAL(pipe));
+ int vsync = I915_READ(VSYNC(pipe));
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
@@ -5189,7 +5799,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_reg = DPLL(pipe);
int dpll = I915_READ(dpll_reg);
if (HAS_PCH_SPLIT(dev))
@@ -5237,7 +5847,6 @@ static void intel_idle_update(struct work_struct *work)
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- int enabled = 0;
if (!i915_powersave)
return;
@@ -5251,16 +5860,11 @@ static void intel_idle_update(struct work_struct *work)
if (!crtc->fb)
continue;
- enabled++;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->busy)
intel_decrease_pllclock(crtc);
}
- if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
- DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
- }
mutex_unlock(&dev->struct_mutex);
}
@@ -5285,17 +5889,9 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy) {
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- u32 fw_blc_self;
-
- DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
- fw_blc_self = I915_READ(FW_BLC_SELF);
- fw_blc_self &= ~FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
- }
+ if (!dev_priv->busy)
dev_priv->busy = true;
- } else
+ else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
@@ -5307,14 +5903,6 @@ void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb->obj == obj) {
if (!intel_crtc->busy) {
- if (IS_I945G(dev) || IS_I945GM(dev)) {
- u32 fw_blc_self;
-
- DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
- fw_blc_self = I915_READ(FW_BLC_SELF);
- fw_blc_self &= ~FW_BLC_SELF_EN;
- I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
- }
/* Non-busy -> busy, upclock */
intel_increase_pllclock(crtc);
intel_crtc->busy = true;
@@ -5592,7 +6180,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
*/
pf = 0;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
break;
@@ -5602,8 +6190,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
- pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
+ pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
OUT_RING(pf | pipesrc);
break;
}
@@ -5692,22 +6280,8 @@ static void intel_sanitize_modesetting(struct drm_device *dev,
pipe = !pipe;
/* Disable the plane and wait for it to stop reading from the pipe. */
- I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
- intel_flush_display_plane(dev, plane);
-
- if (IS_GEN2(dev))
- intel_wait_for_vblank(dev, pipe);
-
- if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
- return;
-
- /* Switch off the pipe. */
- reg = PIPECONF(pipe);
- val = I915_READ(reg);
- if (val & PIPECONF_ENABLE) {
- I915_WRITE(reg, val & ~PIPECONF_ENABLE);
- intel_wait_for_pipe_off(dev, pipe);
- }
+ intel_disable_plane(dev_priv, plane, pipe);
+ intel_disable_pipe(dev_priv, pipe);
}
static void intel_crtc_init(struct drm_device *dev, int pipe)
@@ -5997,7 +6571,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
int ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
- if (!obj)
+ if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
@@ -6319,18 +6893,18 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
18 << 24 |
6 << 16);
- I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
- I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
- I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_USE_NORMAL_FREQ |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
- GEN6_RP_UP_BUSY_MAX |
- GEN6_RP_DOWN_BUSY_MIN);
+ GEN6_RP_UP_BUSY_AVG |
+ GEN6_RP_DOWN_IDLE_CONT);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -6386,6 +6960,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
void intel_enable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
/*
* Disable clock gating reported to work incorrectly according to the
@@ -6495,12 +7070,10 @@ void intel_enable_clock_gating(struct drm_device *dev)
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
- I915_WRITE(DSPACNTR,
- I915_READ(DSPACNTR) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
- I915_WRITE(DSPBCNTR,
- I915_READ(DSPBCNTR) |
- DISPPLANE_TRICKLE_FEED_DISABLE);
+ for_each_pipe(pipe)
+ I915_WRITE(DSPCNTR(pipe),
+ I915_READ(DSPCNTR(pipe)) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
}
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -6855,10 +7428,6 @@ void intel_modeset_init(struct drm_device *dev)
}
dev->mode_config.fb_base = dev->agp->base;
- if (IS_MOBILE(dev) || !IS_GEN2(dev))
- dev_priv->num_pipe = 2;
- else
- dev_priv->num_pipe = 1;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 51cb4e36997f..d29e33f815d7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -49,6 +49,7 @@ struct intel_dp {
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int force_audio;
+ uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
@@ -685,6 +686,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4, bpp = 24;
struct intel_dp_m_n m_n;
+ int pipe = intel_crtc->pipe;
/*
* Find the lane count in the intel_encoder private
@@ -715,39 +717,19 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(TRANSA_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSA_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSA_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSA_DP_LINK_N1, m_n.link_n);
- } else {
- I915_WRITE(TRANSB_DATA_M1,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(TRANSB_DATA_N1, m_n.gmch_n);
- I915_WRITE(TRANSB_DP_LINK_M1, m_n.link_m);
- I915_WRITE(TRANSB_DP_LINK_N1, m_n.link_n);
- }
+ I915_WRITE(TRANSDATA_M1(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
+ I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
+ I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
} else {
- if (intel_crtc->pipe == 0) {
- I915_WRITE(PIPEA_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEA_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEA_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEA_DP_LINK_N, m_n.link_n);
- } else {
- I915_WRITE(PIPEB_GMCH_DATA_M,
- ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
- m_n.gmch_m);
- I915_WRITE(PIPEB_GMCH_DATA_N,
- m_n.gmch_n);
- I915_WRITE(PIPEB_DP_LINK_M, m_n.link_m);
- I915_WRITE(PIPEB_DP_LINK_N, m_n.link_n);
- }
+ I915_WRITE(PIPE_GMCH_DATA_M(pipe),
+ ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
+ m_n.gmch_m);
+ I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
+ I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
+ I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
}
}
@@ -760,8 +742,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- intel_dp->DP = (DP_VOLTAGE_0_4 |
- DP_PRE_EMPHASIS_0);
+ intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
+ intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -813,6 +795,40 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ /*
+ * If the panel wasn't on, make sure there's not a currently
+ * active PP sequence before enabling AUX VDD.
+ */
+ if (!(I915_READ(PCH_PP_STATUS) & PP_ON))
+ msleep(dev_priv->panel_t3);
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp |= EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+}
+
+static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pp;
+
+ pp = I915_READ(PCH_PP_CONTROL);
+ pp &= ~EDP_FORCE_VDD;
+ I915_WRITE(PCH_PP_CONTROL, pp);
+ POSTING_READ(PCH_PP_CONTROL);
+
+ /* Make sure sequencer is idle before allowing subsequent activity */
+ msleep(dev_priv->panel_t12);
+}
+
/* Returns true if the panel was already on when called */
static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
{
@@ -834,11 +850,6 @@ static bool ironlake_edp_panel_on (struct intel_dp *intel_dp)
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
- /* Ouch. We need to wait here for some panels, like Dell e6510
- * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
- */
- msleep(300);
-
if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
5000))
DRM_ERROR("panel on wait timed out: 0x%08x\n",
@@ -875,11 +886,6 @@ static void ironlake_edp_panel_off (struct drm_device *dev)
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
-
- /* Ouch. We need to wait here for some panels, like Dell e6510
- * https://bugs.freedesktop.org/show_bug.cgi?id=29278i
- */
- msleep(300);
}
static void ironlake_edp_backlight_on (struct drm_device *dev)
@@ -945,7 +951,7 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
- ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_off(dev);
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
@@ -959,10 +965,15 @@ static void intel_dp_commit(struct drm_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
+ if (is_edp(intel_dp))
+ ironlake_edp_panel_vdd_on(intel_dp);
+
intel_dp_start_link_train(intel_dp);
- if (is_edp(intel_dp))
+ if (is_edp(intel_dp)) {
ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
intel_dp_complete_link_train(intel_dp);
@@ -988,9 +999,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
ironlake_edp_pll_off(encoder);
} else {
if (is_edp(intel_dp))
- ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_on(intel_dp);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
+ if (is_edp(intel_dp)) {
+ ironlake_edp_panel_on(intel_dp);
+ ironlake_edp_panel_vdd_off(intel_dp);
+ }
intel_dp_complete_link_train(intel_dp);
}
if (is_edp(intel_dp))
@@ -1508,9 +1523,13 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
- /* Can't disconnect eDP */
- if (is_edp(intel_dp))
- return connector_status_connected;
+ /* Can't disconnect eDP, but you can close the lid... */
+ if (is_edp(intel_dp)) {
+ status = intel_panel_detect(intel_dp->base.base.dev);
+ if (status == connector_status_unknown)
+ status = connector_status_connected;
+ return status;
+ }
status = connector_status_disconnected;
if (intel_dp_aux_native_read(intel_dp,
@@ -1662,6 +1681,7 @@ intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
@@ -1690,6 +1710,14 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_dp->color_range)
+ return 0;
+
+ intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -1809,6 +1837,8 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_dp->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
}
+
+ intel_attach_broadcast_rgb_property(connector);
}
void
@@ -1826,6 +1856,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
if (!intel_dp)
return;
+ intel_dp->output_reg = output_reg;
+ intel_dp->dpms_mode = -1;
+
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dp);
@@ -1865,10 +1898,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
- intel_dp->output_reg = output_reg;
- intel_dp->has_audio = false;
- intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
-
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
@@ -1906,21 +1935,33 @@ intel_dp_init(struct drm_device *dev, int output_reg)
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
int ret;
- bool was_on;
+ u32 pp_on, pp_div;
- was_on = ironlake_edp_panel_on(intel_dp);
+ pp_on = I915_READ(PCH_PP_ON_DELAYS);
+ pp_div = I915_READ(PCH_PP_DIVISOR);
+
+ /* Get T3 & T12 values (note: VESA not bspec terminology) */
+ dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16;
+ dev_priv->panel_t3 /= 10; /* t3 in 100us units */
+ dev_priv->panel_t12 = pp_div & 0xf;
+ dev_priv->panel_t12 *= 100; /* t12 in 100ms units */
+
+ ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV,
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
+ ironlake_edp_panel_vdd_off(intel_dp);
if (ret == sizeof(intel_dp->dpcd)) {
if (intel_dp->dpcd[0] >= 0x11)
dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
+ /* if this fails, presume the device is a ghost */
DRM_ERROR("failed to retrieve link info\n");
+ intel_dp_destroy(&intel_connector->base);
+ intel_dp_encoder_destroy(&intel_dp->base.base);
+ return;
}
- if (!was_on)
- ironlake_edp_panel_off(dev);
}
intel_encoder->hot_plug = intel_dp_hot_plug;
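
The T3/T12 caching added above pulls the eDP power-sequencer delays out of the PCH registers once at init. A standalone decode with invented raw register values, following the same field extraction as the patch:

/* Decode of the eDP power-sequencer delays cached above; the raw
 * pp_on/pp_div values are made up for illustration. */
#include <stdio.h>

int main(void)
{
        unsigned int pp_on = 0x07d00000;        /* sample PCH_PP_ON_DELAYS */
        unsigned int pp_div = 0x00000005;       /* sample PCH_PP_DIVISOR */

        /* T3 (power on to VDD stable) is bits 28:16, in 100us units */
        unsigned int t3 = ((pp_on & 0x1fff0000) >> 16) / 10;   /* -> ms */

        /* T12 (power-cycle delay) is the low nibble, in 100ms units */
        unsigned int t12 = (pp_div & 0xf) * 100;                /* -> ms */

        printf("panel_t3 = %u ms, panel_t12 = %u ms\n", t3, t12);
        return 0;
}
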
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2c431049963c..5daa991cb287 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -217,6 +217,13 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
return dev_priv->pipe_to_crtc_mapping[pipe];
}
+static inline struct drm_crtc *
+intel_get_crtc_for_plane(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ return dev_priv->plane_to_crtc_mapping[plane];
+}
+
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
@@ -230,6 +237,8 @@ struct intel_unpin_work {
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
+extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
@@ -260,6 +269,7 @@ extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern void intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_disable_backlight(struct drm_device *dev);
+extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -321,8 +331,7 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay,
- bool interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
extern int intel_overlay_put_image(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_overlay_attrs(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index ea373283c93b..6eda1b51c636 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -178,7 +178,7 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
int pipe = intel_crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
- int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
+ int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c635c9e357b9..f289b8642976 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -41,6 +41,7 @@ struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
+ uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
@@ -124,6 +125,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -278,6 +280,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
ret = drm_connector_property_set_value(connector, property, val);
@@ -305,6 +308,14 @@ intel_hdmi_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_hdmi->color_range)
+ return 0;
+
+ intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
return -EINVAL;
done:
@@ -363,6 +374,8 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->force_audio_property->values[1] = 1;
drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
}
+
+ intel_attach_broadcast_rgb_property(connector);
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 58040f68ed7a..82d04c5899d2 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -384,7 +384,8 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->reg0 = i | GMBUS_RATE_100KHZ;
/* XXX force bit banging until GMBUS is fully debugged */
- bus->force_bit = intel_gpio_create(dev_priv, i);
+ if (IS_GEN2(dev))
+ bus->force_bit = intel_gpio_create(dev_priv, i);
}
intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index bcdba7bd5cfa..1a311ad01116 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -231,6 +231,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
+ int pipe;
/* Should never happen!! */
if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
@@ -277,8 +278,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- I915_WRITE(BCLRPAT_A, 0);
- I915_WRITE(BCLRPAT_B, 0);
+ for_each_pipe(pipe)
+ I915_WRITE(BCLRPAT(pipe), 0);
switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
@@ -474,6 +475,10 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
+ status = intel_panel_detect(dev);
+ if (status != connector_status_unknown)
+ return status;
+
/* ACPI lid methods were generally unreliable in this generation, so
* don't even bother.
*/
@@ -496,7 +501,7 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return drm_add_edid_modes(connector, intel_lvds->edid);
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
- if (mode == 0)
+ if (mode == NULL)
return 0;
drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index f70b7cf32bff..9034dd8f33c7 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -80,3 +80,33 @@ int intel_ddc_get_modes(struct drm_connector *connector,
return ret;
}
+
+static const char *broadcast_rgb_names[] = {
+ "Full",
+ "Limited 16:235",
+};
+
+void
+intel_attach_broadcast_rgb_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_property *prop;
+ int i;
+
+ prop = dev_priv->broadcast_rgb_property;
+ if (prop == NULL) {
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "Broadcast RGB",
+ ARRAY_SIZE(broadcast_rgb_names));
+ if (prop == NULL)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
+ drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
+
+ dev_priv->broadcast_rgb_property = prop;
+ }
+
+ drm_connector_attach_property(connector, prop, 0);
+}
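
Since "Broadcast RGB" is an ordinary connector property, userspace can toggle it with stock libdrm calls. A hedged sketch, assuming libdrm is available and a valid DRM fd and drmModeConnector are already in hand (error handling trimmed for brevity):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Set "Broadcast RGB" to val (0 = Full, 1 = Limited 16:235). */
static int set_broadcast_rgb(int fd, drmModeConnector *conn, uint64_t val)
{
        int i;

        for (i = 0; i < conn->count_props; i++) {
                drmModePropertyRes *prop =
                        drmModeGetProperty(fd, conn->props[i]);
                if (!prop)
                        continue;
                if (strcmp(prop->name, "Broadcast RGB") == 0) {
                        int ret = drmModeConnectorSetProperty(fd,
                                        conn->connector_id,
                                        prop->prop_id, val);
                        drmModeFreeProperty(prop);
                        return ret;
                }
                drmModeFreeProperty(prop);
        }
        return -1;      /* property not exposed on this connector */
}
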
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 64fd64443ca6..d2c710422908 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -39,6 +39,8 @@
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
+#define ACPI_CLID 0x01ac /* current lid state indicator */
+#define ACPI_CDCK 0x01b0 /* current docking state indicator */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
@@ -489,6 +491,8 @@ int intel_opregion_setup(struct drm_device *dev)
opregion->header = base;
opregion->vbt = base + OPREGION_VBT_OFFSET;
+ opregion->lid_state = base + ACPI_CLID;
+
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3fbb98b948d6..a670c006982e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -213,7 +213,6 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
struct drm_i915_gem_request *request,
- bool interruptible,
void (*tail)(struct intel_overlay *))
{
struct drm_device *dev = overlay->dev;
@@ -221,16 +220,14 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_do_wait_request(dev,
- overlay->last_flip_req, true,
- LP_RING(dev_priv));
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -256,7 +253,7 @@ i830_activate_pipe_a(struct drm_device *dev)
return 0;
/* most i8xx have pipe a forced on, so don't trust dpms mode */
- if (I915_READ(PIPEACONF) & PIPECONF_ENABLE)
+ if (I915_READ(_PIPEACONF) & PIPECONF_ENABLE)
return 0;
crtc_funcs = crtc->base.helper_private;
@@ -322,7 +319,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- ret = intel_overlay_do_wait_request(overlay, request, true, NULL);
+ ret = intel_overlay_do_wait_request(overlay, request, NULL);
out:
if (pipe_a_quirk)
i830_deactivate_pipe_a(dev);
@@ -364,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ ret = i915_add_request(LP_RING(dev_priv), NULL, request);
if (ret) {
kfree(request);
return ret;
@@ -401,8 +398,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
}
/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay,
- bool interruptible)
+static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -437,14 +433,13 @@ static int intel_overlay_off(struct intel_overlay *overlay,
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
ADVANCE_LP_RING();
- return intel_overlay_do_wait_request(overlay, request, interruptible,
+ return intel_overlay_do_wait_request(overlay, request,
intel_overlay_off_tail);
}
/* recover from an interruption due to a signal
* We have to be careful not to repeat work forever and to make forward progress. */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
- bool interruptible)
+static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -453,8 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, LP_RING(dev_priv));
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
if (ret)
return ret;
@@ -499,7 +493,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
- ret = intel_overlay_do_wait_request(overlay, request, true,
+ ret = intel_overlay_do_wait_request(overlay, request,
intel_overlay_release_old_vid_tail);
if (ret)
return ret;
@@ -868,8 +862,7 @@ out_unpin:
return ret;
}
-int intel_overlay_switch_off(struct intel_overlay *overlay,
- bool interruptible)
+int intel_overlay_switch_off(struct intel_overlay *overlay)
{
struct overlay_registers *regs;
struct drm_device *dev = overlay->dev;
@@ -878,7 +871,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
- ret = intel_overlay_recover_from_interrupt(overlay, interruptible);
+ ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
return ret;
@@ -893,7 +886,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay,
regs->OCMD = 0;
intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_off(overlay, interruptible);
+ ret = intel_overlay_off(overlay);
if (ret != 0)
return ret;
@@ -1135,7 +1128,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
- ret = intel_overlay_switch_off(overlay, true);
+ ret = intel_overlay_switch_off(overlay);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
@@ -1157,7 +1150,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle));
- if (!new_bo) {
+ if (&new_bo->base == NULL) {
ret = -ENOENT;
goto out_free;
}
@@ -1171,13 +1164,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = intel_overlay_recover_from_interrupt(overlay, true);
+ ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
goto out_unlock;
if (overlay->crtc != crtc) {
struct drm_display_mode *mode = &crtc->base.mode;
- ret = intel_overlay_switch_off(overlay, true);
+ ret = intel_overlay_switch_off(overlay);
if (ret != 0)
goto out_unlock;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f8f86e57df22..a06ff07a4d3b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -280,3 +280,28 @@ void intel_panel_setup_backlight(struct drm_device *dev)
dev_priv->backlight_level = intel_panel_get_backlight(dev);
dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
}
+
+enum drm_connector_status
+intel_panel_detect(struct drm_device *dev)
+{
+#if 0
+ struct drm_i915_private *dev_priv = dev->dev_private;
+#endif
+
+ if (i915_panel_ignore_lid)
+ return i915_panel_ignore_lid > 0 ?
+ connector_status_connected :
+ connector_status_disconnected;
+
+ /* opregion lid state on HP 2540p is wrong at boot up,
+ * appears to be the fault of either the BIOS or Linux ACPI */
+#if 0
+ /* Assume that the BIOS does not lie through the OpRegion... */
+ if (dev_priv->opregion.lid_state)
+ return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
+ connector_status_connected :
+ connector_status_disconnected;
+#endif
+
+ return connector_status_unknown;
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 445f27efe677..789c47801ba8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring,
u32 flush_domains)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
int ret;
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
- invalidate_domains, flush_domains);
-#endif
-
- trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
- invalidate_domains, flush_domains);
-
if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
/*
* read/write caches:
@@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring,
(IS_G4X(dev) || IS_GEN5(dev)))
cmd |= MI_INVALIDATE_ISP;
-#if WATCH_EXEC
- DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
-#endif
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
@@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
*result = seqno;
return 0;
}
@@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
struct drm_device *dev = ring->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
-
if (IS_I830(dev) || IS_845G(dev)) {
ret = intel_ring_begin(ring, 4);
if (ret)
@@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_wait_ring_buffer(ring, ring->size - 8);
+ if (ret)
+ DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+ ring->name, ret);
+
I915_WRITE_CTL(ring, 0);
drm_core_ioremapfree(&ring->map, ring->dev);
@@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
return 0;
}
- trace_i915_ring_wait_begin (dev);
+ trace_i915_ring_wait_begin(ring);
end = jiffies + 3 * HZ;
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
if (ring->space >= n) {
- trace_i915_ring_wait_end(dev);
+ trace_i915_ring_wait_end(ring);
return 0;
}
@@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
} while (!time_after(jiffies, end));
- trace_i915_ring_wait_end (dev);
+ trace_i915_ring_wait_end(ring);
return -EBUSY;
}
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
+ if (unlikely(atomic_read(&dev_priv->mm.wedged)))
+ return -EIO;
+
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 34306865a5df..f23cc5f037a6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -44,7 +44,7 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
- void *virtual_start;
+ void __iomem *virtual_start;
struct drm_device *dev;
struct drm_i915_gem_object *obj;
@@ -59,6 +59,7 @@ struct intel_ring_buffer {
u32 irq_refcount;
u32 irq_mask;
u32 irq_seqno; /* last seqno seen at irq time */
+ u32 trace_irq_seqno;
u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
@@ -142,6 +143,26 @@ intel_read_status_page(struct intel_ring_buffer *ring,
return ioread32(ring->status_page.page_addr + reg);
}
+/**
+ * Reads a dword out of the status page, which is written to from the command
+ * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
+ * MI_STORE_DATA_IMM.
+ *
+ * The following dwords have a reserved meaning:
+ * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
+ * 0x04: ring 0 head pointer
+ * 0x05: ring 1 head pointer (915-class)
+ * 0x06: ring 2 head pointer (915-class)
+ * 0x10-0x1b: Context status DWords (GM45)
+ * 0x1f: Last written status offset. (GM45)
+ *
+ * The area from dword 0x20 to 0x3ff is available for driver usage.
+ */
+#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
+#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
+#define I915_GEM_HWS_INDEX 0x20
+#define I915_BREADCRUMB_INDEX 0x21
+
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
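
The new macros layer as READ_BREADCRUMB -> READ_HWSP -> intel_read_status_page(). A toy model with the status page as a plain array, using the dword map documented above (0x21 is the breadcrumb slot; ioread32() is replaced by a plain load):

#include <stdio.h>

#define I915_BREADCRUMB_INDEX   0x21

static unsigned int status_page[0x400];

static unsigned int read_status_page(int reg)
{
        return status_page[reg];        /* ioread32() in the real driver */
}

#define READ_BREADCRUMB()       read_status_page(I915_BREADCRUMB_INDEX)

int main(void)
{
        status_page[I915_BREADCRUMB_INDEX] = 1234;      /* GPU wrote seqno */
        printf("last breadcrumb seqno: %u\n", READ_BREADCRUMB());
        return 0;
}
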
@@ -167,6 +188,12 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
+static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
+{
+ if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
+ ring->trace_irq_seqno = seqno;
+}
+
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7c50cdce84f0..4324f33212d6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -93,6 +93,12 @@ struct intel_sdvo {
uint16_t attached_output;
/**
* This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bit per color mode.
+ */
+ uint32_t color_range;
+
+ /**
* This is set if we're going to treat the device as TV-out.
*
* While we have these nice friendly flags for output types that ought
@@ -585,6 +591,7 @@ static bool intel_sdvo_get_trained_inputs(struct intel_sdvo *intel_sdvo, bool *i
{
struct intel_sdvo_get_trained_inputs_response response;
+ BUILD_BUG_ON(sizeof(response) != 1);
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
&response, sizeof(response)))
return false;
@@ -632,6 +639,7 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_sdvo *intel_sdvo
{
struct intel_sdvo_pixel_clock_range clocks;
+ BUILD_BUG_ON(sizeof(clocks) != 4);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
&clocks, sizeof(clocks)))
@@ -699,6 +707,8 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_get_preferred_input_timing(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_dtd *dtd)
{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
&dtd->part1, sizeof(dtd->part1)) &&
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
@@ -796,6 +806,7 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
{
struct intel_sdvo_encode encode;
+ BUILD_BUG_ON(sizeof(encode) != 2);
return intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPP_ENCODE,
&encode, sizeof(encode));
@@ -1051,6 +1062,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
sdvox = 0;
+ if (intel_sdvo->is_hdmi)
+ sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -1162,6 +1175,7 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_DEVICE_CAPS,
caps, sizeof(*caps)))
@@ -1268,33 +1282,9 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
static bool
intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
{
- int caps = 0;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_SVID1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_CVBS1))
- caps++;
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_YPRPB1))
- caps++;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1))
- caps++;
-
- if (intel_sdvo->caps.output_flags &
- (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1))
- caps++;
-
- return (caps > 1);
+ /* Is there more than one type of output? */
+ int caps = intel_sdvo->caps.output_flags & 0xf;
+ return caps & (caps - 1);
}
static struct edid *
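
The multifunc test is the classic clear-lowest-set-bit trick: x & (x - 1) drops the lowest set bit, so it is nonzero exactly when at least two output types are advertised. By contrast, x & -x merely isolates the lowest bit and fires for any nonzero x, which is why the comment calls for the former. A quick table over the masked nibble:

#include <stdio.h>

int main(void)
{
        int caps;

        for (caps = 0; caps <= 0xf; caps++)
                printf("caps=0x%x  multifunc=%d  lowest_bit=0x%x\n",
                       caps, !!(caps & (caps - 1)), caps & -caps);
        return 0;
}
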
@@ -1482,7 +1472,7 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
* Note! This is in reply order (see loop in get_tv_modes).
* XXX: all 60Hz refresh?
*/
-struct drm_display_mode sdvo_tv_modes[] = {
+static const struct drm_display_mode sdvo_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
416, 0, 200, 201, 232, 233, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
@@ -1713,6 +1703,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -1742,6 +1733,14 @@ intel_sdvo_set_property(struct drm_connector *connector,
goto done;
}
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!intel_sdvo->color_range)
+ return 0;
+
+ intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
@@ -2046,6 +2045,9 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
drm_connector_attach_property(&connector->base.base,
connector->force_audio_property, 0);
}
+
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
}
static bool
@@ -2268,6 +2270,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
if (!intel_sdvo_set_target_output(intel_sdvo, type))
return false;
+ BUILD_BUG_ON(sizeof(format) != 6);
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
&format, sizeof(format)))
@@ -2474,6 +2477,8 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
uint16_t response;
} enhancements;
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
enhancements.response = 0;
intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fe4a53a50b83..4256b8ef3947 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1006,6 +1006,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
const struct video_levels *video_levels;
const struct color_conversion *color_conversion;
bool burst_ena;
+ int pipe = intel_crtc->pipe;
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
@@ -1149,14 +1150,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
(video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
{
- int pipeconf_reg = (intel_crtc->pipe == 0) ?
- PIPEACONF : PIPEBCONF;
- int dspcntr_reg = (intel_crtc->plane == 0) ?
- DSPACNTR : DSPBCNTR;
+ int pipeconf_reg = PIPECONF(pipe);
+ int dspcntr_reg = DSPCNTR(pipe);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
- int dspbase_reg = (intel_crtc->plane == 0) ?
- DSPAADDR : DSPBADDR;
+ int dspbase_reg = DSPADDR(pipe);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 08868ac3048a..1e1eb1d7e971 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -703,7 +703,7 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
static int mga_do_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t *dma_bs)
{
- const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
+ const int is_agp = (dma_bs->agp_mode != 0) && drm_pci_device_is_agp(dev);
int err;
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 0aaf5f67a436..42d31874edf2 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -75,10 +75,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -88,15 +84,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver mga_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init mga_init(void)
{
driver.num_ioctls = mga_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &mga_pci_driver);
}
static void __exit mga_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &mga_pci_driver);
}
module_init(mga_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 6bdab891c64e..8314a49b6b9a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -282,7 +282,7 @@ static void still_alive(void)
{
#if 0
sync();
- msleep(2);
+ mdelay(2);
#endif
}
@@ -1904,7 +1904,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
BIOSLOG(bios, "0x%04X: "
"Condition not met, sleeping for 20ms\n",
offset);
- msleep(20);
+ mdelay(20);
}
}
@@ -1938,7 +1938,7 @@ init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
offset, time);
- msleep(time);
+ mdelay(time);
return 3;
}
@@ -2962,7 +2962,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (time < 1000)
udelay(time);
else
- msleep((time + 900) / 1000);
+ mdelay((time + 900) / 1000);
return 3;
}
@@ -3856,7 +3856,7 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entr
if (script == LVDS_PANEL_OFF) {
/* off-on delay in ms */
- msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
+ mdelay(ROM16(bios->data[bios->fp.xlated_entry + 7]));
}
#ifdef __powerpc__
/* Powerbook specific quirks */
@@ -5950,6 +5950,11 @@ apply_dcb_connector_quirks(struct nvbios *bios, int idx)
}
}
+static const u8 hpd_gpio[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+};
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
@@ -5986,23 +5991,9 @@ parse_dcb_connector_table(struct nvbios *bios)
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index2 = (cte->entry & 0x00000f00) >> 8;
- switch (cte->entry & 0x00033000) {
- case 0x00001000:
- cte->gpio_tag = 0x07;
- break;
- case 0x00002000:
- cte->gpio_tag = 0x08;
- break;
- case 0x00010000:
- cte->gpio_tag = 0x51;
- break;
- case 0x00020000:
- cte->gpio_tag = 0x52;
- break;
- default:
- cte->gpio_tag = 0xff;
- break;
- }
+
+ cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
+ cte->gpio_tag = hpd_gpio[cte->gpio_tag];
if (cte->type == 0xff)
continue;
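
The removed switch handled only four GPIO tag encodings; the replacement folds the tag bits, now including bits 24-26 of the wider 0x07033000 mask, through ffs() into the hpd_gpio table. ffs() is 1-based and returns 0 when no bit is set, which lands on hpd_gpio[0] == 0xff and preserves the old default. A standalone check of the equivalence:

#include <assert.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

static const unsigned char hpd_gpio[16] = {
	0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
};

static unsigned char gpio_tag(unsigned int entry)
{
	/* ffs() is 1-based; 0 (no bit set) indexes the 0xff default */
	return hpd_gpio[ffs((entry & 0x07033000) >> 12)];
}

int main(void)
{
	/* the four cases the old switch handled, plus its default */
	assert(gpio_tag(0x00001000) == 0x07);
	assert(gpio_tag(0x00002000) == 0x08);
	assert(gpio_tag(0x00010000) == 0x51);
	assert(gpio_tag(0x00020000) == 0x52);
	assert(gpio_tag(0x00000000) == 0xff);
	puts("table lookup matches the removed switch");
	return 0;
}
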
@@ -6702,11 +6693,11 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
- mutex_lock(&bios->lock);
+ spin_lock_bh(&bios->lock);
bios->display.output = dcbent;
parse_init_table(bios, table, &iexec);
bios->display.output = NULL;
- mutex_unlock(&bios->lock);
+ spin_unlock_bh(&bios->lock);
}
static bool NVInitVBIOS(struct drm_device *dev)
@@ -6715,7 +6706,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
- mutex_init(&bios->lock);
+ spin_lock_init(&bios->lock);
bios->dev = dev;
if (!NVShadowVBIOS(dev, bios->data))
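
The msleep()-to-mdelay() conversions throughout this file are forced by the lock change at the end of it: bios->lock turns from a mutex into a spinlock taken with spin_lock_bh(), so nouveau_bios_run_init_table() may now run in atomic context, where sleeping is illegal. mdelay() busy-waits instead of scheduling, which burns CPU but is safe under the spinlock; the delays in VBIOS init scripts are short and rare enough for that trade to be tolerable. The constraint in miniature (kernel-style sketch, not literal driver code):

spin_lock_bh(&bios->lock);      /* atomic context from here on */
/* msleep(20); */               /* would schedule() under a spinlock: a bug */
mdelay(20);                     /* busy-wait: safe in atomic context */
spin_unlock_bh(&bios->lock);
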
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 50a648e01c49..8a54fa7edf5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -251,7 +251,7 @@ struct nvbios {
uint8_t digital_min_front_porch;
bool fp_no_ddc;
- struct mutex lock;
+ spinlock_t lock;
uint8_t data[NV_PROM_SIZE];
unsigned int length;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a52184007f5f..2ad49cbf7c8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -57,8 +57,8 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
- int *page_shift)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
+ int *align, int *size, int *page_shift)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
@@ -83,7 +83,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
}
} else {
if (likely(dev_priv->chan_vm)) {
- if (*size > 256 * 1024)
+ if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
*page_shift = dev_priv->chan_vm->lpg_shift;
else
*page_shift = dev_priv->chan_vm->spg_shift;
@@ -101,8 +101,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
@@ -113,16 +112,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
return -ENOMEM;
INIT_LIST_HEAD(&nvbo->head);
INIT_LIST_HEAD(&nvbo->entry);
- nvbo->mappable = mappable;
- nvbo->no_vm = no_vm;
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &dev_priv->ttm.bdev;
- nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
+ nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
align >>= PAGE_SHIFT;
- if (!nvbo->no_vm && dev_priv->chan_vm) {
+ if (dev_priv->chan_vm) {
ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
NV_MEM_ACCESS_RW, &nvbo->vma);
if (ret) {
@@ -144,11 +141,8 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
*pnvbo = nvbo;
return 0;
}
@@ -318,11 +312,8 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
if (ret)
return ret;
- if (nvbo->vma.node) {
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
- nvbo->bo.offset = nvbo->vma.offset;
- }
-
+ if (nvbo->vma.node)
+ nvbo->bo.offset = nvbo->vma.offset;
return 0;
}
@@ -385,7 +376,8 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
case NOUVEAU_GART_AGP:
return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
return nouveau_sgdma_init_ttm(dev);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
@@ -431,7 +423,10 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- man->func = &ttm_bo_manager_func;
+ if (dev_priv->card_type >= NV_50)
+ man->func = &nouveau_gart_manager;
+ else
+ man->func = &ttm_bo_manager_func;
switch (dev_priv->gart_info.type) {
case NOUVEAU_GART_AGP:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -439,7 +434,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
break;
- case NOUVEAU_GART_SGDMA:
+ case NOUVEAU_GART_PDMA:
+ case NOUVEAU_GART_HW:
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
@@ -501,45 +497,22 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
}
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
- struct nouveau_channel *chan, struct ttm_mem_reg *mem)
-{
- struct nouveau_bo *nvbo = nouveau_bo(bo);
-
- if (nvbo->no_vm) {
- if (mem->mem_type == TTM_PL_TT)
- return NvDmaGART;
- return NvDmaVRAM;
- }
-
- if (mem->mem_type == TTM_PL_TT)
- return chan->gart_handle;
- return chan->vram_handle;
-}
-
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 src_offset = old_mem->start << PAGE_SHIFT;
- u64 dst_offset = new_mem->start << PAGE_SHIFT;
u32 page_count = new_mem->num_pages;
+ u64 src_offset, dst_offset;
int ret;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
page_count = new_mem->num_pages;
while (page_count) {
@@ -574,33 +547,18 @@ static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *old_node = old_mem->mm_node;
+ struct nouveau_mem *new_node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset, dst_offset;
int ret;
- src_offset = old_mem->start << PAGE_SHIFT;
- dst_offset = new_mem->start << PAGE_SHIFT;
- if (!nvbo->no_vm) {
- if (old_mem->mem_type == TTM_PL_VRAM)
- src_offset = nvbo->vma.offset;
- else
- src_offset += dev_priv->gart_info.aper_base;
-
- if (new_mem->mem_type == TTM_PL_VRAM)
- dst_offset = nvbo->vma.offset;
- else
- dst_offset += dev_priv->gart_info.aper_base;
- }
-
- ret = RING_SPACE(chan, 3);
- if (ret)
- return ret;
-
- BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
- OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+ src_offset = old_node->tmp_vma.offset;
+ if (new_node->tmp_vma.node)
+ dst_offset = new_node->tmp_vma.offset;
+ else
+ dst_offset = nvbo->vma.offset;
while (length) {
u32 amount, stride, height;
@@ -681,6 +639,15 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return 0;
}
+static inline uint32_t
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+ struct nouveau_channel *chan, struct ttm_mem_reg *mem)
+{
+ if (mem->mem_type == TTM_PL_TT)
+ return chan->gart_handle;
+ return chan->vram_handle;
+}
+
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
@@ -734,15 +701,43 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
int ret;
chan = nvbo->channel;
- if (!chan || nvbo->no_vm) {
+ if (!chan) {
chan = dev_priv->channel;
mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
}
+ /* create a temporary vma for the old memory; this will get
+ * cleaned up after ttm destroys the ttm_mem_reg
+ */
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_mem *node = old_mem->mm_node;
+ if (!node->tmp_vma.node) {
+ u32 page_shift = nvbo->vma.node->type;
+ if (old_mem->mem_type == TTM_PL_TT)
+ page_shift = nvbo->vma.vm->spg_shift;
+
+ ret = nouveau_vm_get(chan->vm,
+ old_mem->num_pages << PAGE_SHIFT,
+ page_shift, NV_MEM_ACCESS_RO,
+ &node->tmp_vma);
+ if (ret)
+ goto out;
+ }
+
+ if (old_mem->mem_type == TTM_PL_VRAM)
+ nouveau_vm_map(&node->tmp_vma, node);
+ else {
+ nouveau_vm_map_sg(&node->tmp_vma, 0,
+ old_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+ }
+
if (dev_priv->card_type < NV_50)
ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
else
@@ -756,6 +751,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
no_wait_gpu, new_mem);
}
+out:
if (chan == dev_priv->channel)
mutex_unlock(&chan->mutex);
return ret;
@@ -766,6 +762,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
@@ -785,7 +782,23 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_mem *node = tmp_mem.mm_node;
+ struct nouveau_vma *vma = &nvbo->vma;
+ if (vma->node->type != vma->vm->spg_shift)
+ vma = &node->tmp_vma;
+ nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
+ node, node->pages);
+ }
+
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+
+ if (dev_priv->card_type >= NV_50) {
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ nouveau_vm_unmap(&nvbo->vma);
+ }
+
if (ret)
goto out;
@@ -828,6 +841,36 @@ out:
return ret;
}
+static void
+nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_mem *node = new_mem->mm_node;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+
+ if (dev_priv->card_type < NV_50)
+ return;
+
+ switch (new_mem->mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_vm_map(vma, node);
+ break;
+ case TTM_PL_TT:
+ if (vma->node->type != vm->spg_shift) {
+ nouveau_vm_unmap(vma);
+ vma = &node->tmp_vma;
+ }
+ nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
+ node, node->pages);
+ break;
+ default:
+ nouveau_vm_unmap(&nvbo->vma);
+ break;
+ }
+}
+
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct nouveau_tile_reg **new_tile)
@@ -835,19 +878,13 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- uint64_t offset;
+ u64 offset = new_mem->start << PAGE_SHIFT;
- if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
- /* Nothing to do. */
- *new_tile = NULL;
+ *new_tile = NULL;
+ if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- }
-
- offset = new_mem->start << PAGE_SHIFT;
- if (dev_priv->chan_vm) {
- nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
- } else if (dev_priv->card_type >= NV_10) {
+ if (dev_priv->card_type >= NV_10) {
*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -864,11 +901,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct drm_device *dev = dev_priv->dev;
- if (dev_priv->card_type >= NV_10 &&
- dev_priv->card_type < NV_50) {
- nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
- *old_tile = new_tile;
- }
+ nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+ *old_tile = new_tile;
}
static int
@@ -882,9 +916,11 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
struct nouveau_tile_reg *new_tile = NULL;
int ret = 0;
- ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
- if (ret)
- return ret;
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (ret)
+ return ret;
+ }
/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
@@ -915,10 +951,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
- if (ret)
- nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
- else
- nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ if (dev_priv->card_type < NV_50) {
+ if (ret)
+ nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+ else
+ nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+ }
return ret;
}
@@ -959,7 +997,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
case TTM_PL_VRAM:
{
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
u8 page_shift;
if (!dev_priv->bar1_vm) {
@@ -970,23 +1008,23 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
}
if (dev_priv->card_type == NV_C0)
- page_shift = vram->page_shift;
+ page_shift = node->page_shift;
else
page_shift = 12;
ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
page_shift, NV_MEM_ACCESS_RW,
- &vram->bar_vma);
+ &node->bar_vma);
if (ret)
return ret;
- nouveau_vm_map(&vram->bar_vma, vram);
+ nouveau_vm_map(&node->bar_vma, node);
if (ret) {
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
return ret;
}
- mem->bus.offset = vram->bar_vma.offset;
+ mem->bus.offset = node->bar_vma.offset;
if (dev_priv->card_type == NV_50) /*XXX*/
mem->bus.offset -= 0x0020000000ULL;
mem->bus.base = pci_resource_start(dev->pdev, 1);
@@ -1003,16 +1041,16 @@ static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct nouveau_vram *vram = mem->mm_node;
+ struct nouveau_mem *node = mem->mm_node;
if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
return;
- if (!vram->bar_vma.node)
+ if (!node->bar_vma.node)
return;
- nouveau_vm_unmap(&vram->bar_vma);
- nouveau_vm_put(&vram->bar_vma);
+ nouveau_vm_unmap(&node->bar_vma);
+ nouveau_vm_put(&node->bar_vma);
}
static int
@@ -1062,6 +1100,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
+ .move_notify = nouveau_bo_move_ntfy,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
.sync_obj_signaled = __nouveau_fence_signalled,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 3960d66d7aba..3837090d66af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,7 +35,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
- int ret;
+ int ret = 0;
if (dev_priv->card_type >= NV_50) {
if (dev_priv->card_type < NV_C0) {
@@ -90,8 +90,7 @@ nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
else
location = TTM_PL_FLAG_TT;
- ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
- true, &pushbuf);
+ ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf);
if (ret) {
NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
return NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 505c6bfb4d75..764c15d537ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,7 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nv50_display.h"
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -61,18 +62,59 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
+nouveau_framebuffer_init(struct drm_device *dev,
+ struct nouveau_framebuffer *nv_fb,
+ struct drm_mode_fb_cmd *mode_cmd,
+ struct nouveau_bo *nvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret) {
return ret;
}
- drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
- nouveau_fb->nvbo = nvbo;
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ nv_fb->nvbo = nvbo;
+
+ if (dev_priv->card_type >= NV_50) {
+ u32 tile_flags = nouveau_bo_tile_layout(nvbo);
+ if (tile_flags == 0x7a00 ||
+ tile_flags == 0xfe00)
+ nv_fb->r_dma = NvEvoFB32;
+ else
+ if (tile_flags == 0x7000)
+ nv_fb->r_dma = NvEvoFB16;
+ else
+ nv_fb->r_dma = NvEvoVRAM_LP;
+
+ switch (fb->depth) {
+ case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break;
+ case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break;
+ case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break;
+ case 24:
+ case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break;
+ case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break;
+ default:
+ NV_ERROR(dev, "unknown depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ if (dev_priv->chipset == 0x50)
+ nv_fb->r_format |= (tile_flags << 8);
+
+ if (!tile_flags)
+ nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ else {
+ u32 mode = nvbo->tile_mode;
+ if (dev_priv->card_type >= NV_C0)
+ mode >>= 4;
+ nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+ }
+ }
+
return 0;
}
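
For nv50+ the framebuffer init now precomputes everything the display code will need: a DMA object handle picked from the tiling layout, an EVO depth format, and an encoded pitch. Linear surfaces get flag 0x00100000 ORed with the byte pitch; tiled ones pack pitch/4 with the tile mode, which Fermi keeps shifted down by 4. A small sketch of just the pitch encoding, assuming the semantics shown in the hunk (values are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t evo_pitch(uint32_t pitch, uint32_t tile_flags,
			  uint32_t tile_mode, int is_nvc0)
{
	if (!tile_flags)
		return 0x00100000 | pitch;       /* linear: flag + byte pitch */
	if (is_nvc0)
		tile_mode >>= 4;                 /* NV_C0 stores mode shifted */
	return ((pitch / 4) << 4) | tile_mode;   /* tiled: packed form */
}

int main(void)
{
	/* 1024-pixel-wide XRGB8888 scanout: pitch = 1024 * 4 bytes */
	printf("linear: 0x%08x\n", evo_pitch(4096, 0x0000, 0, 0));
	printf("tiled:  0x%08x\n", evo_pitch(4096, 0x7000, 0, 0));
	return 0;
}
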
@@ -182,6 +224,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct drm_device *dev = chan->dev;
unsigned long flags;
int ret;
@@ -201,9 +244,12 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING(chan, 0);
- FIRE_RING(chan);
+ if (dev_priv->card_type < NV_C0)
+ BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ else
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0500, 1);
+ OUT_RING (chan, 0);
+ FIRE_RING (chan);
ret = nouveau_fence_new(chan, pfence, true);
if (ret)
@@ -244,7 +290,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, s->event, nouveau_crtc(crtc)->index,
+ { { }, event, nouveau_crtc(crtc)->index,
fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
new_bo->bo.offset };
@@ -255,6 +301,14 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
mutex_lock(&chan->mutex);
/* Emit a page flip */
+ if (dev_priv->card_type >= NV_50) {
+ ret = nv50_display_flip_next(crtc, fb, chan);
+ if (ret) {
+ nouveau_channel_put(&chan);
+ goto fail_unreserve;
+ }
+ }
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
nouveau_channel_put(&chan);
if (ret)
@@ -305,7 +359,8 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
}
list_del(&s->head);
- *ps = *s;
+ if (ps)
+ *ps = *s;
kfree(s);
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b368ed74aad7..ce38e97b9428 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -97,13 +97,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
OUT_RING(chan, 0);
/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
- ret = RING_SPACE(chan, 4);
+ ret = RING_SPACE(chan, 6);
if (ret)
return ret;
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
- OUT_RING(chan, NvM2MF);
- BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
- OUT_RING(chan, NvNotify0);
+ OUT_RING (chan, NvM2MF);
+ BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, chan->vram_handle);
+ OUT_RING (chan, chan->gart_handle);
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index c36f1763feaa..23d4edf992b7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -61,8 +61,6 @@ enum {
NvM2MF = 0x80000001,
NvDmaFB = 0x80000002,
NvDmaTT = 0x80000003,
- NvDmaVRAM = 0x80000004,
- NvDmaGART = 0x80000005,
NvNotify0 = 0x80000006,
Nv2D = 0x80000007,
NvCtxSurf2D = 0x80000008,
@@ -73,12 +71,15 @@ enum {
NvImageBlit = 0x8000000d,
NvSw = 0x8000000e,
NvSema = 0x8000000f,
+ NvEvoSema0 = 0x80000010,
+ NvEvoSema1 = 0x80000011,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
NvEvoFB16 = 0x01000001,
NvEvoFB32 = 0x01000002,
- NvEvoVRAM_LP = 0x01000003
+ NvEvoVRAM_LP = 0x01000003,
+ NvEvoSync = 0xcafe0000
};
#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 38d599554bce..7beb82a0315d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -175,7 +175,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct bit_displayport_encoder_table_entry *dpse;
struct bit_displayport_encoder_table *dpe;
int ret, i, dpe_headerlen, vs = 0, pre = 0;
uint8_t request[2];
@@ -183,7 +182,6 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
if (!dpe)
return false;
- dpse = (void *)((char *)dpe + dpe_headerlen);
ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index f658a04eecf9..155ebdcbf06f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -408,14 +408,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = nouveau_pci_probe,
- .remove = nouveau_pci_remove,
- .suspend = nouveau_pci_suspend,
- .resume = nouveau_pci_resume
- },
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
@@ -432,6 +424,15 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver nouveau_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = nouveau_pci_probe,
+ .remove = nouveau_pci_remove,
+ .suspend = nouveau_pci_suspend,
+ .resume = nouveau_pci_resume
+};
+
static int __init nouveau_init(void)
{
driver.num_ioctls = nouveau_max_ioctl;
@@ -449,7 +450,7 @@ static int __init nouveau_init(void)
return 0;
nouveau_register_dsm_handler();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &nouveau_pci_driver);
}
static void __exit nouveau_exit(void)
@@ -457,7 +458,7 @@ static void __exit nouveau_exit(void)
if (!nouveau_modeset)
return;
- drm_exit(&driver);
+ drm_pci_exit(&driver, &nouveau_pci_driver);
nouveau_unregister_dsm_handler();
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 982d70b12722..06111887b789 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -57,7 +57,7 @@ struct nouveau_fpriv {
#include "nouveau_util.h"
struct nouveau_grctx;
-struct nouveau_vram;
+struct nouveau_mem;
#include "nouveau_vm.h"
#define MAX_NUM_DCB_ENTRIES 16
@@ -65,13 +65,16 @@ struct nouveau_vram;
#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15
-struct nouveau_vram {
+struct nouveau_mem {
struct drm_device *dev;
struct nouveau_vma bar_vma;
+ struct nouveau_vma tmp_vma;
u8 page_shift;
+ struct drm_mm_node *tag;
struct list_head regions;
+ dma_addr_t *pages;
u32 memtype;
u64 offset;
u64 size;
@@ -90,6 +93,7 @@ struct nouveau_tile_reg {
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
+ u32 valid_domains;
u32 placements[3];
u32 busy_placements[3];
struct ttm_bo_kmap_obj kmap;
@@ -104,8 +108,6 @@ struct nouveau_bo {
struct nouveau_channel *channel;
struct nouveau_vma vma;
- bool mappable;
- bool no_vm;
uint32_t tile_mode;
uint32_t tile_flags;
@@ -387,6 +389,7 @@ struct nouveau_pgraph_engine {
};
struct nouveau_display_engine {
+ void *priv;
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
@@ -509,8 +512,8 @@ struct nouveau_crypt_engine {
struct nouveau_vram_engine {
int (*init)(struct drm_device *);
int (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **);
- void (*put)(struct drm_device *, struct nouveau_vram **);
+ u32 type, struct nouveau_mem **);
+ void (*put)(struct drm_device *, struct nouveau_mem **);
bool (*flags_valid)(struct drm_device *, u32 tile_flags);
};
@@ -652,8 +655,6 @@ struct drm_nouveau_private {
/* interrupt handling */
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
- struct workqueue_struct *wq;
- struct work_struct irq_work;
struct list_head vbl_waiting;
@@ -691,15 +692,22 @@ struct drm_nouveau_private {
struct {
enum {
NOUVEAU_GART_NONE = 0,
- NOUVEAU_GART_AGP,
- NOUVEAU_GART_SGDMA
+ NOUVEAU_GART_AGP, /* AGP */
+ NOUVEAU_GART_PDMA, /* paged dma object */
+ NOUVEAU_GART_HW /* on-chip gart/vm */
} type;
uint64_t aper_base;
uint64_t aper_size;
uint64_t aper_free;
+ struct ttm_backend_func *func;
+
+ struct {
+ struct page *page;
+ dma_addr_t addr;
+ } dummy;
+
struct nouveau_gpuobj *sg_ctxdma;
- struct nouveau_vma vma;
} gart_info;
/* nv10-nv40 tiling regions */
@@ -740,14 +748,6 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
- struct nouveau_channel *evo;
- u32 evo_alloc;
- struct {
- struct dcb_entry *dcb;
- u16 script;
- u32 pclk;
- } evo_irq;
-
struct {
struct dentry *channel_root;
} debugfs;
@@ -847,6 +847,7 @@ extern void nv10_mem_put_tile_region(struct drm_device *dev,
struct nouveau_tile_reg *tile,
struct nouveau_fence *fence);
extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
/* nouveau_notifier.c */
extern int nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -1076,7 +1077,7 @@ extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
/* nv50_fb.c */
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
-extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
+extern void nv50_fb_vm_trap(struct drm_device *, int display);
/* nvc0_fb.c */
extern int nvc0_fb_init(struct drm_device *);
@@ -1295,7 +1296,7 @@ extern struct ttm_bo_driver nouveau_bo_driver;
extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
int size, int align, uint32_t flags,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
extern int nouveau_bo_unpin(struct nouveau_bo *);
extern int nouveau_bo_map(struct nouveau_bo *);
@@ -1356,9 +1357,9 @@ static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
- int size, int align, uint32_t flags,
+ int size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
- bool no_vm, bool mappable, struct nouveau_bo **);
+ struct nouveau_bo **);
extern int nouveau_gem_object_new(struct drm_gem_object *);
extern void nouveau_gem_object_del(struct drm_gem_object *);
extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index d432134b71e0..a3a88ad00f86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -30,6 +30,9 @@
struct nouveau_framebuffer {
struct drm_framebuffer base;
struct nouveau_bo *nvbo;
+ u32 r_dma;
+ u32 r_format;
+ u32 r_pitch;
};
static inline struct nouveau_framebuffer *
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 60769d2f9a66..889c4454682e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -296,8 +296,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
- ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nvbo);
+ ret = nouveau_gem_new(dev, dev_priv->channel, size, 0,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
if (ret) {
NV_ERROR(dev, "failed to allocate framebuffer\n");
goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 221b8462ea37..4b9f4493c9f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -27,13 +27,15 @@
#include "drmP.h"
#include "drm.h"
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"
#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
- nouveau_private(dev)->card_type < NV_C0)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
struct nouveau_fence {
struct nouveau_channel *channel;
@@ -230,7 +232,8 @@ int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
unsigned long timeout = jiffies + (3 * DRM_HZ);
- unsigned long sleep_time = jiffies + 1;
+ unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+ ktime_t t;
int ret = 0;
while (1) {
@@ -244,8 +247,13 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
__set_current_state(intr ? TASK_INTERRUPTIBLE
: TASK_UNINTERRUPTIBLE);
- if (lazy && time_after_eq(jiffies, sleep_time))
- schedule_timeout(1);
+ if (lazy) {
+ t = ktime_set(0, sleep_time);
+ schedule_hrtimeout(&t, HRTIMER_MODE_REL);
+ sleep_time *= 2;
+ if (sleep_time > NSEC_PER_MSEC)
+ sleep_time = NSEC_PER_MSEC;
+ }
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
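
The wait loop previously slept a full jiffy per poll once the first jiffy had elapsed; the new version starts at one microsecond (NSEC_PER_MSEC / 1000) and doubles each iteration, capped at one millisecond, so fences that complete quickly are noticed with microsecond latency while long waits still back off. The same policy in portable userspace C (nanosleep standing in for schedule_hrtimeout(); the fence stub is a placeholder):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000L

/* Placeholder for nouveau_fence_signalled(): completes after N polls. */
static bool fence_signalled(int *polls)
{
	return --(*polls) <= 0;
}

int main(void)
{
	long sleep_ns = NSEC_PER_MSEC / 1000;   /* start at 1us */
	int polls = 8;

	while (!fence_signalled(&polls)) {
		struct timespec ts = { 0, sleep_ns };

		printf("sleeping %ld ns\n", sleep_ns);
		nanosleep(&ts, NULL);           /* ~schedule_hrtimeout() */
		sleep_ns *= 2;                  /* exponential backoff... */
		if (sleep_ns > NSEC_PER_MSEC)
			sleep_ns = NSEC_PER_MSEC; /* ...capped at 1ms */
	}
	return 0;
}
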
@@ -259,11 +267,12 @@ __nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
static struct nouveau_semaphore *
-alloc_semaphore(struct drm_device *dev)
+semaphore_alloc(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_semaphore *sema;
- int ret;
+ int size = (dev_priv->chipset < 0x84) ? 4 : 16;
+ int ret, i;
if (!USE_SEMA(dev))
return NULL;
@@ -277,9 +286,9 @@ alloc_semaphore(struct drm_device *dev)
goto fail;
spin_lock(&dev_priv->fence.lock);
- sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+ sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
if (sema->mem)
- sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
+ sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
spin_unlock(&dev_priv->fence.lock);
if (!sema->mem)
@@ -287,7 +296,8 @@ alloc_semaphore(struct drm_device *dev)
kref_init(&sema->ref);
sema->dev = dev;
- nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+ for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
+ nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
return sema;
fail:
@@ -296,7 +306,7 @@ fail:
}
static void
-free_semaphore(struct kref *ref)
+semaphore_free(struct kref *ref)
{
struct nouveau_semaphore *sema =
container_of(ref, struct nouveau_semaphore, ref);
@@ -318,61 +328,107 @@ semaphore_work(void *priv, bool signalled)
if (unlikely(!signalled))
nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
}
static int
-emit_semaphore(struct nouveau_channel *chan, int method,
- struct nouveau_semaphore *sema)
+semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
- struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
- struct nouveau_fence *fence;
- bool smart = (dev_priv->card_type >= NV_50);
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
int ret;
- ret = RING_SPACE(chan, smart ? 8 : 4);
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 3);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2);
+ OUT_RING (chan, sema->mem->start);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 1); /* ACQUIRE_EQ */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
+ }
+
+ /* Delay semaphore destruction until its work is done */
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret)
return ret;
- if (smart) {
- BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
- OUT_RING(chan, NvSema);
- }
- BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
- OUT_RING(chan, sema->mem->start);
-
- if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
- /*
- * NV50 tries to be too smart and context-switch
- * between semaphores instead of doing a "first come,
- * first served" strategy like previous cards
- * do.
- *
- * That's bad because the ACQUIRE latency can get as
- * large as the PFIFO context time slice in the
- * typical DRI2 case where you have several
- * outstanding semaphores at the same moment.
- *
- * If we're going to ACQUIRE, force the card to
- * context switch before, just in case the matching
- * RELEASE is already scheduled to be executed in
- * another channel.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
- }
+ kref_get(&sema->ref);
+ nouveau_fence_work(fence, semaphore_work, sema);
+ nouveau_fence_unref(&fence);
+ return 0;
+}
+
+static int
+semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ if (dev_priv->chipset < 0x84) {
+ ret = RING_SPACE(chan, 4);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+ OUT_RING (chan, sema->mem->start);
+ BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
+ OUT_RING (chan, 1);
+ } else
+ if (dev_priv->chipset < 0xc0) {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
+
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_RING(chan, NvSubSw, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 2); /* RELEASE */
+ } else {
+ struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
+ u64 offset = vma->offset + sema->mem->start;
- BEGIN_RING(chan, NvSubSw, method, 1);
- OUT_RING(chan, 1);
-
- if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
- /*
- * Force the card to context switch, there may be
- * another channel waiting for the semaphore we just
- * released.
- */
- BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
- OUT_RING(chan, 0);
+ ret = RING_SPACE(chan, 5);
+ if (ret)
+ return ret;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 1);
+ OUT_RING (chan, 0x1002); /* RELEASE */
}
/* Delay semaphore destruction until its work is done */
@@ -383,7 +439,6 @@ emit_semaphore(struct nouveau_channel *chan, int method,
kref_get(&sema->ref);
nouveau_fence_work(fence, semaphore_work, sema);
nouveau_fence_unref(&fence);
-
return 0;
}
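
emit_semaphore() is split into semaphore_acquire()/semaphore_release() with per-chipset encodings (legacy NV_SW methods below 0x84, 64-bit semaphore addresses up to Fermi, BEGIN_NVC0 beyond), but the contract stays the same: the waiting channel stalls until the semaphore word equals the expected value (ACQUIRE_EQ) and the signalling channel writes that value (RELEASE). The handshake reduced to two CPU threads and C11 atomics (purely illustrative; on the GPU this runs in PFIFO):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint sema;   /* the shared semaphore word */

static void *waiter(void *arg)
{
	/* ACQUIRE_EQ: stall until the word reads 1 */
	while (atomic_load_explicit(&sema, memory_order_acquire) != 1)
		;
	puts("acquired");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	/* RELEASE: the signalling side writes the expected value */
	atomic_store_explicit(&sema, 1, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}

(Build with -pthread.)
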
@@ -400,7 +455,7 @@ nouveau_fence_sync(struct nouveau_fence *fence,
nouveau_fence_signalled(fence)))
goto out;
- sema = alloc_semaphore(dev);
+ sema = semaphore_alloc(dev);
if (!sema) {
/* Early card or broken userspace, fall back to
* software sync. */
@@ -418,17 +473,17 @@ nouveau_fence_sync(struct nouveau_fence *fence,
}
/* Make wchan wait until it gets signalled */
- ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+ ret = semaphore_acquire(wchan, sema);
if (ret)
goto out_unlock;
/* Signal the semaphore from chan */
- ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+ ret = semaphore_release(chan, sema);
out_unlock:
mutex_unlock(&chan->mutex);
out_unref:
- kref_put(&sema->ref, free_semaphore);
+ kref_put(&sema->ref, semaphore_free);
out:
if (chan)
nouveau_channel_put_unlocked(&chan);
@@ -449,22 +504,23 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
struct nouveau_gpuobj *obj = NULL;
int ret;
+ if (dev_priv->card_type >= NV_C0)
+ goto out_initialised;
+
/* Create an NV_SW object for various sync purposes */
ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
if (ret)
return ret;
/* we leave subchannel empty for nvc0 */
- if (dev_priv->card_type < NV_C0) {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
- BEGIN_RING(chan, NvSubSw, 0, 1);
- OUT_RING(chan, NvSw);
- }
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
/* Create a DMA object for the shared cross-channel sync area. */
- if (USE_SEMA(dev)) {
+ if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
@@ -484,14 +540,20 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
return ret;
BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
OUT_RING(chan, NvSema);
+ } else {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+ OUT_RING (chan, chan->vram_handle); /* whole VM */
}
FIRE_RING(chan);
+out_initialised:
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
atomic_set(&chan->fence.last_sequence_irq, 0);
-
return 0;
}
@@ -519,12 +581,13 @@ int
nouveau_fence_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
int ret;
/* Create a shared VRAM heap for cross-channel sync. */
if (USE_SEMA(dev)) {
- ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
- 0, 0, false, true, &dev_priv->fence.bo);
+ ret = nouveau_bo_new(dev, NULL, size, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->fence.bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 506c508b7eda..e8b04f4aed7e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -61,19 +61,36 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
- int size, int align, uint32_t flags, uint32_t tile_mode,
- uint32_t tile_flags, bool no_vm, bool mappable,
- struct nouveau_bo **pnvbo)
+ int size, int align, uint32_t domain, uint32_t tile_mode,
+ uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
+ u32 flags = 0;
int ret;
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+ flags |= TTM_PL_FLAG_VRAM;
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
+ flags |= TTM_PL_FLAG_TT;
+ if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
+ flags |= TTM_PL_FLAG_SYSTEM;
+
ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
- tile_flags, no_vm, mappable, pnvbo);
+ tile_flags, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
+ /* we restrict allowed domains on nv50+ to only the types
+ * that were requested at creation time. not possible on
+ * earlier chips without busting the ABI.
+ */
+ nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ if (dev_priv->card_type >= NV_50)
+ nvbo->valid_domains &= domain;
+
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo);
@@ -97,7 +114,7 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
- rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
+ rep->map_handle = nvbo->bo.addr_space_offset;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
@@ -111,19 +128,11 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
struct nouveau_channel *chan = NULL;
- uint32_t flags = 0;
int ret = 0;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
- if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
- flags |= TTM_PL_FLAG_SYSTEM;
-
if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
@@ -135,10 +144,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
return PTR_ERR(chan);
}
- ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
- req->info.tile_mode, req->info.tile_flags, false,
- (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
- &nvbo);
+ ret = nouveau_gem_new(dev, chan, req->info.size, req->align,
+ req->info.domain, req->info.tile_mode,
+ req->info.tile_flags, &nvbo);
if (chan)
nouveau_channel_put(&chan);
if (ret)
@@ -161,7 +169,7 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
- uint32_t domains = valid_domains &
+ uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
@@ -592,7 +600,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (push[i].bo_index >= req->nr_buffers) {
NV_ERROR(dev, "push %d buffer not in list\n", i);
ret = -EINVAL;
- goto out;
+ goto out_prevalid;
}
bo[push[i].bo_index].read_domains |= (1 << 31);
@@ -604,7 +612,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (ret) {
if (ret != -ERESTARTSYS)
NV_ERROR(dev, "validate: %d\n", ret);
- goto out;
+ goto out_prevalid;
}
/* Apply any relocations that are required */
@@ -697,6 +705,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
out:
validate_fini(&op, fence);
nouveau_fence_unref(&fence);
+
+out_prevalid:
kfree(bo);
kfree(push);
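
nouveau_gem_new() now takes NOUVEAU_GEM_DOMAIN_* directly and derives the TTM placement flags itself (the mapping that previously lived in nouveau_gem_ioctl_new()), then records valid_domains so that on nv50+ a buffer can never migrate into a domain userspace did not request at creation. The mapping in isolation, with illustrative flag values (the real ones come from nouveau_drm.h and TTM):

#include <assert.h>
#include <stdint.h>

#define NOUVEAU_GEM_DOMAIN_CPU  0x1
#define NOUVEAU_GEM_DOMAIN_VRAM 0x2
#define NOUVEAU_GEM_DOMAIN_GART 0x4
#define TTM_PL_FLAG_SYSTEM      0x1
#define TTM_PL_FLAG_TT          0x2
#define TTM_PL_FLAG_VRAM        0x4

static uint32_t domain_to_flags(uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	/* no domain at all, or an explicit CPU request -> system RAM */
	if (!flags || (domain & NOUVEAU_GEM_DOMAIN_CPU))
		flags |= TTM_PL_FLAG_SYSTEM;
	return flags;
}

int main(void)
{
	assert(domain_to_flags(NOUVEAU_GEM_DOMAIN_VRAM) == TTM_PL_FLAG_VRAM);
	assert(domain_to_flags(0) == TTM_PL_FLAG_SYSTEM);
	assert(domain_to_flags(NOUVEAU_GEM_DOMAIN_VRAM |
			       NOUVEAU_GEM_DOMAIN_CPU) ==
	       (TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM));
	return 0;
}
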
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index b0fb9bdcddb7..2683377f4131 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -152,7 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
@@ -393,11 +392,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits;
- if (dev_priv->card_type >= NV_50 &&
- pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
- dma_bits = 40;
- else
- dma_bits = 32;
+ dma_bits = 32;
+ if (dev_priv->card_type >= NV_50) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ dma_bits = 40;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset != 0x40 &&
+ dev_priv->chipset != 0x45) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
+ dma_bits = 39;
+ }
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
@@ -419,14 +424,32 @@ nouveau_mem_vram_init(struct drm_device *dev)
}
/* reserve space at end of VRAM for PRAMIN */
- if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
- dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
- dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
- else
- if (dev_priv->card_type >= NV_40)
- dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
- else
- dev_priv->ramin_rsvd_vram = (512 * 1024);
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
+ } else
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= dev_priv->engine.fifo.channels;
+
+ /* pciegart table */
+ if (drm_pci_device_is_pcie(dev))
+ rsvd += 512 * 1024;
+
+ /* object storage */
+ rsvd += 512 * 1024;
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
+ }
ret = dev_priv->engine.vram.init(dev);
if (ret)
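
The fixed per-chipset PRAMIN reservations give way to an estimate: a per-shader-unit context size (the magic constants borrowed from nv40_grctx.c) plus 16 KiB, multiplied by the FIFO channel count, plus space for the PCIe GART table and object storage, rounded up to a page. Worked through once with made-up unit and channel counts:

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs: the real vs comes from register 0x1540,
	 * channels from engine.fifo.channels. */
	unsigned vs = 8;
	unsigned channels = 32;
	unsigned long rsvd;

	rsvd = 0x4a40UL * vs;           /* grctx estimate, chipset >= 0x43 */
	rsvd += 16 * 1024;              /* per-channel fixed cost */
	rsvd *= channels;
	rsvd += 512 * 1024;             /* pciegart table (PCIe boards) */
	rsvd += 512 * 1024;             /* object storage */
	rsvd = (rsvd + 4095) & ~4095UL; /* round_up(rsvd, 4096) */

	printf("ramin_rsvd_vram = %lu KiB\n", rsvd / 1024);
	return 0;
}
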
@@ -455,13 +478,17 @@ nouveau_mem_vram_init(struct drm_device *dev)
return ret;
}
- ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &dev_priv->vga_ram);
- if (ret == 0)
- ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_WARN(dev, "failed to reserve VGA memory\n");
- nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_bo_new(dev, NULL, 256*1024, 0, TTM_PL_FLAG_VRAM,
+ 0, 0, &dev_priv->vga_ram);
+ if (ret == 0)
+ ret = nouveau_bo_pin(dev_priv->vga_ram,
+ TTM_PL_FLAG_VRAM);
+
+ if (ret) {
+ NV_WARN(dev, "failed to reserve VGA memory\n");
+ nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+ }
}
dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
@@ -480,7 +507,7 @@ nouveau_mem_gart_init(struct drm_device *dev)
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
+ if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -666,13 +693,14 @@ nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_mm *mm;
- u32 b_size;
+ u64 size, block, rsvd;
int ret;
- p_size = (p_size << PAGE_SHIFT) >> 12;
- b_size = dev_priv->vram_rblock_size >> 12;
+ rsvd = (256 * 1024); /* vga memory */
+ size = (p_size << PAGE_SHIFT) - rsvd;
+ block = dev_priv->vram_rblock_size;
- ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+ ret = nouveau_mm_init(&mm, rsvd >> 12, size >> 12, block >> 12);
if (ret)
return ret;
@@ -700,9 +728,15 @@ nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nouveau_mem *node = mem->mm_node;
struct drm_device *dev = dev_priv->dev;
- vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+
+ vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}
static int
@@ -715,7 +749,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct nouveau_vram *node;
+ struct nouveau_mem *node;
u32 size_nc = 0;
int ret;
@@ -724,7 +758,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
mem->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0xff, &node);
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
mem->mm_node = NULL;
return (ret == -ENOSPC) ? 0 : ret;
@@ -771,3 +805,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
nouveau_vram_manager_del,
nouveau_vram_manager_debug
};
+
+static int
+nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
+{
+ return 0;
+}
+
+static int
+nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
+{
+ return 0;
+}
+
+static void
+nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
+ struct ttm_mem_reg *mem)
+{
+ struct nouveau_mem *node = mem->mm_node;
+
+ if (node->tmp_vma.node) {
+ nouveau_vm_unmap(&node->tmp_vma);
+ nouveau_vm_put(&node->tmp_vma);
+ }
+ mem->mm_node = NULL;
+}
+
+static int
+nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *bo,
+ struct ttm_placement *placement,
+ struct ttm_mem_reg *mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_vma *vma = &nvbo->vma;
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mem *node;
+ int ret;
+
+ if (unlikely((mem->num_pages << PAGE_SHIFT) >=
+ dev_priv->gart_info.aper_size))
+ return -ENOMEM;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* This node must be for evicting large-paged VRAM
+ * to system memory. Due to an nv50 limitation of
+ * not being able to mix large/small pages within
+ * the same PDE, we need to create a temporary
+ * small-paged VMA for the eviction.
+ */
+ if (vma->node->type != vm->spg_shift) {
+ ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
+ vm->spg_shift, NV_MEM_ACCESS_RW,
+ &node->tmp_vma);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+ }
+
+ node->page_shift = nvbo->vma.node->type;
+ mem->mm_node = node;
+ mem->start = 0;
+ return 0;
+}
+
+void
+nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+}
+
+const struct ttm_mem_type_manager_func nouveau_gart_manager = {
+ nouveau_gart_manager_init,
+ nouveau_gart_manager_fini,
+ nouveau_gart_manager_new,
+ nouveau_gart_manager_del,
+ nouveau_gart_manager_debug
+};
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
index 798eaf39691c..1f7483aae9a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -53,13 +53,13 @@ void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
int nv50_vram_init(struct drm_device *);
int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
- u32 memtype, struct nouveau_vram **);
-void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
+void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
int nvc0_vram_init(struct drm_device *);
int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_vram **);
+ u32 memtype, struct nouveau_mem **);
bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 5ea167623a82..7ba3fc0b30c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -39,12 +39,11 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
int ret;
if (nouveau_vram_notify)
- flags = TTM_PL_FLAG_VRAM;
+ flags = NOUVEAU_GEM_DOMAIN_VRAM;
else
- flags = TTM_PL_FLAG_TT;
+ flags = NOUVEAU_GEM_DOMAIN_GART;
- ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags,
- 0, 0x0000, false, true, &ntfy);
+ ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
if (ret)
return ret;
@@ -100,6 +99,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
uint32_t *b_offset)
{
struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
uint32_t offset;
@@ -114,11 +114,16 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
return -ENOMEM;
}
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
- target = NV_MEM_TARGET_VRAM;
- else
- target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ if (dev_priv->card_type < NV_50) {
+ if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ target = NV_MEM_TARGET_VRAM;
+ else
+ target = NV_MEM_TARGET_GART;
+ offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
+ } else {
+ target = NV_MEM_TARGET_VM;
+ offset = chan->notifier_bo->vma.offset;
+ }
offset += mem->start;
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 30b6544467ca..4f00c87ed86e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
+#include "nv50_display.h"
struct nouveau_gpuobj_method {
struct list_head head;
@@ -490,16 +491,22 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
}
if (target == NV_MEM_TARGET_GART) {
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
- target = NV_MEM_TARGET_PCI_NOSNOOP;
- base += dev_priv->gart_info.aper_base;
- } else
- if (base != 0) {
- base = nouveau_sgdma_get_physical(dev, base);
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
+ if (base == 0) {
+ nouveau_gpuobj_ref(gart, pobj);
+ return 0;
+ }
+
+ base = nouveau_sgdma_get_physical(dev, base);
target = NV_MEM_TARGET_PCI;
} else {
- nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
- return 0;
+ base += dev_priv->gart_info.aper_base;
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
+ target = NV_MEM_TARGET_PCI_NOSNOOP;
+ else
+ target = NV_MEM_TARGET_PCI;
}
}
@@ -776,7 +783,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret;
+ int ret, i;
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
@@ -841,6 +848,25 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
nouveau_gpuobj_ref(NULL, &ramht);
if (ret)
return ret;
+
+ /* dma objects for display sync channel semaphore blocks */
+ for (i = 0; i < 2; i++) {
+ struct nouveau_gpuobj *sem = NULL;
+ struct nv50_display_crtc *dispc =
+ &nv50_display(dev)->crtc[i];
+ u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
+ NV_MEM_ACCESS_RW,
+ NV_MEM_TARGET_VRAM, &sem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
+ nouveau_gpuobj_ref(NULL, &sem);
+ if (ret)
+ return ret;
+ }
}
/* VRAM ctxdma */
@@ -909,7 +935,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index bef3e6910418..a24a81f5a89e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -114,7 +114,9 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
(gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) | chan->id;
+ ctx = (gpuobj->cinst << 10) |
+ (chan->id << 28) |
+ chan->id; /* HASH_TAG */
} else {
ctx = (gpuobj->cinst >> 4) |
((gpuobj->engine <<
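For reference, the reworked display-object hash entry above stores the channel id twice: once in the low bits as the hash tag and once in bits 31:28. A worked example with purely hypothetical values, not taken from the patch:

    /* Illustrative only: assembling the nv50 RAMHT context word for a
     * display-class object; cinst and chid are made-up values. */
    u32 cinst = 0x1234;	/* object's context instance */
    int chid  = 2;		/* owning channel id */
    u32 ctx   = (cinst << 10) | (chid << 28) | chid;
    /* ctx == 0x2048d002 */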
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 9a250eb53098..a33fe4019286 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -12,6 +12,7 @@ struct nouveau_sgdma_be {
struct drm_device *dev;
dma_addr_t *pages;
+ bool *ttm_alloced;
unsigned nr_pages;
u64 offset;
@@ -20,7 +21,8 @@ struct nouveau_sgdma_be {
static int
nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page)
+ struct page **pages, struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -34,15 +36,25 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
if (!nvbe->pages)
return -ENOMEM;
+ nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
+ if (!nvbe->ttm_alloced)
+ return -ENOMEM;
+
nvbe->nr_pages = 0;
while (num_pages--) {
- nvbe->pages[nvbe->nr_pages] =
- pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
+ if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+ nvbe->pages[nvbe->nr_pages] =
+ dma_addrs[nvbe->nr_pages];
+ nvbe->ttm_alloced[nvbe->nr_pages] = true;
+ } else {
+ nvbe->pages[nvbe->nr_pages] =
+ pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev,
- nvbe->pages[nvbe->nr_pages])) {
- be->func->clear(be);
- return -EFAULT;
+ if (pci_dma_mapping_error(dev->pdev,
+ nvbe->pages[nvbe->nr_pages])) {
+ be->func->clear(be);
+ return -EFAULT;
+ }
}
nvbe->nr_pages++;
@@ -65,17 +77,36 @@ nouveau_sgdma_clear(struct ttm_backend *be)
be->func->unbind(be);
while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+ if (!nvbe->ttm_alloced[nvbe->nr_pages])
+ pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
kfree(nvbe->pages);
+ kfree(nvbe->ttm_alloced);
nvbe->pages = NULL;
+ nvbe->ttm_alloced = NULL;
nvbe->nr_pages = 0;
}
}
+static void
+nouveau_sgdma_destroy(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+
+ if (be) {
+ NV_DEBUG(nvbe->dev, "\n");
+
+ if (nvbe) {
+ if (nvbe->pages)
+ be->func->clear(be);
+ kfree(nvbe);
+ }
+ }
+}
+
static int
-nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -102,7 +133,7 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
static int
-nouveau_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_device *dev = nvbe->dev;
@@ -125,59 +156,245 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
return 0;
}
+static struct ttm_backend_func nv04_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv04_sgdma_bind,
+ .unbind = nv04_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
+nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100810, 0x00000022);
+ if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
+ NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100810));
+ nv_wr32(dev, 0x100810, 0x00000000);
+}
+
+static int
+nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2;
+ u32 cnt = nvbe->nr_pages;
- if (be) {
- NV_DEBUG(nvbe->dev, "\n");
+ nvbe->offset = mem->start << PAGE_SHIFT;
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
+ while (cnt--) {
+ nv_wo32(pgt, pte, (*list++ >> 7) | 1);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv41_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ while (cnt--) {
+ nv_wo32(pgt, pte, 0x00000000);
+ pte += 4;
+ }
+
+ nv41_sgdma_flush(nvbe);
+ nvbe->bound = false;
+ return 0;
+}
+
+static struct ttm_backend_func nv41_sgdma_backend = {
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nv41_sgdma_bind,
+ .unbind = nv41_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+static void
+nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+{
+ struct drm_device *dev = nvbe->dev;
+
+ nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
+ if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
+ NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
+ nv_rd32(dev, 0x100808));
+ nv_wr32(dev, 0x100808, 0x00000000);
+}
+
+static void
+nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
+{
+ struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
+ u32 pte, tmp[4];
+
+ pte = base >> 2;
+ base &= ~0x0000000f;
+
+ tmp[0] = nv_ro32(pgt, base + 0x0);
+ tmp[1] = nv_ro32(pgt, base + 0x4);
+ tmp[2] = nv_ro32(pgt, base + 0x8);
+ tmp[3] = nv_ro32(pgt, base + 0xc);
+ while (cnt--) {
+ u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
+ switch (pte++ & 0x3) {
+ case 0:
+ tmp[0] &= ~0x07ffffff;
+ tmp[0] |= addr;
+ break;
+ case 1:
+ tmp[0] &= ~0xf8000000;
+ tmp[0] |= addr << 27;
+ tmp[1] &= ~0x003fffff;
+ tmp[1] |= addr >> 5;
+ break;
+ case 2:
+ tmp[1] &= ~0xffc00000;
+ tmp[1] |= addr << 22;
+ tmp[2] &= ~0x0001ffff;
+ tmp[2] |= addr >> 10;
+ break;
+ case 3:
+ tmp[2] &= ~0xfffe0000;
+ tmp[2] |= addr << 17;
+ tmp[3] &= ~0x00000fff;
+ tmp[3] |= addr >> 15;
+ break;
}
}
+
+ tmp[3] |= 0x40000000;
+
+ nv_wo32(pgt, base + 0x0, tmp[0]);
+ nv_wo32(pgt, base + 0x4, tmp[1]);
+ nv_wo32(pgt, base + 0x8, tmp[2]);
+ nv_wo32(pgt, base + 0xc, tmp[3]);
}
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ dma_addr_t *list = nvbe->pages;
+ u32 pte = mem->start << 2, tmp[4];
+ u32 cnt = nvbe->nr_pages;
+ int i;
nvbe->offset = mem->start << PAGE_SHIFT;
- nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, list, pte, part);
+ pte += (part << 2);
+ list += part;
+ cnt -= part;
+ }
+
+ while (cnt >= 4) {
+ for (i = 0; i < 4; i++)
+ tmp[i] = *list++ >> 12;
+ nv_wo32(pgt, pte + 0x0, tmp[0] >> 0 | tmp[1] << 27);
+ nv_wo32(pgt, pte + 0x4, tmp[1] >> 5 | tmp[2] << 22);
+ nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
+ nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, list, pte, cnt);
+
+ nv44_sgdma_flush(nvbe);
nvbe->bound = true;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_backend *be)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
+ u32 pte = (nvbe->offset >> 12) << 2;
+ u32 cnt = nvbe->nr_pages;
+
+ if (pte & 0x0000000c) {
+ u32 max = 4 - ((pte >> 2) & 0x3);
+ u32 part = (cnt > max) ? max : cnt;
+ nv44_sgdma_fill(pgt, NULL, pte, part);
+ pte += (part << 2);
+ cnt -= part;
+ }
- if (!nvbe->bound)
- return 0;
+ while (cnt >= 4) {
+ nv_wo32(pgt, pte + 0x0, 0x00000000);
+ nv_wo32(pgt, pte + 0x4, 0x00000000);
+ nv_wo32(pgt, pte + 0x8, 0x00000000);
+ nv_wo32(pgt, pte + 0xc, 0x00000000);
+ pte += 0x10;
+ cnt -= 4;
+ }
+
+ if (cnt)
+ nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
- nvbe->nr_pages << PAGE_SHIFT);
+ nv44_sgdma_flush(nvbe);
nvbe->bound = false;
return 0;
}
-static struct ttm_backend_func nouveau_sgdma_backend = {
+static struct ttm_backend_func nv44_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
- .bind = nouveau_sgdma_bind,
- .unbind = nouveau_sgdma_unbind,
+ .bind = nv44_sgdma_bind,
+ .unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = mem->mm_node;
+ /* noop: bound in move_notify() */
+ node->pages = nvbe->pages;
+ nvbe->pages = (dma_addr_t *)node;
+ nvbe->bound = true;
+ return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
+ /* noop: unbound in move_notify() */
+ nvbe->pages = node->pages;
+ node->pages = NULL;
+ nvbe->bound = false;
+ return 0;
+}
+
static struct ttm_backend_func nv50_sgdma_backend = {
.populate = nouveau_sgdma_populate,
.clear = nouveau_sgdma_clear,
@@ -198,10 +415,7 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
nvbe->dev = dev;
- if (dev_priv->card_type < NV_50)
- nvbe->backend.func = &nouveau_sgdma_backend;
- else
- nvbe->backend.func = &nv50_sgdma_backend;
+ nvbe->backend.func = dev_priv->gart_info.func;
return &nvbe->backend;
}
@@ -210,21 +424,64 @@ nouveau_sgdma_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = NULL;
- uint32_t aper_size, obj_size;
- int i, ret;
+ u32 aper_size, align;
+ int ret;
- if (dev_priv->card_type < NV_50) {
- if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024)
- aper_size = 64 * 1024 * 1024;
- else
- aper_size = 512 * 1024 * 1024;
+ if (dev_priv->card_type >= NV_50 || drm_pci_device_is_pcie(dev))
+ aper_size = 512 * 1024 * 1024;
+ else
+ aper_size = 64 * 1024 * 1024;
+
+ /* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
+ * Christmas. The cards before it have them, the cards after
+ * it have them; why is NV44 so unloved?
+ */
+ dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
+ if (!dev_priv->gart_info.dummy.page)
+ return -ENOMEM;
+
+ dev_priv->gart_info.dummy.addr =
+ pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
+ 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
+ NV_ERROR(dev, "error mapping dummy page\n");
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ return -ENOMEM;
+ }
+
+ if (dev_priv->card_type >= NV_50) {
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ dev_priv->gart_info.func = &nv50_sgdma_backend;
+ } else
+ if (drm_pci_device_is_pcie(dev) &&
+ dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev)) {
+ dev_priv->gart_info.func = &nv44_sgdma_backend;
+ align = 512 * 1024;
+ } else {
+ dev_priv->gart_info.func = &nv41_sgdma_backend;
+ align = 16;
+ }
- obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
- obj_size += 8; /* ctxdma header */
+ ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ if (ret) {
+ NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
- ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.type = NOUVEAU_GART_HW;
+ } else {
+ ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
return ret;
@@ -236,25 +493,14 @@ nouveau_sgdma_init(struct drm_device *dev)
(0 << 14) /* RW */ |
(2 << 16) /* PCI */);
nv_wo32(gpuobj, 4, aper_size - 1);
- for (i = 2; i < 2 + (aper_size >> 12); i++)
- nv_wo32(gpuobj, i * 4, 0x00000000);
dev_priv->gart_info.sg_ctxdma = gpuobj;
dev_priv->gart_info.aper_base = 0;
dev_priv->gart_info.aper_size = aper_size;
- } else
- if (dev_priv->chan_vm) {
- ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
- 12, NV_MEM_ACCESS_RW,
- &dev_priv->gart_info.vma);
- if (ret)
- return ret;
-
- dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
- dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
+ dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
+ dev_priv->gart_info.func = &nv04_sgdma_backend;
}
- dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
return 0;
}
@@ -264,7 +510,13 @@ nouveau_sgdma_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
- nouveau_vm_put(&dev_priv->gart_info.vma);
+
+ if (dev_priv->gart_info.dummy.page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ __free_page(dev_priv->gart_info.dummy.page);
+ dev_priv->gart_info.dummy.page = NULL;
+ }
}
uint32_t
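The nv44 page table packs 27-bit page frame numbers four to a 128-bit group, which is why nv44_sgdma_bind() routes unaligned heads and tails through nv44_sgdma_fill() and only the aligned middle through the fast loop. A minimal sketch of that fast path follows; pack_group() is an illustrative helper, not part of the patch, and assumes each frame number already fits in 27 bits:

    /* Pack four 27-bit page frame numbers (dma_addr >> 12) into one
     * 128-bit NV44 PTE group, mirroring the aligned loop in
     * nv44_sgdma_bind(); bit 30 of the last word marks the group. */
    static void pack_group(u32 tmp[4], const u32 pfn[4])
    {
            tmp[0] = (pfn[0] >>  0) | (pfn[1] << 27);
            tmp[1] = (pfn[1] >>  5) | (pfn[2] << 22);
            tmp[2] = (pfn[2] >> 10) | (pfn[3] << 17);
            tmp[3] = (pfn[3] >> 15) | 0x40000000;
    }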
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a54fc431fe98..05294910e135 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -544,7 +544,6 @@ static int
nouveau_card_init_channel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj = NULL;
int ret;
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
@@ -552,41 +551,8 @@ nouveau_card_init_channel(struct drm_device *dev)
if (ret)
return ret;
- /* no dma objects on fermi... */
- if (dev_priv->card_type >= NV_C0)
- goto out_done;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->vram_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->gart_info.aper_size,
- NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
- &gpuobj);
- if (ret)
- goto out_err;
-
- ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- if (ret)
- goto out_err;
-
-out_done:
mutex_unlock(&dev_priv->channel->mutex);
return 0;
-
-out_err:
- nouveau_channel_put(&dev_priv->channel);
- return ret;
}
static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
@@ -929,12 +895,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- dev_priv->wq = create_workqueue("nouveau");
- if (!dev_priv->wq) {
- ret = -EINVAL;
- goto err_priv;
- }
-
/* resource 0 is mmio regs */
/* resource 1 is linear FB */
/* resource 2 is RAMIN (mmio regs + 0x1000000) */
@@ -947,7 +907,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
ret = -EINVAL;
- goto err_wq;
+ goto err_priv;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
@@ -1054,8 +1014,6 @@ err_ramin:
iounmap(dev_priv->ramin);
err_mmio:
iounmap(dev_priv->mmio);
-err_wq:
- destroy_workqueue(dev_priv->wq);
err_priv:
kfree(dev_priv);
dev->dev_private = NULL;
@@ -1103,9 +1061,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = dev->pci_device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
getparam->value = NV_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
getparam->value = NV_PCIE;
else
getparam->value = NV_PCI;
@@ -1126,7 +1084,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = (dev_priv->card_type < NV_50);
+ getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 8d9968e1cba8..649b0413b09f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -239,11 +239,9 @@ static bool
probe_monitoring_device(struct nouveau_i2c_chan *i2c,
struct i2c_board_info *info)
{
- char modalias[16] = "i2c:";
struct i2c_client *client;
- strlcat(modalias, info->type, sizeof(modalias));
- request_module(modalias);
+ request_module("%s%s", I2C_MODULE_PREFIX, info->type);
client = i2c_new_device(&i2c->adapter, info);
if (!client)
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
index fbe0fb13bc1e..e51b51503baa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -47,18 +47,27 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
printk(" (unknown bits 0x%08x)", value);
}
-void
-nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *en, u32 value)
{
while (en->name) {
- if (value == en->value) {
- printk("%s", en->name);
- return;
- }
-
+ if (en->value == value)
+ return en;
en++;
}
+ return NULL;
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+ en = nouveau_enum_find(en, value);
+ if (en) {
+ printk("%s", en->name);
+ return;
+ }
+
printk("(unknown enum 0x%08x)", value);
}
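Splitting the lookup out of nouveau_enum_print() lets callers reach the matched table entry itself, including the new data field added below. A hypothetical caller, assuming a table named err_table:

    const struct nouveau_enum *en = nouveau_enum_find(err_table, value);
    if (en)
            printk("%s", en->name);	/* en->data carries per-entry info */
    else
            printk("(unknown enum 0x%08x)", value);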
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
index d9ceaea26f4b..b97719fbb739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -36,10 +36,14 @@ struct nouveau_bitfield {
struct nouveau_enum {
u32 value;
const char *name;
+ void *data;
};
void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+const struct nouveau_enum *
+nouveau_enum_find(const struct nouveau_enum *, u32 value);
+
int nouveau_ratelimit(void);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 97d82aedf86b..0059e6f58a8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -28,7 +28,7 @@
#include "nouveau_vm.h"
void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
struct nouveau_vm *vm = vma->vm;
struct nouveau_mm_node *r;
@@ -40,7 +40,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
u32 max = 1 << (vm->pgt_bits - bits);
u32 end, len;
- list_for_each_entry(r, &vram->regions, rl_entry) {
+ delta = 0;
+ list_for_each_entry(r, &node->regions, rl_entry) {
u64 phys = (u64)r->offset << 12;
u32 num = r->length >> bits;
@@ -52,7 +53,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
end = max;
len = end - pte;
- vm->map(vma, pgt, vram, pte, len, phys);
+ vm->map(vma, pgt, node, pte, len, phys, delta);
num -= len;
pte += len;
@@ -60,6 +61,8 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
pde++;
pte = 0;
}
+
+ delta += (u64)len << vma->node->type;
}
}
@@ -67,14 +70,14 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
}
void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
- nouveau_vm_map_at(vma, 0, vram);
+ nouveau_vm_map_at(vma, 0, node);
}
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- dma_addr_t *list)
+ struct nouveau_mem *mem, dma_addr_t *list)
{
struct nouveau_vm *vm = vma->vm;
int big = vma->node->type != vm->spg_shift;
@@ -94,7 +97,7 @@ nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
end = max;
len = end - pte;
- vm->map_sg(vma, pgt, pte, list, len);
+ vm->map_sg(vma, pgt, mem, pte, len, list);
num -= len;
pte += len;
@@ -311,18 +314,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
vm->spg_shift = 12;
vm->lpg_shift = 17;
pgt_bits = 27;
-
- /* Should be 4096 everywhere, this is a hack that's
- * currently necessary to avoid an elusive bug that
- * causes corruption when mixing small/large pages
- */
- if (length < (1ULL << 40))
- block = 4096;
- else {
- block = (1 << pgt_bits);
- if (length < block)
- block = length;
- }
+ block = 4096;
} else {
kfree(vm);
return -ENOSYS;
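With the map() callback now taking a delta argument, nouveau_vm_map_at() tracks how far into the mapping each chunk of PTEs lands, advancing by len pages per call. A condensed, editor-assembled model of the loop under that reading, with the pde/phys bookkeeping elided:

    u64 delta = 0;
    list_for_each_entry(r, &node->regions, rl_entry) {
            u64 phys = (u64)r->offset << 12;
            u32 num  = r->length >> bits;
            while (num) {
                    u32 len = min(num, max - pte);
                    vm->map(vma, pgt, node, pte, len, phys, delta);
                    num   -= len;
                    pte   += len;
                    delta += (u64)len << vma->node->type;
                    /* page-directory crossing handled as in the patch */
            }
    }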
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index e1193515771b..2e06b55cfdc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -67,9 +67,10 @@ struct nouveau_vm {
void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
void (*flush)(struct nouveau_vm *);
};
@@ -82,20 +83,20 @@ int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- dma_addr_t *);
+ struct nouveau_mem *, dma_addr_t *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nv50_vm_flush(struct nouveau_vm *);
void nv50_vm_flush_engine(struct drm_device *, int engine);
@@ -104,9 +105,9 @@ void nv50_vm_flush_engine(struct drm_device *, int engine);
void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2]);
void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- u32 pte, dma_addr_t *, u32 cnt);
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
void nvc0_vm_flush(struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 297505eb98d5..a260fbbe3d9b 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -1031,7 +1031,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f89d104698df..db465a3ee1b2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -379,6 +379,15 @@ out:
return handled;
}
+static const char *nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
void
nv04_fifo_isr(struct drm_device *dev)
{
@@ -460,9 +469,10 @@ nv04_fifo_isr(struct drm_device *dev)
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x Push 0x%08x\n",
+ "State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
@@ -476,8 +486,9 @@ nv04_fifo_isr(struct drm_device *dev)
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x Push 0x%08x\n",
- chid, dma_get, dma_put, state, push);
+ "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
@@ -505,7 +516,7 @@ nv04_fifo_isr(struct drm_device *dev)
if (dev_priv->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+ nv50_fb_vm_trap(dev, nouveau_ratelimit());
status &= ~0x00000010;
nv_wr32(dev, 0x002100, 0x00000010);
}
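The new "(err: %s)" annotation decodes bits 31:29 of DMA_STATE. As a worked example with an illustrative value: state = 0x80000000 gives (state >> 29) & 0x7 == 4, so desc[4] selects "INVALID_CMD" and the log line reads "State 0x80000000 (err: INVALID_CMD)".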
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 28119fd19d03..3900cebba560 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -197,10 +197,12 @@ static int nv17_tv_get_ld_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
- struct drm_display_mode *mode, *tv_mode;
+ const struct drm_display_mode *tv_mode;
int n = 0;
for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
+ struct drm_display_mode *mode;
+
mode = drm_mode_duplicate(encoder->dev, tv_mode);
mode->clock = tv_norm->tv_enc_mode.vrefresh *
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
index 6bf03840f9eb..622e72221682 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.h
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -112,7 +112,7 @@ extern struct nv17_tv_norm_params {
} nv17_tv_norms[NUM_TV_NORMS];
#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
-extern struct drm_display_mode nv17_tv_modes[];
+extern const struct drm_display_mode nv17_tv_modes[];
static inline int interpolate(int y0, int y1, int y2, int x)
{
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
index 9d3893c50a41..4d1d29f60307 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv_modes.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -438,7 +438,7 @@ void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
/* Timings similar to the ones the blob sets */
-struct drm_display_mode nv17_tv_modes[] = {
+const struct drm_display_mode nv17_tv_modes[] = {
{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index f3d9c0505f7b..f0ac2a768c67 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -24,6 +24,53 @@ nv40_fb_set_tile_region(struct drm_device *dev, int i)
}
}
+static void
+nv40_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
+ nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x100820, 0x00000000);
+}
+
+static void
+nv44_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+ u32 vinst;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ /* calculate the VRAM address of this PRAMIN block; the object
+ * must be allocated with 512KiB alignment, and must not exceed
+ * 512KiB in total size for this to work correctly
+ */
+ vinst = nv_rd32(dev, 0x10020c);
+ vinst -= ((gart->pinst >> 19) + 1) << 19;
+
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
+
+ nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
+ nv_wr32(dev, 0x100850, 0x00008000);
+ nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
+ nv_wr32(dev, 0x100820, 0x00000000);
+ nv_wr32(dev, 0x10082c, 0x00000001);
+ nv_wr32(dev, 0x100800, vinst | 0x00000010);
+}
+
int
nv40_fb_init(struct drm_device *dev)
{
@@ -32,12 +79,12 @@ nv40_fb_init(struct drm_device *dev)
uint32_t tmp;
int i;
- /* This is strictly a NV4x register (don't know about NV5x). */
- /* The blob sets these to all kinds of values, and they mess up our setup. */
- /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
- /* Note: the blob doesn't read this value, so i'm pretty sure this is safe for all cards. */
- /* Any idea what this is? */
- nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
+ if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev))
+ nv44_fb_init_gart(dev);
+ else
+ nv40_fb_init_gart(dev);
+ }
switch (dev_priv->chipset) {
case 0x40:
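In nv44_fb_init_gart() above, 0x10020c holds the VRAM size on these chips, and ((gart->pinst >> 19) + 1) << 19 rounds the page table's PRAMIN offset up to the next 512KiB boundary; since PRAMIN is carved from the top of VRAM on pre-NV50 hardware, subtracting yields the VRAM address of the 512KiB window containing the table. With illustrative numbers, pinst = 0x20000 on a 256MiB board: ((0x20000 >> 19) + 1) << 19 == 0x80000, so vinst == 0x10000000 - 0x80000 == 0x0ff80000.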
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 9023c4dbb449..2b9984027f41 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -65,7 +65,7 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int index = nv_crtc->index, ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -135,8 +135,7 @@ static int
nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -186,8 +185,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
struct nouveau_connector *nv_connector =
nouveau_crtc_connector_get(nv_crtc);
struct drm_device *dev = nv_crtc->base.dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_display_mode *native_mode = NULL;
struct drm_display_mode *mode = &nv_crtc->base.mode;
uint32_t outX, outY, horiz, vert;
@@ -445,6 +443,42 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
+static int
+nv50_crtc_wait_complete(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret)
+ return ret;
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ nv_wr32(dev, 0x61002c, 0x370);
+ nv_wr32(dev, 0x000140, 1);
+
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+
+ return -EBUSY;
+}
+
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -453,6 +487,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ nv50_display_flip_stop(crtc);
drm_vblank_pre_modeset(dev, nv_crtc->index);
nv50_crtc_blank(nv_crtc, true);
}
@@ -461,24 +496,14 @@ static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
-
- ret = RING_SPACE(evo, 2);
- if (ret) {
- NV_ERROR(dev, "no space while committing crtc\n");
- return;
- }
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING (evo, 0);
- FIRE_RING (evo);
+ nv50_crtc_wait_complete(crtc);
+ nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static bool
@@ -491,15 +516,15 @@ nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *passed_fb,
- int x, int y, bool update, bool atomic)
+ int x, int y, bool atomic)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = nv_crtc->base.dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- int ret, format;
+ int ret;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
@@ -525,28 +550,6 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
}
- switch (drm_fb->depth) {
- case 8:
- format = NV50_EVO_CRTC_FB_DEPTH_8;
- break;
- case 15:
- format = NV50_EVO_CRTC_FB_DEPTH_15;
- break;
- case 16:
- format = NV50_EVO_CRTC_FB_DEPTH_16;
- break;
- case 24:
- case 32:
- format = NV50_EVO_CRTC_FB_DEPTH_24;
- break;
- case 30:
- format = NV50_EVO_CRTC_FB_DEPTH_30;
- break;
- default:
- NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
- return -EINVAL;
- }
-
nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
@@ -556,14 +559,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
- if (nv_crtc->fb.tile_flags == 0x7a00 ||
- nv_crtc->fb.tile_flags == 0xfe00)
- OUT_RING(evo, NvEvoFB32);
- else
- if (nv_crtc->fb.tile_flags == 0x7000)
- OUT_RING(evo, NvEvoFB16);
- else
- OUT_RING(evo, NvEvoVRAM_LP);
+ OUT_RING (evo, fb->r_dma);
}
ret = RING_SPACE(evo, 12);
@@ -571,45 +567,26 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
return ret;
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
- OUT_RING(evo, nv_crtc->fb.offset >> 8);
- OUT_RING(evo, 0);
- OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
- if (!nv_crtc->fb.tile_flags) {
- OUT_RING(evo, drm_fb->pitch | (1 << 20));
- } else {
- u32 tile_mode = fb->nvbo->tile_mode;
- if (dev_priv->card_type >= NV_C0)
- tile_mode >>= 4;
- OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
- }
- if (dev_priv->chipset == 0x50)
- OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
- else
- OUT_RING(evo, format);
+ OUT_RING (evo, nv_crtc->fb.offset >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
+ OUT_RING (evo, fb->r_pitch);
+ OUT_RING (evo, fb->r_format);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
- OUT_RING(evo, fb->base.depth == 8 ?
- NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
+ OUT_RING (evo, fb->base.depth == 8 ?
+ NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
- OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
+ OUT_RING (evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
- OUT_RING(evo, (y << 16) | x);
+ OUT_RING (evo, (y << 16) | x);
if (nv_crtc->lut.depth != fb->base.depth) {
nv_crtc->lut.depth = fb->base.depth;
nv50_crtc_lut_load(crtc);
}
- if (update) {
- ret = RING_SPACE(evo, 2);
- if (ret)
- return ret;
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
- }
-
return 0;
}
@@ -619,8 +596,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector = NULL;
uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
@@ -700,14 +676,25 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false, false);
+ return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
static int
nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
- return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, true, false);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
+ if (ret)
+ return ret;
+
+ ret = nv50_crtc_wait_complete(crtc);
+ if (ret)
+ return ret;
+
+ return nv50_display_flip_next(crtc, crtc->fb, NULL);
}
static int
@@ -715,7 +702,14 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- return nv50_crtc_do_mode_set_base(crtc, fb, x, y, true, true);
+ int ret;
+
+ nv50_display_flip_stop(crtc);
+ ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
+ if (ret)
+ return ret;
+
+ return nv50_crtc_wait_complete(crtc);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -758,7 +752,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
nv_crtc->lut.depth = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
+ 0, 0x0000, &nv_crtc->lut.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -784,7 +778,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
+ 0, 0x0000, &nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
index 1b9ce3021aa3..9752c35bb84b 100644
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -36,9 +36,9 @@
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
@@ -71,9 +71,9 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
NV_DEBUG_KMS(dev, "\n");
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 875414b09ade..808f3ec8f827 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -41,8 +41,7 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -216,8 +215,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7cc94ed9ed95..75a376cc342a 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -24,6 +24,7 @@
*
*/
+#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
@@ -34,6 +35,7 @@
#include "drm_crtc_helper.h"
static void nv50_display_isr(struct drm_device *);
+static void nv50_display_bh(unsigned long);
static inline int
nv50_sor_nr(struct drm_device *dev)
@@ -172,16 +174,16 @@ nv50_display_init(struct drm_device *dev)
ret = nv50_evo_init(dev);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = nv50_display(dev)->master;
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 11);
+ ret = RING_SPACE(evo, 15);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
+ OUT_RING(evo, NvEvoSync);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
@@ -190,6 +192,11 @@ nv50_display_init(struct drm_device *dev)
OUT_RING(evo, 0);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
OUT_RING(evo, 0);
+ /* required to make display sync channels not hate life */
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
+ OUT_RING (evo, 0x00000311);
FIRE_RING(evo);
if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
NV_ERROR(dev, "evo pushbuf stalled\n");
@@ -201,6 +208,8 @@ nv50_display_init(struct drm_device *dev)
static int nv50_display_disable(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
int ret, i;
@@ -212,12 +221,12 @@ static int nv50_display_disable(struct drm_device *dev)
nv50_crtc_blank(crtc, true);
}
- ret = RING_SPACE(dev_priv->evo, 2);
+ ret = RING_SPACE(evo, 2);
if (ret == 0) {
- BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(dev_priv->evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING(evo, 0);
}
- FIRE_RING(dev_priv->evo);
+ FIRE_RING(evo);
/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
* cleaning up?
@@ -267,10 +276,16 @@ int nv50_display_create(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_connector *connector, *ct;
+ struct nv50_display *priv;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.display.priv = priv;
+
/* init basic kernel modesetting */
drm_mode_config_init(dev);
@@ -330,7 +345,7 @@ int nv50_display_create(struct drm_device *dev)
}
}
- INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+ tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
ret = nv50_display_init(dev);
@@ -345,12 +360,131 @@ int nv50_display_create(struct drm_device *dev)
void
nv50_display_destroy(struct drm_device *dev)
{
+ struct nv50_display *disp = nv50_display(dev);
+
NV_DEBUG_KMS(dev, "\n");
drm_mode_config_cleanup(dev);
nv50_display_disable(dev);
nouveau_irq_unregister(dev, 26);
+ kfree(disp);
+}
+
+void
+nv50_display_flip_stop(struct drm_crtc *crtc)
+{
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 8);
+ if (ret) {
+ WARN_ON(1);
+ return;
+ }
+
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0094, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+}
+
+int
+nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nv50_display *disp = nv50_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
+ struct nouveau_channel *evo = dispc->sync;
+ int ret;
+
+ ret = RING_SPACE(evo, 24);
+ if (unlikely(ret))
+ return ret;
+
+ /* synchronise with the rendering channel, if necessary */
+ if (likely(chan)) {
+ u64 offset = dispc->sem.bo->vma.offset + dispc->sem.offset;
+
+ ret = RING_SPACE(chan, 10);
+ if (ret) {
+ WIND_RING(evo);
+ return ret;
+ }
+
+ if (dev_priv->chipset < 0xc0) {
+ BEGIN_RING(chan, NvSubSw, 0x0060, 2);
+ OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
+ OUT_RING (chan, dispc->sem.offset);
+ BEGIN_RING(chan, NvSubSw, 0x006c, 1);
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ BEGIN_RING(chan, NvSubSw, 0x0064, 2);
+ OUT_RING (chan, dispc->sem.offset ^ 0x10);
+ OUT_RING (chan, 0x74b1e000);
+ BEGIN_RING(chan, NvSubSw, 0x0060, 1);
+ if (dev_priv->chipset < 0x84)
+ OUT_RING (chan, NvSema);
+ else
+ OUT_RING (chan, chan->vram_handle);
+ } else {
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (chan, 0x1002);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x1001);
+ }
+ FIRE_RING (chan);
+ } else {
+ nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
+ 0xf00d0000 | dispc->sem.value);
+ }
+
+ /* queue the flip on the crtc's "display sync" channel */
+ BEGIN_RING(evo, 0, 0x0100, 1);
+ OUT_RING (evo, 0xfffe0000);
+ BEGIN_RING(evo, 0, 0x0084, 5);
+ OUT_RING (evo, chan ? 0x00000100 : 0x00000010);
+ OUT_RING (evo, dispc->sem.offset);
+ OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
+ OUT_RING (evo, 0x74b1e000);
+ OUT_RING (evo, NvEvoSync);
+ BEGIN_RING(evo, 0, 0x00a0, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x00c0, 1);
+ OUT_RING (evo, nv_fb->r_dma);
+ BEGIN_RING(evo, 0, 0x0110, 2);
+ OUT_RING (evo, 0x00000000);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0800, 5);
+ OUT_RING (evo, (nv_fb->nvbo->bo.mem.start << PAGE_SHIFT) >> 8);
+ OUT_RING (evo, 0);
+ OUT_RING (evo, (fb->height << 16) | fb->width);
+ OUT_RING (evo, nv_fb->r_pitch);
+ OUT_RING (evo, nv_fb->r_format);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0x00000000);
+ FIRE_RING (evo);
+
+ dispc->sem.offset ^= 0x10;
+ dispc->sem.value++;
+ return 0;
}
static u16
@@ -466,11 +600,12 @@ static void
nv50_display_unk10_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
@@ -541,7 +676,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
if (dcb->type == type && (dcb->or & (1 << or))) {
nouveau_bios_run_display_table(dev, dcb, 0, -1);
- dev_priv->evo_irq.dcb = dcb;
+ disp->irq.dcb = dcb;
goto ack;
}
}
@@ -587,15 +722,16 @@ static void
nv50_display_unk20_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
+ struct nv50_display *disp = nv50_display(dev);
+ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
int i, crtc, or, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dcb = dev_priv->evo_irq.dcb;
+ dcb = disp->irq.dcb;
if (dcb) {
nouveau_bios_run_display_table(dev, dcb, 0, -2);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
}
/* CRTC clock change requested? */
@@ -692,9 +828,9 @@ nv50_display_unk20_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
- dev_priv->evo_irq.dcb = dcb;
- dev_priv->evo_irq.pclk = pclk;
- dev_priv->evo_irq.script = script;
+ disp->irq.dcb = dcb;
+ disp->irq.pclk = pclk;
+ disp->irq.script = script;
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
@@ -735,13 +871,13 @@ nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
- u16 script = dev_priv->evo_irq.script;
- u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
+ struct nv50_display *disp = nv50_display(dev);
+ struct dcb_entry *dcb = disp->irq.dcb;
+ u16 script = disp->irq.script;
+ u32 unk30 = nv_rd32(dev, 0x610030), pclk = disp->irq.pclk;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- dev_priv->evo_irq.dcb = NULL;
+ disp->irq.dcb = NULL;
if (!dcb)
goto ack;
@@ -754,12 +890,10 @@ ack:
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}
-void
-nv50_display_irq_handler_bh(struct work_struct *work)
+static void
+nv50_display_bh(unsigned long data)
{
- struct drm_nouveau_private *dev_priv =
- container_of(work, struct drm_nouveau_private, irq_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = (struct drm_device *)data;
for (;;) {
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
@@ -807,7 +941,7 @@ nv50_display_error_handler(struct drm_device *dev)
static void
nv50_display_isr(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
uint32_t delayed = 0;
while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
@@ -835,8 +969,7 @@ nv50_display_isr(struct drm_device *dev)
NV50_PDISPLAY_INTR_1_CLK_UNK40));
if (clock) {
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (!work_pending(&dev_priv->irq_work))
- queue_work(dev_priv->wq, &dev_priv->irq_work);
+ tasklet_schedule(&disp->tasklet);
delayed |= clock;
intr1 &= ~clock;
}
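The page-flip path above rests on a small per-CRTC semaphore protocol: each display sync channel owns a buffer with two 16-byte slots, the renderer (or the CPU fallback) releases the active slot with the token 0xf00d0000 | value while parking 0x74b1e000 in the other, and EVO acquires that token before latching the new framebuffer. A minimal model of the bookkeeping, with hypothetical names:

    /* Illustrative model of the per-CRTC flip semaphore state. */
    struct flip_sem {
            u32 offset;	/* active slot within the bo, toggled ^ 0x10 */
            u16 value;	/* token expected for the current flip */
    };

    static void flip_sem_advance(struct flip_sem *sem)
    {
            /* after queueing one flip, swap slots and bump the token,
             * as nv50_display_flip_next() does for dispc->sem */
            sem->offset ^= 0x10;
            sem->value++;
    }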
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index f0e30b78ef6b..c2da503a22aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,7 +35,36 @@
#include "nouveau_crtc.h"
#include "nv50_evo.h"
-void nv50_display_irq_handler_bh(struct work_struct *work);
+struct nv50_display_crtc {
+ struct nouveau_channel *sync;
+ struct {
+ struct nouveau_bo *bo;
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
+struct nv50_display {
+ struct nouveau_channel *master;
+ struct nouveau_gpuobj *ntfy;
+
+ struct nv50_display_crtc crtc[2];
+
+ struct tasklet_struct tasklet;
+ struct {
+ struct dcb_entry *dcb;
+ u16 script;
+ u32 pclk;
+ } irq;
+};
+
+static inline struct nv50_display *
+nv50_display(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->engine.display.priv;
+}
+
int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
@@ -44,4 +73,15 @@ void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ struct nouveau_channel *chan);
+void nv50_display_flip_stop(struct drm_crtc *);
+
+int nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
+ u64 size);
+int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **);
+
#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 0ea090f4244a..a2cfaa691e9b 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -27,20 +27,17 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_ramht.h"
+#include "nv50_display.h"
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv;
struct nouveau_channel *evo = *pevo;
if (!evo)
return;
*pevo = NULL;
- dev_priv = evo->dev->dev_private;
- dev_priv->evo_alloc &= ~(1 << evo->id);
-
nouveau_gpuobj_channel_takedown(evo);
nouveau_bo_unmap(evo->pushbuf_bo);
nouveau_bo_ref(NULL, &evo->pushbuf_bo);
@@ -51,42 +48,61 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
kfree(evo);
}
+void
+nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+{
+ struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ u32 flags5;
+
+ if (dev_priv->chipset < 0xc0) {
+ /* not supported on 0x50, specified in format mthd */
+ if (dev_priv->chipset == 0x50)
+ memtype = 0;
+ flags5 = 0x00010000;
+ } else {
+ if (memtype & 0x80000000)
+ flags5 = 0x00000000; /* large pages */
+ else
+ flags5 = 0x00020000;
+ }
+
+ nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
+ NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
+ nv_wo32(obj, 0x14, flags5);
+ dev_priv->engine.instmem.flush(obj->dev);
+}
+
int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
- u32 flags5)
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
- struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
- struct drm_device *dev = evo->dev;
+ struct nv50_display *disp = nv50_display(evo->dev);
struct nouveau_gpuobj *obj = NULL;
int ret;
- ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+ ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
if (ret)
return ret;
obj->engine = NVOBJ_ENGINE_DISPLAY;
- nv_wo32(obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
- nv_wo32(obj, 4, limit);
- nv_wo32(obj, 8, offset);
- nv_wo32(obj, 12, 0x00000000);
- nv_wo32(obj, 16, 0x00000000);
- nv_wo32(obj, 20, flags5);
- dev_priv->engine.instmem.flush(dev);
+ nv50_evo_dmaobj_init(obj, memtype, base, size);
- ret = nouveau_ramht_insert(evo, name, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- if (ret) {
- return ret;
- }
+ ret = nouveau_ramht_insert(evo, handle, obj);
+ if (ret)
+ goto out;
- return 0;
+ if (pobj)
+ nouveau_gpuobj_ref(obj, pobj);
+out:
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
}
static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+nv50_evo_channel_new(struct drm_device *dev, int chid,
+ struct nouveau_channel **pevo)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo;
int ret;
@@ -95,25 +111,13 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
return -ENOMEM;
*pevo = evo;
- for (evo->id = 0; evo->id < 5; evo->id++) {
- if (dev_priv->evo_alloc & (1 << evo->id))
- continue;
-
- dev_priv->evo_alloc |= (1 << evo->id);
- break;
- }
-
- if (evo->id == 5) {
- kfree(evo);
- return -ENODEV;
- }
-
+ evo->id = chid;
evo->dev = dev;
evo->user_get = 4;
evo->user_put = 0;
ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
- false, true, &evo->pushbuf_bo);
+ &evo->pushbuf_bo);
if (ret == 0)
ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
if (ret) {
@@ -138,8 +142,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
}
/* bind primary evo channel's ramht to the channel */
- if (dev_priv->evo && evo != dev_priv->evo)
- nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+ if (disp->master && evo != disp->master)
+ nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
return 0;
}
@@ -212,21 +216,39 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
+static void
+nv50_evo_destroy(struct drm_device *dev)
+{
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sem.bo) {
+ nouveau_bo_unmap(disp->crtc[i].sem.bo);
+ nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
+ }
+ nv50_evo_channel_del(&disp->crtc[i].sync);
+ }
+ nouveau_gpuobj_ref(NULL, &disp->ntfy);
+ nv50_evo_channel_del(&disp->master);
+}
+
static int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *evo;
- int ret;
+ int ret, i, j;
/* create primary evo channel, the one we use for modesetting
* purposes
*/
- ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+ ret = nv50_evo_channel_new(dev, 0, &disp->master);
if (ret)
return ret;
- evo = dev_priv->evo;
+ evo = disp->master;
/* setup object management on it, any other evo channel will
* use this also as there's no per-channel support on the
@@ -236,109 +258,167 @@ nv50_evo_create(struct drm_device *dev)
NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
if (ret) {
NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ goto err;
}
ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
nouveau_gpuobj_ref(NULL, &ramht);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+ /* not sure exactly what this is..
+ *
+ * the first dword of the structure is used by nvidia to wait on
+ * full completion of an EVO "update" command.
+ *
+ * method 0x8c on the master evo channel will fill a lot more of
+ * this structure with some undefined info
+ */
+ ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
+ disp->ntfy->vinst, disp->ntfy->size, NULL);
+ if (ret)
+ goto err;
/* create some default objects for the scanout memtypes we support */
- if (dev_priv->card_type >= NV_C0) {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
- 0, 0xffffffff, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00020000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00000000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
- } else {
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+ ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
- 0, 0xffffffff, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ /* create "display sync" channels and other structures we need
+ * to implement page flipping
+ */
+ for (i = 0; i < 2; i++) {
+ struct nv50_display_crtc *dispc = &disp->crtc[i];
+ u64 offset;
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
+ ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
+ if (ret)
+ goto err;
+
+ ret = nouveau_bo_new(dev, NULL, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &dispc->sem.bo);
+ if (!ret) {
+ offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;
+
+ ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(dispc->sem.bo);
+ if (ret)
+ nouveau_bo_ref(NULL, &dispc->sem.bo);
}
- ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
- 0, dev_priv->vram_size, 0x00010000);
- if (ret) {
- nv50_evo_channel_del(&dev_priv->evo);
- return ret;
- }
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
+ offset, 4096, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7a00 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
+ (dev_priv->chipset < 0xc0 ?
+ 0x7000 : 0xfe00),
+ 0, dev_priv->vram_size, NULL);
+ if (ret)
+ goto err;
+
+ for (j = 0; j < 4096; j += 4)
+ nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
+ dispc->sem.offset = 0;
}
return 0;
+
+err:
+ nv50_evo_destroy(dev);
+ return ret;
}
int
nv50_evo_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
+ struct nv50_display *disp = nv50_display(dev);
+ int ret, i;
- if (!dev_priv->evo) {
+ if (!disp->master) {
ret = nv50_evo_create(dev);
if (ret)
return ret;
}
- return nv50_evo_channel_init(dev_priv->evo);
+ ret = nv50_evo_channel_init(disp->master);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 2; i++) {
+ ret = nv50_evo_channel_init(disp->crtc[i].sync);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
void
nv50_evo_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ int i;
- if (dev_priv->evo) {
- nv50_evo_channel_fini(dev_priv->evo);
- nv50_evo_channel_del(&dev_priv->evo);
+ for (i = 0; i < 2; i++) {
+ if (disp->crtc[i].sync)
+ nv50_evo_channel_fini(disp->crtc[i].sync);
}
+
+ if (disp->master)
+ nv50_evo_channel_fini(disp->master);
+
+ nv50_evo_destroy(dev);
}
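
nv50_evo_create() used to repeat nv50_evo_channel_del() in every error branch; the rewrite above funnels all failures through a single goto err that calls nv50_evo_destroy(), which in turn must tolerate partially constructed state (hence the NULL checks on sem.bo and the channel pointers). A generic sketch of the idiom, with illustrative names:

    static int thing_create(struct thing *t)
    {
        int ret;

        ret = alloc_part_a(t);
        if (ret)
            goto err;

        ret = alloc_part_b(t);
        if (ret)
            goto err;

        return 0;

    err:
        thing_destroy(t); /* must cope with a/b being absent */
        return ret;
    }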
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aa4f0d3cea8e..3860ca62cb19 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -27,12 +27,6 @@
#ifndef __NV50_EVO_H__
#define __NV50_EVO_H__
-int nv50_evo_init(struct drm_device *dev);
-void nv50_evo_fini(struct drm_device *dev);
-int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
- u32 tile_flags, u32 magic_flags,
- u32 offset, u32 limit);
-
#define NV50_EVO_UPDATE 0x00000080
#define NV50_EVO_UNK84 0x00000084
#define NV50_EVO_UNK84_NOTIFY 0x40000000
@@ -119,5 +113,7 @@ int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
/* Both of these are needed, otherwise nothing happens. */
#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
+#define NV50_EVO_CRTC_UNK900 0x00000900
+#define NV50_EVO_CRTC_UNK904 0x00000904
#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index 50290dea0ac4..bdd2afe29205 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -8,31 +8,61 @@ struct nv50_fb_priv {
dma_addr_t r100c08;
};
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nv50_fb_priv *priv = pfb->priv;
+
+ if (drm_mm_initialized(&pfb->tag_heap))
+ drm_mm_takedown(&pfb->tag_heap);
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
static int
nv50_fb_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nv50_fb_priv *priv;
+ u32 tagmem;
+ int ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ pfb->priv = priv;
priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!priv->r100c08_page) {
- kfree(priv);
+ nv50_fb_destroy(dev);
return -ENOMEM;
}
priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
- __free_page(priv->r100c08_page);
- kfree(priv);
+ nv50_fb_destroy(dev);
return -EFAULT;
}
- dev_priv->engine.fb.priv = priv;
+ tagmem = nv_rd32(dev, 0x100320);
+ NV_DEBUG(dev, "%d tags available\n", tagmem);
+ ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+ if (ret) {
+ nv50_fb_destroy(dev);
+ return ret;
+ }
+
return 0;
}
@@ -81,26 +111,112 @@ nv50_fb_init(struct drm_device *dev)
void
nv50_fb_takedown(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fb_priv *priv;
+ nv50_fb_destroy(dev);
+}
- priv = dev_priv->engine.fb.priv;
- if (!priv)
- return;
- dev_priv->engine.fb.priv = NULL;
+static struct nouveau_enum vm_dispatch_subclients[] = {
+ { 0x00000000, "GRCTX", NULL },
+ { 0x00000001, "NOTIFY", NULL },
+ { 0x00000002, "QUERY", NULL },
+ { 0x00000003, "COND", NULL },
+ { 0x00000004, "M2M_IN", NULL },
+ { 0x00000005, "M2M_OUT", NULL },
+ { 0x00000006, "M2M_NOTIFY", NULL },
+ {}
+};
- pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- kfree(priv);
-}
+static struct nouveau_enum vm_ccache_subclients[] = {
+ { 0x00000000, "CB", NULL },
+ { 0x00000001, "TIC", NULL },
+ { 0x00000002, "TSC", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_prop_subclients[] = {
+ { 0x00000000, "RT0", NULL },
+ { 0x00000001, "RT1", NULL },
+ { 0x00000002, "RT2", NULL },
+ { 0x00000003, "RT3", NULL },
+ { 0x00000004, "RT4", NULL },
+ { 0x00000005, "RT5", NULL },
+ { 0x00000006, "RT6", NULL },
+ { 0x00000007, "RT7", NULL },
+ { 0x00000008, "ZETA", NULL },
+ { 0x00000009, "LOCAL", NULL },
+ { 0x0000000a, "GLOBAL", NULL },
+ { 0x0000000b, "STACK", NULL },
+ { 0x0000000c, "DST2D", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_pfifo_subclients[] = {
+ { 0x00000000, "PUSHBUF", NULL },
+ { 0x00000001, "SEMAPHORE", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_bar_subclients[] = {
+ { 0x00000000, "FB", NULL },
+ { 0x00000001, "IN", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_client[] = {
+ { 0x00000000, "STRMOUT", NULL },
+ { 0x00000003, "DISPATCH", vm_dispatch_subclients },
+ { 0x00000004, "PFIFO_WRITE", NULL },
+ { 0x00000005, "CCACHE", vm_ccache_subclients },
+ { 0x00000006, "PPPP", NULL },
+ { 0x00000007, "CLIPID", NULL },
+ { 0x00000008, "PFIFO_READ", NULL },
+ { 0x00000009, "VFETCH", NULL },
+ { 0x0000000a, "TEXTURE", NULL },
+ { 0x0000000b, "PROP", vm_prop_subclients },
+ { 0x0000000c, "PVP", NULL },
+ { 0x0000000d, "PBSP", NULL },
+ { 0x0000000e, "PCRYPT", NULL },
+ { 0x0000000f, "PCOUNTER", NULL },
+ { 0x00000011, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_engine[] = {
+ { 0x00000000, "PGRAPH", NULL },
+ { 0x00000001, "PVP", NULL },
+ { 0x00000004, "PEEPHOLE", NULL },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients },
+ { 0x00000006, "BAR", vm_bar_subclients },
+ { 0x00000008, "PPPP", NULL },
+ { 0x00000009, "PBSP", NULL },
+ { 0x0000000a, "PCRYPT", NULL },
+ { 0x0000000b, "PCOUNTER", NULL },
+ { 0x0000000c, "SEMAPHORE_BG", NULL },
+ { 0x0000000d, "PCOPY", NULL },
+ { 0x0000000e, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_fault[] = {
+ { 0x00000000, "PT_NOT_PRESENT", NULL },
+ { 0x00000001, "PT_TOO_SHORT", NULL },
+ { 0x00000002, "PAGE_NOT_PRESENT", NULL },
+ { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
+ { 0x00000004, "PAGE_READ_ONLY", NULL },
+ { 0x00000006, "NULL_DMAOBJ", NULL },
+ { 0x00000007, "WRONG_MEMTYPE", NULL },
+ { 0x0000000b, "VRAM_LIMIT", NULL },
+ { 0x0000000f, "DMAOBJ_LIMIT", NULL },
+ {}
+};
void
-nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
+nv50_fb_vm_trap(struct drm_device *dev, int display)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct nouveau_enum *en, *cl;
unsigned long flags;
u32 trap[6], idx, chinst;
+ u8 st0, st1, st2, st3;
int i, ch;
idx = nv_rd32(dev, 0x100c90);
@@ -117,8 +233,8 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
if (!display)
return;
+ /* lookup channel id */
chinst = (trap[2] << 16) | trap[1];
-
spin_lock_irqsave(&dev_priv->channels.lock, flags);
for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
@@ -131,9 +247,48 @@ nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
}
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
- "channel %d (0x%08x)\n",
- name, (trap[5] & 0x100 ? "read" : "write"),
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff,
- trap[0], ch, chinst);
+ /* decode status bits into something more useful */
+ if (dev_priv->chipset < 0xa3 ||
+ dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+ st0 = (trap[0] & 0x0000000f) >> 0;
+ st1 = (trap[0] & 0x000000f0) >> 4;
+ st2 = (trap[0] & 0x00000f00) >> 8;
+ st3 = (trap[0] & 0x0000f000) >> 12;
+ } else {
+ st0 = (trap[0] & 0x000000ff) >> 0;
+ st1 = (trap[0] & 0x0000ff00) >> 8;
+ st2 = (trap[0] & 0x00ff0000) >> 16;
+ st3 = (trap[0] & 0xff000000) >> 24;
+ }
+
+ NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
+ (trap[5] & 0x00000100) ? "read" : "write",
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
+
+ en = nouveau_enum_find(vm_engine, st0);
+ if (en)
+ printk("%s/", en->name);
+ else
+ printk("%02x/", st0);
+
+ cl = nouveau_enum_find(vm_client, st2);
+ if (cl)
+ printk("%s/", cl->name);
+ else
+ printk("%02x/", st2);
+
+ if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
+ else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
+ else cl = NULL;
+ if (cl)
+ printk("%s", cl->name);
+ else
+ printk("%02x", st3);
+
+ printk(" reason: ");
+ en = nouveau_enum_find(vm_fault, st1);
+ if (en)
+ printk("%s\n", en->name);
+ else
+ printk("0x%08x\n", st1);
}
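
The tables above replace nv50_fb_vm_trap()'s single formatted string with a structured decode of the trap status: an engine, a client, an optional subclient table chained through the new data member of nouveau_enum, and a fault reason. Judging from this usage, nouveau_enum is roughly the following (the real definition lives in the driver's utility headers); enum_find below mirrors what nouveau_enum_find() presumably does:

    struct nouveau_enum {
        u32 value;
        const char *name;
        void *data; /* optional nested table, e.g. subclients */
    };

    static const struct nouveau_enum *
    enum_find(const struct nouveau_enum *en, u32 value)
    {
        while (en->name) { /* tables end with an empty sentinel */
            if (en->value == value)
                return en;
            en++;
        }
        return NULL;
    }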
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 8dd04c5dac67..c34a074f7ea1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -149,6 +149,7 @@ nv50_fifo_init_regs(struct drm_device *dev)
nv_wr32(dev, 0x3204, 0);
nv_wr32(dev, 0x3210, 0);
nv_wr32(dev, 0x3270, 0);
+ nv_wr32(dev, 0x2044, 0x01003fff);
/* Enable dummy channels set up by nv50_instmem.c */
nv50_fifo_channel_enable(dev, 0);
@@ -273,7 +274,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(ramfc, 0x44, 0x2101ffff);
+ nv_wo32(ramfc, 0x44, 0x01003fff);
nv_wo32(ramfc, 0x60, 0x7fffffff);
nv_wo32(ramfc, 0x40, 0x00000000);
nv_wo32(ramfc, 0x7c, 0x30000001);
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 6b149c0cc06d..d4f4206dad7e 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -137,6 +137,7 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
struct nv50_gpio_priv *priv = pgpio->priv;
struct nv50_gpio_handler *gpioh, *tmp;
struct dcb_gpio_entry *gpio;
+ LIST_HEAD(tofree);
unsigned long flags;
gpio = nouveau_bios_gpio_entry(dev, tag);
@@ -149,10 +150,14 @@ nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
gpioh->handler != handler ||
gpioh->data != data)
continue;
- list_del(&gpioh->head);
- kfree(gpioh);
+ list_move(&gpioh->head, &tofree);
}
spin_unlock_irqrestore(&priv->lock, flags);
+
+ list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
+ flush_work_sync(&gpioh->work);
+ kfree(gpioh);
+ }
}
bool
@@ -205,7 +210,6 @@ nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv;
int ret;
if (!pgpio->priv) {
@@ -213,7 +217,6 @@ nv50_gpio_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgpio->priv;
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -293,7 +296,7 @@ nv50_gpio_isr(struct drm_device *dev)
continue;
gpioh->inhibit = true;
- queue_work(dev_priv->wq, &gpioh->work);
+ schedule_work(&gpioh->work);
}
spin_unlock(&priv->lock);
}
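
The nv50_gpio_irq_unregister() rework above closes a potential use-after-free: matching handlers are moved onto a private list while the spinlock is held, so the ISR can no longer see them, and each is freed only after flush_work_sync() has waited out any in-flight work item. The ISR correspondingly schedules on the system workqueue (schedule_work()) instead of the driver's private one. In outline, under the same assumptions as the patch (matches() and the handlers list name are illustrative):

    spin_lock_irqsave(&priv->lock, flags);
    list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head)
        if (matches(gpioh))
            list_move(&gpioh->head, &tofree);
    spin_unlock_irqrestore(&priv->lock, flags);

    list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
        flush_work_sync(&gpioh->work); /* wait for a running handler */
        kfree(gpioh);
    }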
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 37e21d2be95b..8675b00caf18 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -95,13 +95,41 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
}
static void
-nv50_graph_init_regs(struct drm_device *dev)
+nv50_graph_init_zcull(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
NV_DEBUG(dev, "\n");
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
- (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
- nv_wr32(dev, 0x402ca8, 0x800);
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ nv_wr32(dev, 0x402ca8, 0x00000800);
+ break;
+ case 0xa0:
+ default:
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ if (dev_priv->chipset == 0xa0 ||
+ dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac) {
+ nv_wr32(dev, 0x402ca8, 0x00000802);
+ } else {
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ nv_wr32(dev, 0x402ca8, 0x00000002);
+ }
+
+ break;
+ }
+
+ /* zero out zcull regions */
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
+ }
}
static int
@@ -136,6 +164,7 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
}
kfree(cp);
+ nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
@@ -151,7 +180,7 @@ nv50_graph_init(struct drm_device *dev)
nv50_graph_init_reset(dev);
nv50_graph_init_regs__nv(dev);
- nv50_graph_init_regs(dev);
+ nv50_graph_init_zcull(dev);
ret = nv50_graph_init_ctxctl(dev);
if (ret)
@@ -409,12 +438,7 @@ static int
nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data)
{
- struct nouveau_page_flip_state s;
-
- if (!nouveau_finish_page_flip(chan, &s)) {
- /* XXX - Do something here */
- }
-
+ nouveau_finish_page_flip(chan, NULL);
return 0;
}
@@ -526,11 +550,11 @@ nv86_graph_tlb_flush(struct drm_device *dev)
static struct nouveau_enum nv50_mp_exec_error_names[] =
{
- { 3, "STACK_UNDERFLOW" },
- { 4, "QUADON_ACTIVE" },
- { 8, "TIMEOUT" },
- { 0x10, "INVALID_OPCODE" },
- { 0x40, "BREAKPOINT" },
+ { 3, "STACK_UNDERFLOW", NULL },
+ { 4, "QUADON_ACTIVE", NULL },
+ { 8, "TIMEOUT", NULL },
+ { 0x10, "INVALID_OPCODE", NULL },
+ { 0x40, "BREAKPOINT", NULL },
{}
};
@@ -558,47 +582,47 @@ static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
/* There must be a *lot* of these. Will take some time to gather them up. */
struct nouveau_enum nv50_data_error_names[] = {
- { 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
- { 0x00000004, "INVALID_VALUE" },
- { 0x00000005, "INVALID_ENUM" },
- { 0x00000008, "INVALID_OBJECT" },
- { 0x00000009, "READ_ONLY_OBJECT" },
- { 0x0000000a, "SUPERVISOR_OBJECT" },
- { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
- { 0x0000000c, "INVALID_BITFIELD" },
- { 0x0000000d, "BEGIN_END_ACTIVE" },
- { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
- { 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
- { 0x00000010, "RT_DOUBLE_BIND" },
- { 0x00000011, "RT_TYPES_MISMATCH" },
- { 0x00000012, "RT_LINEAR_WITH_ZETA" },
- { 0x00000015, "FP_TOO_FEW_REGS" },
- { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
- { 0x00000017, "RT_LINEAR_WITH_MSAA" },
- { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
- { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
- { 0x0000001a, "RT_INVALID_ALIGNMENT" },
- { 0x0000001b, "SAMPLER_OVER_LIMIT" },
- { 0x0000001c, "TEXTURE_OVER_LIMIT" },
- { 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
- { 0x0000001f, "RT_BPP128_WITH_MS8" },
- { 0x00000021, "Z_OUT_OF_BOUNDS" },
- { 0x00000023, "XY_OUT_OF_BOUNDS" },
- { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
- { 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
- { 0x00000029, "CP_NO_REG_SPACE_PACKED" },
- { 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
- { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
- { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
- { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
- { 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
- { 0x00000031, "ENG2D_FORMAT_MISMATCH" },
- { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
- { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
- { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
- { 0x00000046, "LAYER_ID_NEEDS_GP" },
- { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
- { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+ { 0x00000003, "INVALID_QUERY_OR_TEXTURE", NULL },
+ { 0x00000004, "INVALID_VALUE", NULL },
+ { 0x00000005, "INVALID_ENUM", NULL },
+ { 0x00000008, "INVALID_OBJECT", NULL },
+ { 0x00000009, "READ_ONLY_OBJECT", NULL },
+ { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
+ { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
+ { 0x0000000c, "INVALID_BITFIELD", NULL },
+ { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
+ { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
+ { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
+ { 0x00000010, "RT_DOUBLE_BIND", NULL },
+ { 0x00000011, "RT_TYPES_MISMATCH", NULL },
+ { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
+ { 0x00000015, "FP_TOO_FEW_REGS", NULL },
+ { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
+ { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
+ { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
+ { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
+ { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
+ { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
+ { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
+ { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
+ { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
+ { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
+ { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+ { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
+ { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
+ { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
+ { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
+ { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
+ { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
+ { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
+ { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
+ { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
+ { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
+ { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
+ { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
+ { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
+ { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
+ { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
{}
};
@@ -678,7 +702,6 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
- nv50_fb_vm_trap(dev, display, name);
if (display) {
NV_ERROR(dev, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -701,7 +724,6 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- nv50_fb_vm_trap(dev, display, name);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
@@ -912,10 +934,10 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
printk("\n");
NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
- nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
- nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
- nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
- nv_rd32(dev, 0x40581c));
+ nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
+ nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
+ nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
+ nv_rd32(dev, 0x40501c));
}
@@ -1044,6 +1066,7 @@ nv50_graph_isr(struct drm_device *dev)
NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
"class 0x%04x mthd 0x%04x data 0x%08x\n",
chid, inst, subc, class, mthd, data);
+ nv50_fb_vm_trap(dev, 1);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index e57caa2a00e3..a6f8aa651fc6 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -56,7 +56,7 @@ nv50_channel_del(struct nouveau_channel **pchan)
nouveau_gpuobj_ref(NULL, &chan->ramfc);
nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
@@ -259,7 +259,7 @@ nv50_instmem_takedown(struct drm_device *dev)
nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
- if (dev_priv->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&dev_priv->ramin_heap))
drm_mm_takedown(&dev_priv->ramin_heap);
dev_priv->engine.instmem.priv = NULL;
@@ -300,7 +300,7 @@ nv50_instmem_resume(struct drm_device *dev)
}
struct nv50_gpuobj_node {
- struct nouveau_vram *vram;
+ struct nouveau_mem *vram;
struct nouveau_vma chan_vma;
u32 align;
};
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index b4a5ecb199f9..c25c59386420 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -41,8 +41,7 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(dev)->master;
int ret;
if (!nv_encoder->crtc)
@@ -184,8 +183,7 @@ static void
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- struct nouveau_channel *evo = dev_priv->evo;
+ struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 6144156f255a..4fd3432b5b8d 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -31,7 +31,6 @@ void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
struct nouveau_gpuobj *pgt[2])
{
- struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
@@ -58,10 +57,9 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
}
static inline u64
-nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u64 phys, u32 memtype, u32 target)
+nv50_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
- struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+ struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
phys |= 1; /* present */
phys |= (u64)memtype << 40;
@@ -85,12 +83,13 @@ nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
+ u32 comp = (mem->memtype & 0x180) >> 7;
u32 block;
int i;
- phys = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+ phys = nv50_vm_addr(vma, phys, mem->memtype, 0);
pte <<= 3;
cnt <<= 3;
@@ -107,6 +106,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
phys += block << (vma->node->type - 3);
cnt -= block;
+ if (comp) {
+ u32 tag = mem->tag->start + ((delta >> 16) * comp);
+ offset_h |= (tag << 17);
+ delta += block << (vma->node->type - 3);
+ }
while (block) {
nv_wo32(pgt, pte + 0, offset_l);
@@ -119,11 +123,11 @@ nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+ u64 phys = nv50_vm_addr(vma, (u64)*list++, mem->memtype, 2);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 58e98ad36347..ffbc3d8cf5be 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -48,42 +48,49 @@ nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
}
void
-nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *this;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
- vram = *pvram;
- *pvram = NULL;
- if (unlikely(vram == NULL))
+ mem = *pmem;
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
return;
mutex_lock(&mm->mutex);
- while (!list_empty(&vram->regions)) {
- this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ while (!list_empty(&mem->regions)) {
+ this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
list_del(&this->rl_entry);
nouveau_mm_put(mm, this);
}
+
+ if (mem->tag) {
+ drm_mm_put_block(mem->tag);
+ mem->tag = NULL;
+ }
mutex_unlock(&mm->mutex);
- kfree(vram);
+ kfree(mem);
}
int
nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
- u32 type, struct nouveau_vram **pvram)
+ u32 memtype, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
+ int comp = (memtype & 0x300) >> 8;
+ int type = (memtype & 0x07f);
int ret;
if (!types[type])
@@ -92,32 +99,46 @@ nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
align >>= 12;
size_nc >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
-
mutex_lock(&mm->mutex);
+ if (comp) {
+ if (align == 16) {
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int n = (size >> 4) * comp;
+
+ mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+ if (mem->tag)
+ mem->tag = drm_mm_get_block(mem->tag, n, 0);
+ }
+
+ if (unlikely(!mem->tag))
+ comp = 0;
+ }
+
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (comp << 7) | type;
+ mem->size = size;
+
do {
ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
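
The allocator above adds compression-tag handling: bits 8-9 of memtype select a compression mode (comp), and for 64KiB-aligned requests (align == 16 pages) it reserves (size >> 4) * comp entries from the tag heap that nv50_fb_create() sized from register 0x100320. As a worked example, a 1MiB allocation is size = 256 pages after the >> 12, so with comp == 1 it needs 256 >> 4 = 16 tags; if the reservation fails, comp silently drops to 0 and the allocation proceeds uncompressed. The chosen mode is folded back into mem->memtype at bits 7-8 (mask 0x180), which nv50_vm_map() reads to derive a per-64KiB tag index from delta >> 16.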
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
index ec18ae1c3886..fabc7fd30b1d 100644
--- a/drivers/gpu/drm/nouveau/nv84_crypt.c
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -136,5 +136,5 @@ nv84_crypt_isr(struct drm_device *dev)
nv_wr32(dev, 0x102130, stat);
nv_wr32(dev, 0x10200c, 0x10);
- nv50_fb_vm_trap(dev, show, "PCRYPT");
+ nv50_fb_vm_trap(dev, show);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index e6f92c541dba..2886f2726a9e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -116,7 +116,7 @@ nvc0_fifo_create_context(struct nouveau_channel *chan)
/* allocate vram for control regs, map into polling area */
ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
- 0, 0, true, true, &fifoch->user);
+ 0, 0, &fifoch->user);
if (ret)
goto error;
@@ -418,6 +418,12 @@ nvc0_fifo_isr(struct drm_device *dev)
{
u32 stat = nv_rd32(dev, 0x002100);
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
if (stat & 0x10000000) {
u32 units = nv_rd32(dev, 0x00259c);
u32 u = units;
@@ -446,10 +452,15 @@ nvc0_fifo_isr(struct drm_device *dev)
stat &= ~0x20000000;
}
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
if (stat) {
NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
}
-
- nv_wr32(dev, 0x2140, 0);
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index eb18a7e89f5b..3de9b721d8db 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -299,6 +299,14 @@ nvc0_graph_takedown(struct drm_device *dev)
}
static int
+nvc0_graph_mthd_page_flip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ nouveau_finish_page_flip(chan, NULL);
+ return 0;
+}
+
+static int
nvc0_graph_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -395,6 +403,7 @@ nvc0_graph_create(struct drm_device *dev)
nouveau_irq_register(dev, 25, nvc0_runk140_isr);
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_MTHD (dev, 0x9039, 0x0500, nvc0_graph_mthd_page_flip);
NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
return 0;
@@ -640,7 +649,6 @@ nvc0_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nvc0_graph_priv *priv;
int ret;
dev_priv->engine.graph.accel_blocked = true;
@@ -665,7 +673,6 @@ nvc0_graph_init(struct drm_device *dev)
if (ret)
return ret;
}
- priv = pgraph->priv;
nvc0_graph_init_obj418880(dev);
nvc0_graph_init_regs(dev);
@@ -730,9 +737,12 @@ nvc0_graph_isr(struct drm_device *dev)
u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
if (stat & 0x00000010) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
nv_wr32(dev, 0x400100, 0x00000010);
stat &= ~0x00000010;
}
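
On nvc0 the page-flip completion is delivered as a software method: nvc0_graph_create() registers handler 0x0500 on the M2MF class (0x9039) via NVOBJ_MTHD(), and the ILLEGAL_MTHD path in the ISR now first tries nouveau_gpuobj_mthd_call2() before complaining, so the flip method is consumed silently while genuinely unknown methods still log.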
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index c09091749054..82357d2df1f4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -67,7 +67,7 @@ nvc0_channel_del(struct nouveau_channel **pchan)
return;
nouveau_vm_ref(NULL, &chan->vm, NULL);
- if (chan->ramin_heap.free_stack.next)
+ if (drm_mm_initialized(&chan->ramin_heap))
drm_mm_takedown(&chan->ramin_heap);
nouveau_gpuobj_ref(NULL, &chan->ramin);
kfree(chan);
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
index e4e83c2caf5b..69af0ba7edd3 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -59,7 +59,7 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u32 next = 1 << (vma->node->type - 8);
@@ -75,11 +75,11 @@ nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- u32 pte, dma_addr_t *list, u32 cnt)
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte <<= 3;
while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+ u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, 5);
nv_wo32(pgt, pte + 0, lower_32_bits(phys));
nv_wo32(pgt, pte + 4, upper_32_bits(phys));
pte += 8;
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index 858eda5dedd1..67c6ec6f34ea 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -26,64 +26,78 @@
#include "nouveau_drv.h"
#include "nouveau_mm.h"
+/* 0 = unsupported
+ * 1 = non-compressed
+ * 3 = compressed
+ */
+static const u8 types[256] = {
+ 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+ 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+ 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+};
+
bool
nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
{
- switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
- case 0x0000:
- case 0xfe00:
- case 0xdb00:
- case 0x1100:
- return true;
- default:
- break;
- }
-
- return false;
+ u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+ return likely((types[memtype] == 1));
}
int
nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
- u32 type, struct nouveau_vram **pvram)
+ u32 type, struct nouveau_mem **pmem)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
struct nouveau_mm *mm = man->priv;
struct nouveau_mm_node *r;
- struct nouveau_vram *vram;
+ struct nouveau_mem *mem;
int ret;
size >>= 12;
align >>= 12;
ncmin >>= 12;
- vram = kzalloc(sizeof(*vram), GFP_KERNEL);
- if (!vram)
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
return -ENOMEM;
- INIT_LIST_HEAD(&vram->regions);
- vram->dev = dev_priv->dev;
- vram->memtype = type;
- vram->size = size;
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (type & 0xff);
+ mem->size = size;
mutex_lock(&mm->mutex);
do {
ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
if (ret) {
mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &vram);
+ nv50_vram_del(dev, &mem);
return ret;
}
- list_add_tail(&r->rl_entry, &vram->regions);
+ list_add_tail(&r->rl_entry, &mem->regions);
size -= r->length;
} while (size);
mutex_unlock(&mm->mutex);
- r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
- vram->offset = (u64)r->offset << 12;
- *pvram = vram;
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
return 0;
}
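
nvc0_vram_flags_valid() now consults the 256-entry types[] table instead of a hard-coded whitelist: the high byte of the tile flags indexes the table, where 0 means unsupported, 1 non-compressed and 3 compressed, and only non-compressed entries (== 1) are accepted here. For example, tile_flags 0xfe00 gives memtype 0xfe and types[0xfe] == 1, so it stays valid, while compressed entries are rejected by this check.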
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 18c3c71e41b1..b9e8efd2b754 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -71,10 +71,7 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -89,16 +86,21 @@ int r128_driver_load(struct drm_device *dev, unsigned long flags)
return drm_vblank_init(dev, 1);
}
+static struct pci_driver r128_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &r128_pci_driver);
}
static void __exit r128_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &r128_pci_driver);
}
module_init(r128_init);
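
This r128 hunk follows a DRM core rework: the pci_driver that used to be embedded in struct drm_driver becomes a standalone struct pci_driver, and the module registers through drm_pci_init()/drm_pci_exit() rather than the old drm_init()/drm_exit() entry points, presumably so the core no longer assumes every DRM device sits on PCI.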
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index e47eecfc2df4..3896ef811102 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -36,6 +36,9 @@ $(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -50,7 +53,7 @@ $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
-$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h
+$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
@@ -66,7 +69,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a4e5e53e0a62..3cd3234ba0af 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -61,8 +61,8 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
} else if (a2 > a1) {
- args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
- args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+ args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
}
break;
case RMX_FULL:
@@ -1026,7 +1026,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
* just update base pointers
*/
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1135,7 +1135,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1181,7 +1181,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
}
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -1292,7 +1292,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
new file mode 100644
index 000000000000..e148ab04b80b
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+/*
+ * evergreen cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
+const u32 cayman_default_state[] =
+{
+ /* XXX fill in additional blit state */
+
+ 0xc0026900,
+ 0x00000316,
+ 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ 0x00000010, /* */
+
+ 0xc0026900,
+ 0x000000d9,
+ 0x00000000, /* CP_RINGID */
+ 0x00000000, /* CP_VMID */
+};
+
+const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
new file mode 100644
index 000000000000..33b75e5d0fa4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef CAYMAN_BLIT_SHADERS_H
+#define CAYMAN_BLIT_SHADERS_H
+
+extern const u32 cayman_default_state[];
+
+extern const u32 cayman_default_size;
+
+#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 6140ea1de45a..b9427e689cf3 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -804,7 +804,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
}
}
-static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
@@ -957,7 +957,7 @@ void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
-static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
@@ -1011,7 +1011,7 @@ static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_sa
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
-static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
@@ -1108,7 +1108,7 @@ static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
-static void evergreen_mc_program(struct radeon_device *rdev)
+void evergreen_mc_program(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp;
@@ -2576,7 +2576,7 @@ void evergreen_irq_disable(struct radeon_device *rdev)
evergreen_disable_interrupt_state(rdev);
}
-static void evergreen_irq_suspend(struct radeon_device *rdev)
+void evergreen_irq_suspend(struct radeon_device *rdev)
{
evergreen_irq_disable(rdev);
r600_rlc_stop(rdev);
@@ -2899,7 +2899,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
}
- r = btc_mc_load_microcode(rdev);
+ r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
@@ -2981,7 +2981,7 @@ int evergreen_resume(struct radeon_device *rdev)
r = evergreen_startup(rdev);
if (r) {
- DRM_ERROR("r600 startup failed on resume\n");
+ DRM_ERROR("evergreen startup failed on resume\n");
return r;
}
@@ -3061,7 +3061,7 @@ int evergreen_init(struct radeon_device *rdev)
}
/* Must be an ATOMBIOS */
if (!rdev->is_atom_bios) {
- dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+ dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
return -EINVAL;
}
r = radeon_atombios_init(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 2be698e78ff2..ba06a69c6de8 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -579,7 +579,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size += evergreen_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("evergreen failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 345a75a03c96..edde90b37554 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -29,6 +29,7 @@
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
+#include "cayman_reg_safe.h"
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -292,33 +293,28 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (wait_reg_mem.type != PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -336,8 +332,7 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -362,12 +357,10 @@ static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
-out:
- return r;
+ return 0;
}
static int evergreen_packet0_check(struct radeon_cs_parser *p,
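The hunks above collapse the error paths of evergreen_cs_packet_parse_vline() from a goto-based single exit to direct returns, so the r accumulator and the out: label disappear. A minimal sketch of the transformation, valid here because nothing needs cleanup on these paths:

        /* before */
        if (!obj) {
                r = -EINVAL;
                goto out;
        }
out:
        return r;

        /* after */
        if (!obj)
                return -EINVAL;
        return 0;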
@@ -425,18 +418,28 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
struct radeon_cs_reloc *reloc;
- u32 last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+ u32 last_reg;
u32 m, i, tmp, *ib;
int r;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
+ else
+ last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
+
i = (reg >> 7);
if (i >= last_reg) {
dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
m = 1 << ((reg >> 2) & 31);
- if (!(evergreen_reg_safe_bm[i] & m))
- return 0;
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ if (!(cayman_reg_safe_bm[i] & m))
+ return 0;
+ } else {
+ if (!(evergreen_reg_safe_bm[i] & m))
+ return 0;
+ }
ib = p->ib->ptr;
switch (reg) {
/* force following reg to 0 in an attempt to disable out buffer
@@ -468,12 +471,42 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
case SQ_VSTMP_RING_ITEMSIZE:
case VGT_TF_RING_SIZE:
/* get value to populate the IB, don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
+ case CAYMAN_DB_EQAA:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
+ case CAYMAN_DB_DEPTH_INFO:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ break;
case DB_Z_INFO:
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -559,9 +592,23 @@ static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u3
track->cb_shader_mask = radeon_get_ib_value(p, idx);
break;
case PA_SC_AA_CONFIG:
+ if (p->rdev->family >= CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
track->nsamples = 1 << tmp;
break;
+ case CAYMAN_PA_SC_AA_CONFIG:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
+ track->nsamples = 1 << tmp;
+ break;
case CB_COLOR0_VIEW:
case CB_COLOR1_VIEW:
case CB_COLOR2_VIEW:
@@ -942,6 +989,37 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
DRM_ERROR("bad CONTEXT_CONTROL\n");
@@ -956,6 +1034,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ if (p->rdev->family < CHIP_CAYMAN) {
+ DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ return -EINVAL;
+ }
+ if (pkt->count) {
+ DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ return -EINVAL;
+ }
+ break;
case PACKET3_INDEX_BASE:
if (pkt->count != 1) {
DRM_ERROR("bad INDEX_BASE\n");
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index eb4acf4528ff..9aaa3f0c9372 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -755,13 +755,21 @@
#define SQ_CONST_MEM_BASE 0x8df8
+#define SQ_ESGS_RING_BASE 0x8c40
#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_BASE 0x8c48
#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_LSTMP_RING_BASE 0x8e10
#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
#define SQ_HSTMP_RING_SIZE 0x8e1c
#define VGT_TF_RING_SIZE 0x8988
@@ -1093,5 +1101,14 @@
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+/* cayman 3D regs */
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
+#define CAYMAN_DB_EQAA 0x28804
+#define CAYMAN_DB_DEPTH_INFO 0x2803C
+#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
+#define CAYMAN_MSAA_NUM_SAMPLES_SHIFT 0
+#define CAYMAN_MSAA_NUM_SAMPLES_MASK 0x7
+/* cayman packet3 addition */
+#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
#endif
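The CS checker indexes the safe-register bitmaps by byte offset: each u32 word of the bitmap covers 128 bytes (32 dword registers), so the word is reg >> 7 and the bit is (reg >> 2) & 31. A worked example with CAYMAN_DB_EQAA (0x28804) from the list above; a clear bit accepts the register as-is, a set bit routes it to the per-register switch:

        u32 i = CAYMAN_DB_EQAA >> 7;                /* 0x28804 >> 7 = 1296 */
        u32 m = 1 << ((CAYMAN_DB_EQAA >> 2) & 31);  /* bit 1 of word 1296 */
        if (!(cayman_reg_safe_bm[i] & m))
                return 0;   /* no relocation or family handling needed */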
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 5e0bef80ad7f..7aade20f63a8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -31,12 +31,25 @@
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
+#include "cayman_blit_shaders.h"
+
+extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
+extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
+extern void evergreen_mc_program(struct radeon_device *rdev);
+extern void evergreen_irq_suspend(struct radeon_device *rdev);
+extern int evergreen_mc_init(struct radeon_device *rdev);
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024
+#define CAYMAN_PFP_UCODE_SIZE 2176
+#define CAYMAN_PM4_UCODE_SIZE 2176
+#define CAYMAN_RLC_UCODE_SIZE 1024
+#define CAYMAN_MC_UCODE_SIZE 6037
+
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
@@ -48,6 +61,10 @@ MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
+MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
#define BTC_IO_MC_REGS_SIZE 29
@@ -147,12 +164,44 @@ static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
{0x0000009f, 0x00916a00}
};
-int btc_mc_load_microcode(struct radeon_device *rdev)
+static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+ {0x00000077, 0xff010100},
+ {0x00000078, 0x00000000},
+ {0x00000079, 0x00001434},
+ {0x0000007a, 0xcc08ec08},
+ {0x0000007b, 0x00040000},
+ {0x0000007c, 0x000080c0},
+ {0x0000007d, 0x09000000},
+ {0x0000007e, 0x00210404},
+ {0x00000081, 0x08a8e800},
+ {0x00000082, 0x00030444},
+ {0x00000083, 0x00000000},
+ {0x00000085, 0x00000001},
+ {0x00000086, 0x00000002},
+ {0x00000087, 0x48490000},
+ {0x00000088, 0x20244647},
+ {0x00000089, 0x00000005},
+ {0x0000008b, 0x66030000},
+ {0x0000008c, 0x00006603},
+ {0x0000008d, 0x00000100},
+ {0x0000008f, 0x00001c0a},
+ {0x00000090, 0xff000001},
+ {0x00000094, 0x00101101},
+ {0x00000095, 0x00000fff},
+ {0x00000096, 0x00116fff},
+ {0x00000097, 0x60010000},
+ {0x00000098, 0x10010000},
+ {0x00000099, 0x00006000},
+ {0x0000009a, 0x00001000},
+ {0x0000009f, 0x00976b00}
+};
+
+int ni_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 mem_type, running, blackout = 0;
u32 *io_mc_regs;
- int i;
+ int i, ucode_size, regs_size;
if (!rdev->mc_fw)
return -EINVAL;
@@ -160,13 +209,24 @@ int btc_mc_load_microcode(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_BARTS:
io_mc_regs = (u32 *)&barts_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_TURKS:
io_mc_regs = (u32 *)&turks_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
case CHIP_CAICOS:
default:
io_mc_regs = (u32 *)&caicos_io_mc_regs;
+ ucode_size = BTC_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
+ break;
+ case CHIP_CAYMAN:
+ io_mc_regs = (u32 *)&cayman_io_mc_regs;
+ ucode_size = CAYMAN_MC_UCODE_SIZE;
+ regs_size = BTC_IO_MC_REGS_SIZE;
break;
}
@@ -184,13 +244,13 @@ int btc_mc_load_microcode(struct radeon_device *rdev)
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
/* load mc io regs */
- for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+ for (i = 0; i < regs_size; i++) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
/* load the MC ucode */
fw_data = (const __be32 *)rdev->mc_fw->data;
- for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+ for (i = 0; i < ucode_size; i++)
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
/* put the engine back into the active state */
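Each *_io_mc_regs table is a list of {index, data} pairs; the function casts the 2-D array to a flat u32 pointer, so pair i occupies slots 2*i and 2*i + 1. For the first Cayman pair above, the register-programming loop reduces to:

        WREG32(MC_SEQ_IO_DEBUG_INDEX, 0x00000077);  /* io_mc_regs[0] */
        WREG32(MC_SEQ_IO_DEBUG_DATA,  0xff010100);  /* io_mc_regs[1] */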
@@ -231,23 +291,38 @@ int ni_init_microcode(struct radeon_device *rdev)
case CHIP_BARTS:
chip_name = "BARTS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
break;
case CHIP_TURKS:
chip_name = "TURKS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
break;
case CHIP_CAICOS:
chip_name = "CAICOS";
rlc_chip_name = "BTC";
+ pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+ me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+ mc_req_size = BTC_MC_UCODE_SIZE * 4;
+ break;
+ case CHIP_CAYMAN:
+ chip_name = "CAYMAN";
+ rlc_chip_name = "CAYMAN";
+ pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
+ me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
+ rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
+ mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
break;
default: BUG();
}
- pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
- me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
- rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
- mc_req_size = BTC_MC_UCODE_SIZE * 4;
-
DRM_INFO("Loading %s Microcode\n", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
@@ -314,3 +389,1204 @@ out:
return err;
}
+/*
+ * Core functions
+ */
+static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
+ u32 num_backends_per_asic,
+ u32 *backend_disable_mask_per_asic,
+ u32 num_shader_engines)
+{
+ u32 backend_map = 0;
+ u32 enabled_backends_mask = 0;
+ u32 enabled_backends_count = 0;
+ u32 num_backends_per_se;
+ u32 cur_pipe;
+ u32 swizzle_pipe[CAYMAN_MAX_PIPES];
+ u32 cur_backend = 0;
+ u32 i;
+ bool force_no_swizzle;
+
+ /* force legal values */
+ if (num_tile_pipes < 1)
+ num_tile_pipes = 1;
+ if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
+ num_tile_pipes = rdev->config.cayman.max_tile_pipes;
+ if (num_shader_engines < 1)
+ num_shader_engines = 1;
+ if (num_shader_engines > rdev->config.cayman.max_shader_engines)
+ num_shader_engines = rdev->config.cayman.max_shader_engines;
+ if (num_backends_per_asic > num_shader_engines)
+ num_backends_per_asic = num_shader_engines;
+ if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
+ num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
+
+ /* make sure we have the same number of backends per se */
+ num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
+ /* set up the number of backends per se */
+ num_backends_per_se = num_backends_per_asic / num_shader_engines;
+ if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
+ num_backends_per_se = rdev->config.cayman.max_backends_per_se;
+ num_backends_per_asic = num_backends_per_se * num_shader_engines;
+ }
+
+ /* create enable mask and count for enabled backends */
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
+ enabled_backends_mask |= (1 << i);
+ ++enabled_backends_count;
+ }
+ if (enabled_backends_count == num_backends_per_asic)
+ break;
+ }
+
+ /* force the backends mask to match the current number of backends */
+ if (enabled_backends_count != num_backends_per_asic) {
+ u32 this_backend_enabled;
+ u32 shader_engine;
+ u32 backend_per_se;
+
+ enabled_backends_mask = 0;
+ enabled_backends_count = 0;
+ *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ /* calc the current se */
+ shader_engine = i / rdev->config.cayman.max_backends_per_se;
+ /* calc the backend per se */
+ backend_per_se = i % rdev->config.cayman.max_backends_per_se;
+ /* default to not enabled */
+ this_backend_enabled = 0;
+ if ((shader_engine < num_shader_engines) &&
+ (backend_per_se < num_backends_per_se))
+ this_backend_enabled = 1;
+ if (this_backend_enabled) {
+ enabled_backends_mask |= (1 << i);
+ *backend_disable_mask_per_asic &= ~(1 << i);
+ ++enabled_backends_count;
+ }
+ }
+ }
+
+ memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ force_no_swizzle = true;
+ break;
+ default:
+ force_no_swizzle = false;
+ break;
+ }
+ if (force_no_swizzle) {
+ bool last_backend_enabled = false;
+
+ force_no_swizzle = false;
+ for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
+ if (((enabled_backends_mask >> i) & 1) == 1) {
+ if (last_backend_enabled)
+ force_no_swizzle = true;
+ last_backend_enabled = true;
+ } else
+ last_backend_enabled = false;
+ }
+ }
+
+ switch (num_tile_pipes) {
+ case 1:
+ case 3:
+ case 5:
+ case 7:
+ DRM_ERROR("odd number of pipes!\n");
+ break;
+ case 2:
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ break;
+ case 4:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ swizzle_pipe[3] = 3;
+ }
+ break;
+ case 6:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 5;
+ }
+ break;
+ case 8:
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 1;
+ swizzle_pipe[5] = 3;
+ swizzle_pipe[6] = 5;
+ swizzle_pipe[7] = 7;
+ }
+ break;
+ }
+
+ for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
+ while (((1 << cur_backend) & enabled_backends_mask) == 0)
+ cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
+
+ backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
+
+ cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
+ }
+
+ return backend_map;
+}
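+
+/* Illustration: the final loop above packs one 4-bit backend id per
+ * pipe, nibble n of backend_map holding the backend routed to pipe n.
+ * With eight pipes, an identity swizzle and all eight backends enabled
+ * this yields backend_map = 0x76543210.
+ */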
+
+static void cayman_program_channel_remap(struct radeon_device *rdev)
+{
+ u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ default:
+ /* default mapping */
+ mc_shared_chremap = 0x00fac688;
+ break;
+ }
+
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ default:
+ /* tcp_chan_steer_lo = 0x54763210 */
+ tcp_chan_steer_lo = 0x76543210;
+ tcp_chan_steer_hi = 0x0000ba98;
+ break;
+ }
+
+ WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+ WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+ WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
+static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
+ u32 disable_mask_per_se,
+ u32 max_disable_mask_per_se,
+ u32 num_shader_engines)
+{
+ u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
+ u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
+
+ if (num_shader_engines == 1)
+ return disable_mask_per_asic;
+ else if (num_shader_engines == 2)
+ return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
+ else
+ return 0xffffffff;
+}
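+
+/* Example: a per-SE disable mask of 0x3 (two backends off) gives a
+ * field width of 2 from r600_count_pipe_bits(), so on a two-SE part the
+ * ASIC-wide mask becomes 0x3 | (0x3 << 2) = 0xf.
+ */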
+
+static void cayman_gpu_init(struct radeon_device *rdev)
+{
+ u32 cc_rb_backend_disable = 0;
+ u32 cc_gc_shader_pipe_config;
+ u32 gb_addr_config = 0;
+ u32 mc_shared_chmap, mc_arb_ramcfg;
+ u32 gb_backend_map;
+ u32 cgts_tcc_disable;
+ u32 sx_debug_1;
+ u32 smx_dc_ctl0;
+ u32 gc_user_shader_pipe_config;
+ u32 gc_user_rb_backend_disable;
+ u32 cgts_user_tcc_disable;
+ u32 cgts_sm_ctrl_reg;
+ u32 hdp_host_path_cntl;
+ u32 tmp;
+ int i, j;
+
+ switch (rdev->family) {
+ case CHIP_CAYMAN:
+ default:
+ rdev->config.cayman.max_shader_engines = 2;
+ rdev->config.cayman.max_pipes_per_simd = 4;
+ rdev->config.cayman.max_tile_pipes = 8;
+ rdev->config.cayman.max_simds_per_se = 12;
+ rdev->config.cayman.max_backends_per_se = 4;
+ rdev->config.cayman.max_texture_channel_caches = 8;
+ rdev->config.cayman.max_gprs = 256;
+ rdev->config.cayman.max_threads = 256;
+ rdev->config.cayman.max_gs_threads = 32;
+ rdev->config.cayman.max_stack_entries = 512;
+ rdev->config.cayman.sx_num_of_sets = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sq_num_cf_insts = 2;
+
+ rdev->config.cayman.sc_prim_fifo_size = 0x100;
+ rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ break;
+ }
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+
+ WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
+
+ mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
+ mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+ cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE);
+ gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
+ gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
+ cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
+
+ rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
+ tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
+ rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
+ rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
+ tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
+ rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
+ tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
+ rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
+ tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
+ rdev->config.cayman.backend_disable_mask_per_asic =
+ cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
+ rdev->config.cayman.num_shader_engines);
+ rdev->config.cayman.backend_map =
+ cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
+ rdev->config.cayman.num_backends_per_se *
+ rdev->config.cayman.num_shader_engines,
+ &rdev->config.cayman.backend_disable_mask_per_asic,
+ rdev->config.cayman.num_shader_engines);
+ tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
+ rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
+ tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
+ rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+ if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
+ rdev->config.cayman.mem_max_burst_length_bytes = 512;
+ tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
+ if (rdev->config.cayman.mem_row_size_in_kb > 4)
+ rdev->config.cayman.mem_row_size_in_kb = 4;
+ /* XXX use MC settings? */
+ rdev->config.cayman.shader_engine_tile_size = 32;
+ rdev->config.cayman.num_gpus = 1;
+ rdev->config.cayman.multi_gpu_tile_size = 64;
+
+ /* gb_addr_config = 0x02011003 */
+#if 0
+ gb_addr_config = RREG32(GB_ADDR_CONFIG);
+#else
+ gb_addr_config = 0;
+ switch (rdev->config.cayman.num_tile_pipes) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_PIPES(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_PIPES(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_PIPES(2);
+ break;
+ case 8:
+ gb_addr_config |= NUM_PIPES(3);
+ break;
+ }
+
+ tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
+ gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
+ gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
+ tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
+ gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
+ switch (rdev->config.cayman.num_gpus) {
+ case 1:
+ default:
+ gb_addr_config |= NUM_GPUS(0);
+ break;
+ case 2:
+ gb_addr_config |= NUM_GPUS(1);
+ break;
+ case 4:
+ gb_addr_config |= NUM_GPUS(2);
+ break;
+ }
+ switch (rdev->config.cayman.multi_gpu_tile_size) {
+ case 16:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
+ break;
+ case 32:
+ default:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
+ break;
+ case 64:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
+ break;
+ case 128:
+ gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
+ break;
+ }
+ switch (rdev->config.cayman.mem_row_size_in_kb) {
+ case 1:
+ default:
+ gb_addr_config |= ROW_SIZE(0);
+ break;
+ case 2:
+ gb_addr_config |= ROW_SIZE(1);
+ break;
+ case 4:
+ gb_addr_config |= ROW_SIZE(2);
+ break;
+ }
+#endif
+
+ tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
+ rdev->config.cayman.num_tile_pipes = (1 << tmp);
+ tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+ rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
+ tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
+ rdev->config.cayman.num_shader_engines = tmp + 1;
+ tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
+ rdev->config.cayman.num_gpus = tmp + 1;
+ tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
+ rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
+ tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
+ rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
+
+ /* gb_backend_map = 0x76541032; */
+#if 0
+ gb_backend_map = RREG32(GB_BACKEND_MAP);
+#else
+ gb_backend_map =
+ cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
+ rdev->config.cayman.num_backends_per_se *
+ rdev->config.cayman.num_shader_engines,
+ &rdev->config.cayman.backend_disable_mask_per_asic,
+ rdev->config.cayman.num_shader_engines);
+#endif
+ /* setup tiling info dword. gb_addr_config is not adequate since it does
+ * not have bank info, so create a custom tiling dword.
+ * bits 3:0 num_pipes
+ * bits 7:4 num_banks
+ * bits 11:8 group_size
+ * bits 15:12 row_size
+ */
+ rdev->config.cayman.tile_config = 0;
+ switch (rdev->config.cayman.num_tile_pipes) {
+ case 1:
+ default:
+ rdev->config.cayman.tile_config |= (0 << 0);
+ break;
+ case 2:
+ rdev->config.cayman.tile_config |= (1 << 0);
+ break;
+ case 4:
+ rdev->config.cayman.tile_config |= (2 << 0);
+ break;
+ case 8:
+ rdev->config.cayman.tile_config |= (3 << 0);
+ break;
+ }
+ rdev->config.cayman.tile_config |=
+ ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ rdev->config.cayman.tile_config |=
+ (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
+ rdev->config.cayman.tile_config |=
+ ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
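+ /* Worked example with illustrative field values: 8 pipes encode as 3,
+ * and with a NOOFBANK field of 2, a pipe-interleave field of 1 and a
+ * row-size field of 2 this gives tile_config = (2 << 12) | (1 << 8) |
+ * (2 << 4) | 3 = 0x2123.
+ */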
+
+ WREG32(GB_BACKEND_MAP, gb_backend_map);
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+
+ cayman_program_channel_remap(rdev);
+
+ /* primary versions */
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
+
+ /* user versions */
+ WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
+ WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+
+ /* reprogram the shader complex */
+ cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
+ for (i = 0; i < 16; i++)
+ WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
+ WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
+
+ /* set HW defaults for 3D engine */
+ WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
+
+ sx_debug_1 = RREG32(SX_DEBUG_1);
+ sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
+ WREG32(SX_DEBUG_1, sx_debug_1);
+
+ smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
+ smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
+ smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
+ WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+
+ WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
+
+ /* need to be explicitly zeroed */
+ WREG32(VGT_OFFCHIP_LDS_BASE, 0);
+ WREG32(SQ_LSTMP_RING_BASE, 0);
+ WREG32(SQ_HSTMP_RING_BASE, 0);
+ WREG32(SQ_ESTMP_RING_BASE, 0);
+ WREG32(SQ_GSTMP_RING_BASE, 0);
+ WREG32(SQ_VSTMP_RING_BASE, 0);
+ WREG32(SQ_PSTMP_RING_BASE, 0);
+
+ WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
+
+ WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+ POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+ SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
+
+ WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+ SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+ SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
+
+ WREG32(VGT_NUM_INSTANCES, 1);
+
+ WREG32(CP_PERFMON_CNTL, 0);
+
+ WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
+ FETCH_FIFO_HIWATER(0x4) |
+ DONE_FIFO_HIWATER(0xe0) |
+ ALU_UPDATE_FIFO_HIWATER(0x8)));
+
+ WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
+ WREG32(SQ_CONFIG, (VC_ENABLE |
+ EXPORT_SRC_C |
+ GFX_PRIO(0) |
+ CS1_PRIO(0) |
+ CS2_PRIO(1)));
+ WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
+
+ WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
+ FORCE_EOV_MAX_REZ_CNT(255)));
+
+ WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
+ AUTO_INVLD_EN(ES_AND_GS_AUTO));
+
+ WREG32(VGT_GS_VERTEX_REUSE, 16);
+ WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
+
+ WREG32(CB_PERF_CTR0_SEL_0, 0);
+ WREG32(CB_PERF_CTR0_SEL_1, 0);
+ WREG32(CB_PERF_CTR1_SEL_0, 0);
+ WREG32(CB_PERF_CTR1_SEL_1, 0);
+ WREG32(CB_PERF_CTR2_SEL_0, 0);
+ WREG32(CB_PERF_CTR2_SEL_1, 0);
+ WREG32(CB_PERF_CTR3_SEL_0, 0);
+ WREG32(CB_PERF_CTR3_SEL_1, 0);
+
+ hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
+ WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
+
+ WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
+
+ udelay(50);
+}
+
+/*
+ * GART
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
+{
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
+ /* bits 0-7 are the VM contexts 0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1);
+}
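+
+/* Note: per the "bits 0-7" comment above, writing 1 invalidates only VM
+ * context 0; writing 0xff would invalidate all eight contexts at once.
+ */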
+
+int cayman_pcie_gart_enable(struct radeon_device *rdev)
+{
+ int r;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+ ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ /* setup context0 */
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ WREG32(VM_CONTEXT0_CNTL2, 0);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ /* disable context1-7 */
+ WREG32(VM_CONTEXT1_CNTL2, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+
+ cayman_pcie_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
+
+void cayman_pcie_gart_disable(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Disable all tables */
+ WREG32(VM_CONTEXT0_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, 0);
+ /* Setup TLB control */
+ WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7) |
+ CONTEXT1_IDENTITY_ACCESS_MODE(1));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
+ L2_CACHE_BIGK_FRAGMENT_SIZE(6));
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
+}
+
+void cayman_pcie_gart_fini(struct radeon_device *rdev)
+{
+ cayman_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+/*
+ * CP.
+ */
+static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ WREG32(CP_ME_CNTL, 0);
+ else {
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+ WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
+ WREG32(SCRATCH_UMSK, 0);
+ }
+}
+
+static int cayman_cp_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->me_fw || !rdev->pfp_fw)
+ return -EINVAL;
+
+ cayman_cp_enable(rdev, false);
+
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
+ WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ WREG32(CP_ME_RAM_WADDR, 0);
+ for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
+ WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
+
+ WREG32(CP_PFP_UCODE_ADDR, 0);
+ WREG32(CP_ME_RAM_WADDR, 0);
+ WREG32(CP_ME_RAM_RADDR, 0);
+ return 0;
+}
+
+static int cayman_cp_start(struct radeon_device *rdev)
+{
+ int r, i;
+
+ r = radeon_ring_lock(rdev, 7);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+ radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(rdev, 0x0);
+ radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(rdev, 0);
+ radeon_ring_write(rdev, 0);
+ radeon_ring_unlock_commit(rdev);
+
+ cayman_cp_enable(rdev, true);
+
+ r = radeon_ring_lock(rdev, cayman_default_size + 19);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
+
+ /* setup clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ for (i = 0; i < cayman_default_size; i++)
+ radeon_ring_write(rdev, cayman_default_state[i]);
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ /* set clear context state */
+ radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(rdev, 0);
+
+ /* SQ_VTX_BASE_VTX_LOC */
+ radeon_ring_write(rdev, 0xc0026f00);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(rdev, 0x00000000);
+
+ /* Clear consts */
+ radeon_ring_write(rdev, 0xc0036f00);
+ radeon_ring_write(rdev, 0x00000bc4);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(rdev, 0xffffffff);
+
+ radeon_ring_write(rdev, 0xc0026900);
+ radeon_ring_write(rdev, 0x00000316);
+ radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(rdev, 0x00000010); /* */
+
+ radeon_ring_unlock_commit(rdev);
+
+ /* XXX init other rings */
+
+ return 0;
+}
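+
+/* Decoding the raw ring dwords above with the CP_PACKET_GET_* macros
+ * from nid.h: 0xc0026900 is type 3 (bits 31:30), count 2 (bits 29:16),
+ * opcode 0x69 (bits 15:8), i.e. a SET_CONTEXT_REG write of two dwords
+ * starting at context offset 0x316 (VGT_VERTEX_REUSE_BLOCK_CNTL).
+ */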
+
+static void cayman_cp_fini(struct radeon_device *rdev)
+{
+ cayman_cp_enable(rdev, false);
+ radeon_ring_fini(rdev);
+}
+
+int cayman_cp_resume(struct radeon_device *rdev)
+{
+ u32 tmp;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
+ WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
+ SOFT_RESET_PA |
+ SOFT_RESET_SH |
+ SOFT_RESET_VGT |
+ SOFT_RESET_SX));
+ RREG32(GRBM_SOFT_RESET);
+ mdelay(15);
+ WREG32(GRBM_SOFT_RESET, 0);
+ RREG32(GRBM_SOFT_RESET);
+
+ WREG32(CP_SEM_WAIT_TIMER, 0x4);
+
+ /* Set the write pointer delay */
+ WREG32(CP_RB_WPTR_DELAY, 0);
+
+ WREG32(CP_DEBUG, (1 << 27));
+
+ /* ring 0 - compute and gfx */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB0_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB0_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+ WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+ if (rdev->wb.enabled)
+ WREG32(SCRATCH_UMSK, 0xff);
+ else {
+ tmp |= RB_NO_UPDATE;
+ WREG32(SCRATCH_UMSK, 0);
+ }
+
+ mdelay(1);
+ WREG32(CP_RB0_CNTL, tmp);
+
+ WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ rdev->cp.wptr = RREG32(CP_RB0_WPTR);
+
+ /* ring1 - compute only */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB1_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB1_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB1_CNTL, tmp);
+
+ WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+
+ rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+ rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
+
+ /* ring2 - compute only */
+ /* Set ring buffer size */
+ rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+ tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+#ifdef __BIG_ENDIAN
+ tmp |= BUF_SWAP_32BIT;
+#endif
+ WREG32(CP_RB2_CNTL, tmp);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
+ WREG32(CP_RB2_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
+ WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
+
+ mdelay(1);
+ WREG32(CP_RB2_CNTL, tmp);
+
+ WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+
+ rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+ rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
+
+ /* start the rings */
+ cayman_cp_start(rdev);
+ rdev->cp.ready = true;
+ rdev->cp1.ready = true;
+ rdev->cp2.ready = true;
+ /* this only tests cp0 */
+ r = radeon_ring_test(rdev);
+ if (r) {
+ rdev->cp.ready = false;
+ rdev->cp1.ready = false;
+ rdev->cp2.ready = false;
+ return r;
+ }
+
+ return 0;
+}
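+
+/* Example of the ring-size encoding above: for the 1 MB ring set up in
+ * cayman_init(), rb_bufsz = drm_order(1048576 / 8) = 17 and, assuming
+ * RADEON_GPU_PAGE_SIZE is 4096, the block-size field is drm_order(512)
+ * = 9, so tmp = (9 << 8) | 17 = 0x911 before the swap/update flags.
+ */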
+
+bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+{
+ u32 srbm_status;
+ u32 grbm_status;
+ u32 grbm_status_se0, grbm_status_se1;
+ struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
+ int r;
+
+ srbm_status = RREG32(SRBM_STATUS);
+ grbm_status = RREG32(GRBM_STATUS);
+ grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+ grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+ if (!(grbm_status & GUI_ACTIVE)) {
+ r100_gpu_lockup_update(lockup, &rdev->cp);
+ return false;
+ }
+ /* force CP activities */
+ r = radeon_ring_lock(rdev, 2);
+ if (!r) {
+ /* PACKET2 NOP */
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_unlock_commit(rdev);
+ }
+ /* XXX deal with CP0,1,2 */
+ rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+}
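+
+/* The two 0x80000000 writes above are type-2 packets (CP_PACKET2 in
+ * nid.h), i.e. CP no-ops issued purely to force command-processor
+ * activity before re-sampling the read pointer.
+ */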
+
+static int cayman_gpu_soft_reset(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 grbm_reset = 0;
+
+ if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+ return 0;
+
+ dev_info(rdev->dev, "GPU softreset\n");
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+ }
+ /* Disable CP parsing/prefetching */
+ WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+ /* reset all the gfx blocks */
+ grbm_reset = (SOFT_RESET_CP |
+ SOFT_RESET_CB |
+ SOFT_RESET_DB |
+ SOFT_RESET_GDS |
+ SOFT_RESET_PA |
+ SOFT_RESET_SC |
+ SOFT_RESET_SPI |
+ SOFT_RESET_SH |
+ SOFT_RESET_SX |
+ SOFT_RESET_TC |
+ SOFT_RESET_TA |
+ SOFT_RESET_VGT |
+ SOFT_RESET_IA);
+
+ dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
+ WREG32(GRBM_SOFT_RESET, grbm_reset);
+ (void)RREG32(GRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(GRBM_SOFT_RESET, 0);
+ (void)RREG32(GRBM_SOFT_RESET);
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
+ RREG32(GRBM_STATUS));
+ dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
+ RREG32(GRBM_STATUS_SE0));
+ dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
+ RREG32(GRBM_STATUS_SE1));
+ dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
+ RREG32(SRBM_STATUS));
+ evergreen_mc_resume(rdev, &save);
+ return 0;
+}
+
+int cayman_asic_reset(struct radeon_device *rdev)
+{
+ return cayman_gpu_soft_reset(rdev);
+}
+
+static int cayman_startup(struct radeon_device *rdev)
+{
+ int r;
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+ r = ni_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+ r = ni_mc_load_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load MC firmware!\n");
+ return r;
+ }
+
+ evergreen_mc_program(rdev);
+ r = cayman_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ cayman_gpu_init(rdev);
+
+#if 0
+ r = cayman_blit_init(rdev);
+ if (r) {
+ cayman_blit_fini(rdev);
+ rdev->asic->copy = NULL;
+ dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+ }
+#endif
+
+ /* allocate wb buffer */
+ r = radeon_wb_init(rdev);
+ if (r)
+ return r;
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ evergreen_irq_set(rdev);
+
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ if (r)
+ return r;
+ r = cayman_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = cayman_cp_resume(rdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+int cayman_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
+ * posting performs the tasks needed to bring the GPU back into good
+ * shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+
+ r = cayman_startup(rdev);
+ if (r) {
+ DRM_ERROR("cayman startup failed on resume\n");
+ return r;
+ }
+
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+int cayman_suspend(struct radeon_device *rdev)
+{
+ /* int r; */
+
+ /* FIXME: we should wait for ring to be empty */
+ cayman_cp_enable(rdev, false);
+ rdev->cp.ready = false;
+ evergreen_irq_suspend(rdev);
+ radeon_wb_disable(rdev);
+ cayman_pcie_gart_disable(rdev);
+
+#if 0
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+#endif
+ return 0;
+}
+
+/* The plan is to move initialization into this function and to use
+ * helper functions so that radeon_device_init does little more than
+ * call ASIC-specific functions. This should also allow us to remove
+ * a bunch of callbacks like vram_info.
+ */
+int cayman_init(struct radeon_device *rdev)
+{
+ int r;
+
+ /* This doesn't do much */
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+
+ /* Post card if necessary */
+ if (!radeon_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+
+ rdev->accel_working = true;
+ r = cayman_startup(rdev);
+ if (r) {
+ dev_err(rdev->dev, "disabling GPU acceleration\n");
+ cayman_cp_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ rdev->accel_working = false;
+ }
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+
+ /* Don't start up if the MC ucode is missing.
+ * The default clocks and voltages before the MC ucode
+ * is loaded are not sufficient for advanced operations.
+ */
+ if (!rdev->mc_fw) {
+ DRM_ERROR("radeon: MC ucode required for NI+.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void cayman_fini(struct radeon_device *rdev)
+{
+ /* cayman_blit_fini(rdev); */
+ cayman_cp_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_wb_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ cayman_pcie_gart_fini(rdev);
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+}
+
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index f7b445390e02..0f9a08b53fbd 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -24,7 +24,101 @@
#ifndef NI_H
#define NI_H
+#define CAYMAN_MAX_SH_GPRS 256
+#define CAYMAN_MAX_TEMP_GPRS 16
+#define CAYMAN_MAX_SH_THREADS 256
+#define CAYMAN_MAX_SH_STACK_ENTRIES 4096
+#define CAYMAN_MAX_FRC_EOV_CNT 16384
+#define CAYMAN_MAX_BACKENDS 8
+#define CAYMAN_MAX_BACKENDS_MASK 0xFF
+#define CAYMAN_MAX_BACKENDS_PER_SE_MASK 0xF
+#define CAYMAN_MAX_SIMDS 16
+#define CAYMAN_MAX_SIMDS_MASK 0xFFFF
+#define CAYMAN_MAX_SIMDS_PER_SE_MASK 0xFFF
+#define CAYMAN_MAX_PIPES 8
+#define CAYMAN_MAX_PIPES_MASK 0xFF
+#define CAYMAN_MAX_LDS_NUM 0xFFFF
+#define CAYMAN_MAX_TCC 16
+#define CAYMAN_MAX_TCC_MASK 0xFF
+
+#define DMIF_ADDR_CONFIG 0xBD4
+#define SRBM_STATUS 0x0E50
+
+#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
+#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
+#define RESPONSE_TYPE_MASK 0x000000F0
+#define RESPONSE_TYPE_SHIFT 4
+#define VM_L2_CNTL 0x1400
+#define ENABLE_L2_CACHE (1 << 0)
+#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
+#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
+#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
+#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 14)
+#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 18)
+/* CONTEXT1_IDENTITY_ACCESS_MODE
+ * 0 physical = logical
+ * 1 logical via context1 page table
+ * 2 inside identity aperture use translation, outside physical = logical
+ * 3 inside identity aperture physical = logical, outside use translation
+ */
+#define VM_L2_CNTL2 0x1404
+#define INVALIDATE_ALL_L1_TLBS (1 << 0)
+#define INVALIDATE_L2_CACHE (1 << 1)
+#define VM_L2_CNTL3 0x1408
+#define BANK_SELECT(x) ((x) << 0)
+#define CACHE_UPDATE_MODE(x) ((x) << 6)
+#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
+#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
+#define VM_L2_STATUS 0x140C
+#define L2_BUSY (1 << 0)
+#define VM_CONTEXT0_CNTL 0x1410
+#define ENABLE_CONTEXT (1 << 0)
+#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
+#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
+#define VM_CONTEXT1_CNTL 0x1414
+#define VM_CONTEXT0_CNTL2 0x1430
+#define VM_CONTEXT1_CNTL2 0x1434
+#define VM_INVALIDATE_REQUEST 0x1478
+#define VM_INVALIDATE_RESPONSE 0x147c
+#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
+#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
+#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
+#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
+#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
+
+#define MC_SHARED_CHMAP 0x2004
+#define NOOFCHAN_SHIFT 12
+#define NOOFCHAN_MASK 0x00003000
+#define MC_SHARED_CHREMAP 0x2008
+
+#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
+#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
+#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
+#define MC_VM_MX_L1_TLB_CNTL 0x2064
+#define ENABLE_L1_TLB (1 << 0)
+#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
+#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
+#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
+#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
+#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
+#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
+#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
+
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_ARB_RAMCFG 0x2760
+#define NOOFBANK_SHIFT 0
+#define NOOFBANK_MASK 0x00000003
+#define NOOFRANK_SHIFT 2
+#define NOOFRANK_MASK 0x00000004
+#define NOOFROWS_SHIFT 3
+#define NOOFROWS_MASK 0x00000038
+#define NOOFCOLS_SHIFT 6
+#define NOOFCOLS_MASK 0x000000C0
+#define CHANSIZE_SHIFT 8
+#define CHANSIZE_MASK 0x00000100
+#define BURSTLENGTH_SHIFT 9
+#define BURSTLENGTH_MASK 0x00000200
+#define CHANSIZE_OVERRIDE (1 << 11)
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
@@ -37,5 +131,406 @@
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+#define HDP_HOST_PATH_CNTL 0x2C00
+#define HDP_NONSURFACE_BASE 0x2C04
+#define HDP_NONSURFACE_INFO 0x2C08
+#define HDP_NONSURFACE_SIZE 0x2C0C
+#define HDP_ADDR_CONFIG 0x2F48
+
+#define CC_SYS_RB_BACKEND_DISABLE 0x3F88
+#define GC_USER_SYS_RB_BACKEND_DISABLE 0x3F8C
+#define CGTS_SYS_TCC_DISABLE 0x3F90
+#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+
+#define CONFIG_MEMSIZE 0x5428
+
+#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+
+#define GRBM_CNTL 0x8000
+#define GRBM_READ_TIMEOUT(x) ((x) << 0)
+#define GRBM_STATUS 0x8010
+#define CMDFIFO_AVAIL_MASK 0x0000000F
+#define RING2_RQ_PENDING (1 << 4)
+#define SRBM_RQ_PENDING (1 << 5)
+#define RING1_RQ_PENDING (1 << 6)
+#define CF_RQ_PENDING (1 << 7)
+#define PF_RQ_PENDING (1 << 8)
+#define GDS_DMA_RQ_PENDING (1 << 9)
+#define GRBM_EE_BUSY (1 << 10)
+#define SX_CLEAN (1 << 11)
+#define DB_CLEAN (1 << 12)
+#define CB_CLEAN (1 << 13)
+#define TA_BUSY (1 << 14)
+#define GDS_BUSY (1 << 15)
+#define VGT_BUSY_NO_DMA (1 << 16)
+#define VGT_BUSY (1 << 17)
+#define IA_BUSY_NO_DMA (1 << 18)
+#define IA_BUSY (1 << 19)
+#define SX_BUSY (1 << 20)
+#define SH_BUSY (1 << 21)
+#define SPI_BUSY (1 << 22)
+#define SC_BUSY (1 << 24)
+#define PA_BUSY (1 << 25)
+#define DB_BUSY (1 << 26)
+#define CP_COHERENCY_BUSY (1 << 28)
+#define CP_BUSY (1 << 29)
+#define CB_BUSY (1 << 30)
+#define GUI_ACTIVE (1 << 31)
+#define GRBM_STATUS_SE0 0x8014
+#define GRBM_STATUS_SE1 0x8018
+#define SE_SX_CLEAN (1 << 0)
+#define SE_DB_CLEAN (1 << 1)
+#define SE_CB_CLEAN (1 << 2)
+#define SE_VGT_BUSY (1 << 23)
+#define SE_PA_BUSY (1 << 24)
+#define SE_TA_BUSY (1 << 25)
+#define SE_SX_BUSY (1 << 26)
+#define SE_SPI_BUSY (1 << 27)
+#define SE_SH_BUSY (1 << 28)
+#define SE_SC_BUSY (1 << 29)
+#define SE_DB_BUSY (1 << 30)
+#define SE_CB_BUSY (1 << 31)
+#define GRBM_SOFT_RESET 0x8020
+#define SOFT_RESET_CP (1 << 0)
+#define SOFT_RESET_CB (1 << 1)
+#define SOFT_RESET_DB (1 << 3)
+#define SOFT_RESET_GDS (1 << 4)
+#define SOFT_RESET_PA (1 << 5)
+#define SOFT_RESET_SC (1 << 6)
+#define SOFT_RESET_SPI (1 << 8)
+#define SOFT_RESET_SH (1 << 9)
+#define SOFT_RESET_SX (1 << 10)
+#define SOFT_RESET_TC (1 << 11)
+#define SOFT_RESET_TA (1 << 12)
+#define SOFT_RESET_VGT (1 << 14)
+#define SOFT_RESET_IA (1 << 15)
+
+#define SCRATCH_REG0 0x8500
+#define SCRATCH_REG1 0x8504
+#define SCRATCH_REG2 0x8508
+#define SCRATCH_REG3 0x850C
+#define SCRATCH_REG4 0x8510
+#define SCRATCH_REG5 0x8514
+#define SCRATCH_REG6 0x8518
+#define SCRATCH_REG7 0x851C
+#define SCRATCH_UMSK 0x8540
+#define SCRATCH_ADDR 0x8544
+#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_ME_CNTL 0x86D8
+#define CP_ME_HALT (1 << 28)
+#define CP_PFP_HALT (1 << 26)
+#define CP_RB2_RPTR 0x86f8
+#define CP_RB1_RPTR 0x86fc
+#define CP_RB0_RPTR 0x8700
+#define CP_RB_WPTR_DELAY 0x8704
+#define CP_MEQ_THRESHOLDS 0x8764
+#define MEQ1_START(x) ((x) << 0)
+#define MEQ2_START(x) ((x) << 8)
+#define CP_PERFMON_CNTL 0x87FC
+
+#define VGT_CACHE_INVALIDATION 0x88C4
+#define CACHE_INVALIDATION(x) ((x) << 0)
+#define VC_ONLY 0
+#define TC_ONLY 1
+#define VC_AND_TC 2
+#define AUTO_INVLD_EN(x) ((x) << 6)
+#define NO_AUTO 0
+#define ES_AUTO 1
+#define GS_AUTO 2
+#define ES_AND_GS_AUTO 3
+#define VGT_GS_VERTEX_REUSE 0x88D4
+
+#define CC_GC_SHADER_PIPE_CONFIG 0x8950
+#define GC_USER_SHADER_PIPE_CONFIG 0x8954
+#define INACTIVE_QD_PIPES(x) ((x) << 8)
+#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
+#define INACTIVE_SIMDS(x) ((x) << 16)
+#define INACTIVE_SIMDS_MASK 0xFFFF0000
+#define INACTIVE_SIMDS_SHIFT 16
+
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_NUM_INSTANCES 0x8974
+#define VGT_TF_RING_SIZE 0x8988
+#define VGT_OFFCHIP_LDS_BASE 0x89b4
+
+#define PA_SC_LINE_STIPPLE_STATE 0x8B10
+#define PA_CL_ENHANCE 0x8A14
+#define CLIP_VTX_REORDER_ENA (1 << 0)
+#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_FIFO_SIZE 0x8BCC
+#define SC_PRIM_FIFO_SIZE(x) ((x) << 0)
+#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 12)
+#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 20)
+#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
+#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
+#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
+
+#define SQ_CONFIG 0x8C00
+#define VC_ENABLE (1 << 0)
+#define EXPORT_SRC_C (1 << 1)
+#define GFX_PRIO(x) ((x) << 2)
+#define CS1_PRIO(x) ((x) << 4)
+#define CS2_PRIO(x) ((x) << 6)
+#define SQ_GPR_RESOURCE_MGMT_1 0x8C04
+#define NUM_PS_GPRS(x) ((x) << 0)
+#define NUM_VS_GPRS(x) ((x) << 16)
+#define NUM_CLAUSE_TEMP_GPRS(x) ((x) << 28)
+#define SQ_ESGS_RING_SIZE 0x8c44
+#define SQ_GSVS_RING_SIZE 0x8c4c
+#define SQ_ESTMP_RING_BASE 0x8c50
+#define SQ_ESTMP_RING_SIZE 0x8c54
+#define SQ_GSTMP_RING_BASE 0x8c58
+#define SQ_GSTMP_RING_SIZE 0x8c5c
+#define SQ_VSTMP_RING_BASE 0x8c60
+#define SQ_VSTMP_RING_SIZE 0x8c64
+#define SQ_PSTMP_RING_BASE 0x8c68
+#define SQ_PSTMP_RING_SIZE 0x8c6c
+#define SQ_MS_FIFO_SIZES 0x8CF0
+#define CACHE_FIFO_SIZE(x) ((x) << 0)
+#define FETCH_FIFO_HIWATER(x) ((x) << 8)
+#define DONE_FIFO_HIWATER(x) ((x) << 16)
+#define ALU_UPDATE_FIFO_HIWATER(x) ((x) << 24)
+#define SQ_LSTMP_RING_BASE 0x8e10
+#define SQ_LSTMP_RING_SIZE 0x8e14
+#define SQ_HSTMP_RING_BASE 0x8e18
+#define SQ_HSTMP_RING_SIZE 0x8e1c
+#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define DYN_GPR_ENABLE (1 << 8)
+#define SQ_CONST_MEM_BASE 0x8df8
+
+#define SX_EXPORT_BUFFER_SIZES 0x900C
+#define COLOR_BUFFER_SIZE(x) ((x) << 0)
+#define POSITION_BUFFER_SIZE(x) ((x) << 8)
+#define SMX_BUFFER_SIZE(x) ((x) << 16)
+#define SX_DEBUG_1 0x9058
+#define ENABLE_NEW_SMX_ADDRESS (1 << 16)
+
+#define SPI_CONFIG_CNTL 0x9100
+#define GPR_WRITE_PRIORITY(x) ((x) << 0)
+#define SPI_CONFIG_CNTL_1 0x913C
+#define VTX_DONE_DELAY(x) ((x) << 0)
+#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
+#define CRC_SIMD_ID_WADDR_DISABLE (1 << 8)
+
+#define CGTS_TCC_DISABLE 0x9148
+#define CGTS_USER_TCC_DISABLE 0x914C
+#define TCC_DISABLE_MASK 0xFFFF0000
+#define TCC_DISABLE_SHIFT 16
+#define CGTS_SM_CTRL_REG 0x915C
+#define OVERRIDE (1 << 21)
+
+#define TA_CNTL_AUX 0x9508
+#define DISABLE_CUBE_WRAP (1 << 0)
+#define DISABLE_CUBE_ANISO (1 << 1)
+
+#define TCP_CHAN_STEER_LO 0x960c
+#define TCP_CHAN_STEER_HI 0x9610
+
+#define CC_RB_BACKEND_DISABLE 0x98F4
+#define BACKEND_DISABLE(x) ((x) << 16)
+#define GB_ADDR_CONFIG 0x98F8
+#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x00000007
+#define NUM_PIPES_SHIFT 0
+#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
+#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
+#define PIPE_INTERLEAVE_SIZE_SHIFT 4
+#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
+#define NUM_SHADER_ENGINES(x) ((x) << 12)
+#define NUM_SHADER_ENGINES_MASK 0x00003000
+#define NUM_SHADER_ENGINES_SHIFT 12
+#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
+#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
+#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
+#define NUM_GPUS(x) ((x) << 20)
+#define NUM_GPUS_MASK 0x00700000
+#define NUM_GPUS_SHIFT 20
+#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
+#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
+#define MULTI_GPU_TILE_SIZE_SHIFT 24
+#define ROW_SIZE(x) ((x) << 28)
+#define ROW_SIZE_MASK 0x30000000
+#define ROW_SIZE_SHIFT 28
+#define NUM_LOWER_PIPES(x) ((x) << 30)
+#define NUM_LOWER_PIPES_MASK 0x40000000
+#define NUM_LOWER_PIPES_SHIFT 30
+#define GB_BACKEND_MAP 0x98FC
+
+#define CB_PERF_CTR0_SEL_0 0x9A20
+#define CB_PERF_CTR0_SEL_1 0x9A24
+#define CB_PERF_CTR1_SEL_0 0x9A28
+#define CB_PERF_CTR1_SEL_1 0x9A2C
+#define CB_PERF_CTR2_SEL_0 0x9A30
+#define CB_PERF_CTR2_SEL_1 0x9A34
+#define CB_PERF_CTR3_SEL_0 0x9A38
+#define CB_PERF_CTR3_SEL_1 0x9A3C
+
+#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
+#define BACKEND_DISABLE_MASK 0x00FF0000
+#define BACKEND_DISABLE_SHIFT 16
+
+#define SMX_DC_CTL0 0xA020
+#define USE_HASH_FUNCTION (1 << 0)
+#define NUMBER_OF_SETS(x) ((x) << 1)
+#define FLUSH_ALL_ON_EVENT (1 << 10)
+#define STALL_ON_EVENT (1 << 11)
+#define SMX_EVENT_CTL 0xA02C
+#define ES_FLUSH_CTL(x) ((x) << 0)
+#define GS_FLUSH_CTL(x) ((x) << 3)
+#define ACK_FLUSH_CTL(x) ((x) << 6)
+#define SYNC_FLUSH_CTL (1 << 8)
+
+#define CP_RB0_BASE 0xC100
+#define CP_RB0_CNTL 0xC104
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
+#define BUF_SWAP_32BIT (2 << 16)
+#define CP_RB0_RPTR_ADDR 0xC10C
+#define CP_RB0_RPTR_ADDR_HI 0xC110
+#define CP_RB0_WPTR 0xC114
+#define CP_RB1_BASE 0xC180
+#define CP_RB1_CNTL 0xC184
+#define CP_RB1_RPTR_ADDR 0xC188
+#define CP_RB1_RPTR_ADDR_HI 0xC18C
+#define CP_RB1_WPTR 0xC190
+#define CP_RB2_BASE 0xC194
+#define CP_RB2_CNTL 0xC198
+#define CP_RB2_RPTR_ADDR 0xC19C
+#define CP_RB2_RPTR_ADDR_HI 0xC1A0
+#define CP_RB2_WPTR 0xC1A4
+#define CP_PFP_UCODE_ADDR 0xC150
+#define CP_PFP_UCODE_DATA 0xC154
+#define CP_ME_RAM_RADDR 0xC158
+#define CP_ME_RAM_WADDR 0xC15C
+#define CP_ME_RAM_DATA 0xC160
+#define CP_DEBUG 0xC1FC
+
+/*
+ * PM4
+ */
+#define PACKET_TYPE0 0
+#define PACKET_TYPE1 1
+#define PACKET_TYPE2 2
+#define PACKET_TYPE3 3
+
+#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define CP_PACKET2 0x80000000
+#define PACKET2_PAD_SHIFT 0
+#define PACKET2_PAD_MASK (0x3fffffff << 0)
+
+#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
+
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
+/* Packet 3 types */
+#define PACKET3_NOP 0x10
+#define PACKET3_SET_BASE 0x11
+#define PACKET3_CLEAR_STATE 0x12
+#define PACKET3_INDEX_BUFFER_SIZE 0x13
+#define PACKET3_DEALLOC_STATE 0x14
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_DISPATCH_INDIRECT 0x16
+#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_SET_PREDICATION 0x20
+#define PACKET3_REG_RMW 0x21
+#define PACKET3_COND_EXEC 0x22
+#define PACKET3_PRED_EXEC 0x23
+#define PACKET3_DRAW_INDIRECT 0x24
+#define PACKET3_DRAW_INDEX_INDIRECT 0x25
+#define PACKET3_INDEX_BASE 0x26
+#define PACKET3_DRAW_INDEX_2 0x27
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_OFFSET 0x29
+#define PACKET3_INDEX_TYPE 0x2A
+#define PACKET3_DRAW_INDEX 0x2B
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_DRAW_INDEX_IMMD 0x2E
+#define PACKET3_NUM_INSTANCES 0x2F
+#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
+#define PACKET3_INDIRECT_BUFFER 0x32
+#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
+#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
+#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
+#define PACKET3_WRITE_DATA 0x37
+#define PACKET3_MEM_SEMAPHORE 0x39
+#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_WAIT_REG_MEM 0x3C
+#define PACKET3_MEM_WRITE 0x3D
+#define PACKET3_SURFACE_SYNC 0x43
+# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
+# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
+# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
+# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
+# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
+# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
+# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
+# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
+# define PACKET3_CB8_DEST_BASE_ENA (1 << 15)
+# define PACKET3_CB9_DEST_BASE_ENA (1 << 16)
+# define PACKET3_CB10_DEST_BASE_ENA (1 << 17)
+# define PACKET3_CB11_DEST_BASE_ENA (1 << 18)
+# define PACKET3_FULL_CACHE_ENA (1 << 20)
+# define PACKET3_TC_ACTION_ENA (1 << 23)
+# define PACKET3_CB_ACTION_ENA (1 << 25)
+# define PACKET3_DB_ACTION_ENA (1 << 26)
+# define PACKET3_SH_ACTION_ENA (1 << 27)
+# define PACKET3_SX_ACTION_ENA (1 << 28)
+#define PACKET3_ME_INITIALIZE 0x44
+#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
+#define PACKET3_COND_WRITE 0x45
+#define PACKET3_EVENT_WRITE 0x46
+#define PACKET3_EVENT_WRITE_EOP 0x47
+#define PACKET3_EVENT_WRITE_EOS 0x48
+#define PACKET3_PREAMBLE_CNTL 0x4A
+# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
+# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
+#define PACKET3_ALU_PS_CONST_BUFFER_COPY 0x4C
+#define PACKET3_ALU_VS_CONST_BUFFER_COPY 0x4D
+#define PACKET3_ALU_PS_CONST_UPDATE 0x4E
+#define PACKET3_ALU_VS_CONST_UPDATE 0x4F
+#define PACKET3_ONE_REG_WRITE 0x57
+#define PACKET3_SET_CONFIG_REG 0x68
+#define PACKET3_SET_CONFIG_REG_START 0x00008000
+#define PACKET3_SET_CONFIG_REG_END 0x0000ac00
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_CONTEXT_REG_START 0x00028000
+#define PACKET3_SET_CONTEXT_REG_END 0x00029000
+#define PACKET3_SET_ALU_CONST 0x6A
+/* alu const buffers only; no reg file */
+#define PACKET3_SET_BOOL_CONST 0x6B
+#define PACKET3_SET_BOOL_CONST_START 0x0003a500
+#define PACKET3_SET_BOOL_CONST_END 0x0003a518
+#define PACKET3_SET_LOOP_CONST 0x6C
+#define PACKET3_SET_LOOP_CONST_START 0x0003a200
+#define PACKET3_SET_LOOP_CONST_END 0x0003a500
+#define PACKET3_SET_RESOURCE 0x6D
+#define PACKET3_SET_RESOURCE_START 0x00030000
+#define PACKET3_SET_RESOURCE_END 0x00038000
+#define PACKET3_SET_SAMPLER 0x6E
+#define PACKET3_SET_SAMPLER_START 0x0003c000
+#define PACKET3_SET_SAMPLER_END 0x0003c600
+#define PACKET3_SET_CTL_CONST 0x6F
+#define PACKET3_SET_CTL_CONST_START 0x0003cff0
+#define PACKET3_SET_CTL_CONST_END 0x0003ff0c
+#define PACKET3_SET_RESOURCE_OFFSET 0x70
+#define PACKET3_SET_ALU_CONST_VS 0x71
+#define PACKET3_SET_ALU_CONST_DI 0x72
+#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
+#define PACKET3_SET_RESOURCE_INDIRECT 0x74
+#define PACKET3_SET_APPEND_CNT 0x75
+
#endif
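
The PM4 macros above pack the packet type, payload dword count, and either a
register dword offset (type 0) or an opcode (type 3) into a single 32-bit
header. A stand-alone sketch of the encode/decode round trip, reusing the
definitions from this header (illustrative user-space C, not part of the
patch; the count field holds "payload dwords minus one"):

#include <assert.h>
#include <stdint.h>

#define PACKET_TYPE0 0
#define PACKET_TYPE3 3

/* [31:30] type, [29:16] count, [15:0] register dword offset (type 0) */
#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | (((reg) >> 2) & 0xFFFF) | (((n) & 0x3FFF) << 16))
/* [31:30] type, [29:16] count, [15:8] opcode (type 3) */
#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | (((op) & 0xFF) << 8) | (((n) & 0x3FFF) << 16))

#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)

int main(void)
{
	uint32_t h0 = PACKET0(0xC104, 1); /* CP_RB0_CNTL, two payload dwords */
	uint32_t h3 = PACKET3(0x43, 3);   /* PACKET3_SURFACE_SYNC, four payload dwords */

	assert(CP_PACKET_GET_TYPE(h0) == PACKET_TYPE0);
	assert(CP_PACKET0_GET_REG(h0) == 0xC104);
	assert(CP_PACKET_GET_COUNT(h0) == 1);
	assert(CP_PACKET_GET_TYPE(h3) == PACKET_TYPE3);
	assert(CP_PACKET3_GET_OPCODE(h3) == 0x43);
	return 0;
}
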
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e372f9e1e5ce..fcc23e4e0b3c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1205,14 +1205,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
DRM_ERROR("vline wait had illegal wait until segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -1230,8 +1228,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -1253,14 +1250,13 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
ib[h_idx] = header;
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
}
-out:
- return r;
+
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9b3fad23b76c..12fdebf9aed8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -47,6 +47,7 @@
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
+#define CAYMAN_RLC_UCODE_SIZE 1024
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -2739,7 +2740,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev)
/* Allocate ring buffer */
if (rdev->ih.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
+ r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->ih.ring_obj);
@@ -2820,13 +2821,20 @@ static int r600_rlc_init(struct radeon_device *rdev)
WREG32(RLC_HB_CNTL, 0);
WREG32(RLC_HB_RPTR, 0);
WREG32(RLC_HB_WPTR, 0);
- WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
- WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ if (rdev->family <= CHIP_CAICOS) {
+ WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
+ WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
+ }
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
fw_data = (const __be32 *)rdev->rlc_fw->data;
- if (rdev->family >= CHIP_CEDAR) {
+ if (rdev->family >= CHIP_CAYMAN) {
+ for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
+ WREG32(RLC_UCODE_ADDR, i);
+ WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
+ }
+ } else if (rdev->family >= CHIP_CEDAR) {
for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index b5443fe1c1d1..846fae576399 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon.h"
#include "radeon_reg.h"
+#include "radeon_asic.h"
#include "atom.h"
#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index df68d91e8190..9aa74c3f8cb6 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -512,7 +512,7 @@ int r600_blit_init(struct radeon_device *rdev)
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
- r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 153095fba62f..3324620b2db6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,75 +71,167 @@ struct r600_cs_track {
u64 db_bo_mc;
};
+#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc }
+#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc }
+#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 }
+#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc }
+#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 }
+#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc }
+#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 }
+#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc }
+
+struct gpu_formats {
+ unsigned blockwidth;
+ unsigned blockheight;
+ unsigned blocksize;
+ unsigned valid_color;
+};
+
+static const struct gpu_formats color_formats_table[] = {
+ /* 8 bit */
+ FMT_8_BIT(V_038004_COLOR_8, 1),
+ FMT_8_BIT(V_038004_COLOR_4_4, 1),
+ FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
+ FMT_8_BIT(V_038004_FMT_1, 0),
+
+ /* 16-bit */
+ FMT_16_BIT(V_038004_COLOR_16, 1),
+ FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
+ FMT_16_BIT(V_038004_COLOR_8_8, 1),
+ FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
+ FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
+ FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
+ FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
+
+ /* 24-bit */
+ FMT_24_BIT(V_038004_FMT_8_8_8),
+
+ /* 32-bit */
+ FMT_32_BIT(V_038004_COLOR_32, 1),
+ FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16, 1),
+ FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24, 1),
+ FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8, 1),
+ FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
+ FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
+ FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
+ FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
+ FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
+ FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
+ FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
+ FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
+
+ /* 48-bit */
+ FMT_48_BIT(V_038004_FMT_16_16_16),
+ FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
+
+ /* 64-bit */
+ FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32, 1),
+ FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
+ FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
+
+ FMT_96_BIT(V_038004_FMT_32_32_32),
+ FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
+
+ /* 128-bit */
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
+ FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
+
+ [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
+ [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
+
+ /* block compressed formats */
+ [V_038004_FMT_BC1] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC2] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
+ [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
+ [V_038004_FMT_BC5] = { 4, 4, 16, 0 },
+
+};
+
+static inline bool fmt_is_valid_color(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].valid_color)
+ return true;
+
+ return false;
+}
+
+static inline bool fmt_is_valid_texture(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return false;
+
+ if (color_formats_table[format].blockwidth > 0)
+ return true;
+
+ return false;
+}
+
+static inline int fmt_get_blocksize(u32 format)
+{
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ return color_formats_table[format].blocksize;
+}
+
+static inline int fmt_get_nblocksx(u32 format, u32 w)
+{
+ unsigned bw;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bw = color_formats_table[format].blockwidth;
+ if (bw == 0)
+ return 0;
+
+ return (w + bw - 1) / bw;
+}
+
+static inline int fmt_get_nblocksy(u32 format, u32 h)
+{
+ unsigned bh;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ return 0;
+
+ bh = color_formats_table[format].blockheight;
+ if (bh == 0)
+ return 0;
+
+ return (h + bh - 1) / bh;
+}
+
static inline int r600_bpe_from_format(u32 *bpe, u32 format)
{
- switch (format) {
- case V_038004_COLOR_8:
- case V_038004_COLOR_4_4:
- case V_038004_COLOR_3_3_2:
- case V_038004_FMT_1:
- *bpe = 1;
- break;
- case V_038004_COLOR_16:
- case V_038004_COLOR_16_FLOAT:
- case V_038004_COLOR_8_8:
- case V_038004_COLOR_5_6_5:
- case V_038004_COLOR_6_5_5:
- case V_038004_COLOR_1_5_5_5:
- case V_038004_COLOR_4_4_4_4:
- case V_038004_COLOR_5_5_5_1:
- *bpe = 2;
- break;
- case V_038004_FMT_8_8_8:
- *bpe = 3;
- break;
- case V_038004_COLOR_32:
- case V_038004_COLOR_32_FLOAT:
- case V_038004_COLOR_16_16:
- case V_038004_COLOR_16_16_FLOAT:
- case V_038004_COLOR_8_24:
- case V_038004_COLOR_8_24_FLOAT:
- case V_038004_COLOR_24_8:
- case V_038004_COLOR_24_8_FLOAT:
- case V_038004_COLOR_10_11_11:
- case V_038004_COLOR_10_11_11_FLOAT:
- case V_038004_COLOR_11_11_10:
- case V_038004_COLOR_11_11_10_FLOAT:
- case V_038004_COLOR_2_10_10_10:
- case V_038004_COLOR_8_8_8_8:
- case V_038004_COLOR_10_10_10_2:
- case V_038004_FMT_5_9_9_9_SHAREDEXP:
- case V_038004_FMT_32_AS_8:
- case V_038004_FMT_32_AS_8_8:
- *bpe = 4;
- break;
- case V_038004_COLOR_X24_8_32_FLOAT:
- case V_038004_COLOR_32_32:
- case V_038004_COLOR_32_32_FLOAT:
- case V_038004_COLOR_16_16_16_16:
- case V_038004_COLOR_16_16_16_16_FLOAT:
- *bpe = 8;
- break;
- case V_038004_FMT_16_16_16:
- case V_038004_FMT_16_16_16_FLOAT:
- *bpe = 6;
- break;
- case V_038004_FMT_32_32_32:
- case V_038004_FMT_32_32_32_FLOAT:
- *bpe = 12;
- break;
- case V_038004_COLOR_32_32_32_32:
- case V_038004_COLOR_32_32_32_32_FLOAT:
- *bpe = 16;
- break;
- case V_038004_FMT_GB_GR:
- case V_038004_FMT_BG_RG:
- case V_038004_COLOR_INVALID:
- default:
- *bpe = 16;
- return -EINVAL;
- }
+ unsigned res;
+
+ if (format >= ARRAY_SIZE(color_formats_table))
+ goto fail;
+
+ res = color_formats_table[format].blocksize;
+ if (res == 0)
+ goto fail;
+
+ *bpe = res;
return 0;
+
+fail:
+ *bpe = 16;
+ return -EINVAL;
}
struct array_mode_checker {
@@ -148,7 +240,7 @@ struct array_mode_checker {
u32 nbanks;
u32 npipes;
u32 nsamples;
- u32 bpe;
+ u32 blocksize;
};
/* returns alignment in pixels for pitch/height/depth and bytes for base */
@@ -162,7 +254,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
u32 tile_height = 8;
u32 macro_tile_width = values->nbanks;
u32 macro_tile_height = values->npipes;
- u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples;
+ u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
switch (values->array_mode) {
@@ -174,7 +266,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
*base_align = 1;
break;
case ARRAY_LINEAR_ALIGNED:
- *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe));
+ *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -182,7 +274,7 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_1D_TILED_THIN1:
*pitch_align = max((u32)tile_width,
(u32)(values->group_size /
- (tile_height * values->bpe * values->nsamples)));
+ (tile_height * values->blocksize * values->nsamples)));
*height_align = tile_height;
*depth_align = 1;
*base_align = values->group_size;
@@ -190,12 +282,12 @@ static inline int r600_get_array_mode_alignment(struct array_mode_checker *value
case ARRAY_2D_TILED_THIN1:
*pitch_align = max((u32)macro_tile_width,
(u32)(((values->group_size / tile_height) /
- (values->bpe * values->nsamples)) *
+ (values->blocksize * values->nsamples)) *
values->nbanks)) * tile_width;
*height_align = macro_tile_height * tile_height;
*depth_align = 1;
*base_align = max(macro_tile_bytes,
- (*pitch_align) * values->bpe * (*height_align) * values->nsamples);
+ (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
break;
default:
return -EINVAL;
@@ -234,21 +326,22 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, slice_tile_max, size, tmp;
+ u32 slice_tile_max, size, tmp;
u32 height, height_align, pitch, pitch_align, depth_align;
u64 base_offset, base_align;
struct array_mode_checker array_check;
volatile u32 *ib = p->ib->ptr;
unsigned array_mode;
-
+ u32 format;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
- if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
+ format = G_0280A0_FORMAT(track->cb_color_info[i]);
+ if (!fmt_is_valid_color(format)) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
- __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
+ __func__, __LINE__, format,
i, track->cb_color_info[i]);
return -EINVAL;
}
@@ -267,7 +360,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -311,7 +404,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
}
/* check offset */
- tmp = height * pitch * bpe;
+ tmp = fmt_get_nblocksy(format, height) * fmt_get_nblocksx(format, pitch) * fmt_get_blocksize(format);
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
@@ -436,7 +529,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = track->nsamples;
- array_check.bpe = bpe;
+ array_check.blocksize = bpe;
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
@@ -687,33 +780,28 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (wait_reg_mem.type != PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
- r = -EINVAL;
- return r;
+ return -EINVAL;
}
/* jump over the NOP */
@@ -732,8 +820,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_ERROR("cannot find crtc %d\n", crtc_id);
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
crtc = obj_to_crtc(obj);
radeon_crtc = to_radeon_crtc(crtc);
@@ -756,14 +843,13 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("unknown crtc reloc\n");
- r = -EINVAL;
- goto out;
+ return -EINVAL;
}
ib[h_idx] = header;
ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
}
-out:
- return r;
+
+ return 0;
}
static int r600_packet0_check(struct radeon_cs_parser *p,
@@ -1113,39 +1199,61 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return 0;
}
-static inline unsigned minify(unsigned size, unsigned levels)
+static inline unsigned mip_minify(unsigned size, unsigned level)
{
- size = size >> levels;
- if (size < 1)
- size = 1;
- return size;
+ unsigned val;
+
+ val = max(1U, size >> level);
+ if (level > 0)
+ val = roundup_pow_of_two(val);
+ return val;
}
-static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
- unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
- unsigned pitch_align,
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
+ unsigned w0, unsigned h0, unsigned d0, unsigned format,
+ unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level, face;
- unsigned width, height, depth, rowstride, size;
-
- w0 = minify(w0, 0);
- h0 = minify(h0, 0);
- d0 = minify(d0, 0);
+ unsigned offset, i, level;
+ unsigned width, height, depth, size;
+ unsigned blocksize;
+ unsigned nbx, nby;
+ unsigned nlevels = llevel - blevel + 1;
+
+ *l0_size = -1;
+ blocksize = fmt_get_blocksize(format);
+
+ w0 = mip_minify(w0, 0);
+ h0 = mip_minify(h0, 0);
+ d0 = mip_minify(d0, 0);
for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
- width = minify(w0, i);
- height = minify(h0, i);
- depth = minify(d0, i);
- for(face = 0; face < nfaces; face++) {
- rowstride = ALIGN((width * bpe), pitch_align);
- size = height * rowstride * depth;
- offset += size;
- offset = (offset + 0x1f) & ~0x1f;
- }
+ width = mip_minify(w0, i);
+ nbx = fmt_get_nblocksx(format, width);
+
+ nbx = round_up(nbx, block_align);
+
+ height = mip_minify(h0, i);
+ nby = fmt_get_nblocksy(format, height);
+ nby = round_up(nby, height_align);
+
+ depth = mip_minify(d0, i);
+
+ size = nbx * nby * blocksize;
+ if (nfaces)
+ size *= nfaces;
+ else
+ size *= depth;
+
+ if (i == 0)
+ *l0_size = size;
+
+ if (i == 0 || i == 1)
+ offset = round_up(offset, base_align);
+
+ offset += size;
}
- *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
*mipmap_size = offset;
- if (!nlevels)
+ if (llevel == 0)
*mipmap_size = *l0_size;
if (!blevel)
*mipmap_size -= *l0_size;
@@ -1169,11 +1277,13 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
u32 tiling_flags)
{
struct r600_cs_track *track = p->track;
- u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size;
+ u32 nfaces, llevel, blevel, w0, h0, d0;
+ u32 word0, word1, l0_size, mipmap_size, word2, word3;
u32 height_align, pitch, pitch_align, depth_align;
+ u32 array = 0, barray, larray;
u64 base_align;
struct array_mode_checker array_check;
+ u32 format;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
@@ -1199,19 +1309,25 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
case V_038000_SQ_TEX_DIM_3D:
break;
case V_038000_SQ_TEX_DIM_CUBEMAP:
- nfaces = 6;
+ if (p->family >= CHIP_RV770)
+ nfaces = 8;
+ else
+ nfaces = 6;
break;
case V_038000_SQ_TEX_DIM_1D_ARRAY:
case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ array = 1;
+ break;
case V_038000_SQ_TEX_DIM_2D_MSAA:
case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
default:
dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
- if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
+ format = G_038004_DATA_FORMAT(word1);
+ if (!fmt_is_valid_texture(format)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, G_038004_DATA_FORMAT(word1));
+ __func__, __LINE__, format);
return -EINVAL;
}
@@ -1222,7 +1338,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
array_check.nbanks = track->nbanks;
array_check.npipes = track->npipes;
array_check.nsamples = 1;
- array_check.bpe = bpe;
+ array_check.blocksize = fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
@@ -1248,25 +1364,34 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
+ word2 = radeon_get_ib_value(p, idx + 2) << 8;
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
- nlevels = G_038014_LAST_LEVEL(word1);
- r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
- (pitch_align * bpe),
+ llevel = G_038014_LAST_LEVEL(word1);
+ if (array == 1) {
+ barray = G_038014_BASE_ARRAY(word1);
+ larray = G_038014_LAST_ARRAY(word1);
+
+ nfaces = larray - barray + 1;
+ }
+ r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format,
+ pitch_align, height_align, base_align,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
- word0 = radeon_get_ib_value(p, idx + 2) << 8;
- if ((l0_size + word0) > radeon_bo_size(texture)) {
+ if ((l0_size + word2) > radeon_bo_size(texture)) {
dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
+ w0, h0, format, word2, l0_size, radeon_bo_size(texture));
+ dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word0 = radeon_get_ib_value(p, idx + 3) << 8;
- if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
+ word3 = radeon_get_ib_value(p, idx + 3) << 8;
+ if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
- w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));*/
+ w0, h0, format, blevel, llevel, word3, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
}
@@ -1289,6 +1414,38 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
idx_value = radeon_get_ib_value(p, idx);
switch (pkt->opcode) {
+ case PACKET3_SET_PREDICATION:
+ {
+ int pred_op;
+ int tmp;
+ if (pkt->count != 1) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ tmp = radeon_get_ib_value(p, idx + 1);
+ pred_op = (tmp >> 16) & 0x7;
+
+ /* for the clear predicate operation */
+ if (pred_op == 0)
+ return 0;
+
+ if (pred_op > 2) {
+ DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ return -EINVAL;
+ }
+
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad SET PREDICATION\n");
+ return -EINVAL;
+ }
+
+ ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
+ ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
+ }
+ break;
+
case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) {
DRM_ERROR("bad START_3D\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e6a58ed48dcf..50db6d62eec2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
/*
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 04bac0bbd3ec..b2b944bcd05a 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1304,6 +1304,11 @@
#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
#define V_038004_FMT_32_32_32 0x0000002F
#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define V_038004_FMT_BC1 0x00000031
+#define V_038004_FMT_BC2 0x00000032
+#define V_038004_FMT_BC3 0x00000033
+#define V_038004_FMT_BC4 0x00000034
+#define V_038004_FMT_BC5 0x00000035
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 6b3429495118..cfe3af1a7935 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -258,8 +258,9 @@ struct radeon_bo {
int surface_reg;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object *gobj;
+ struct drm_gem_object gem_base;
};
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
struct radeon_bo_list {
struct ttm_validate_buffer tv;
@@ -288,6 +289,15 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
/*
* GART structures, functions & helpers
@@ -319,6 +329,7 @@ struct radeon_gart {
union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
+ bool *ttm_alloced;
bool ready;
};
@@ -331,7 +342,8 @@ void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist);
+ int pages, struct page **pagelist,
+ dma_addr_t *dma_addr);
/*
@@ -651,6 +663,8 @@ struct radeon_wb {
#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
+#define RADEON_WB_CP1_RPTR_OFFSET 1280
+#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET 2048
#define R600_WB_EVENT_OFFSET 3072
@@ -1037,12 +1051,52 @@ struct evergreen_asic {
struct r100_gpu_lockup lockup;
};
+struct cayman_asic {
+ unsigned max_shader_engines;
+ unsigned max_pipes_per_simd;
+ unsigned max_tile_pipes;
+ unsigned max_simds_per_se;
+ unsigned max_backends_per_se;
+ unsigned max_texture_channel_caches;
+ unsigned max_gprs;
+ unsigned max_threads;
+ unsigned max_gs_threads;
+ unsigned max_stack_entries;
+ unsigned sx_num_of_sets;
+ unsigned sx_max_export_size;
+ unsigned sx_max_export_pos_size;
+ unsigned sx_max_export_smx_size;
+ unsigned max_hw_contexts;
+ unsigned sq_num_cf_insts;
+ unsigned sc_prim_fifo_size;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_shader_engines;
+ unsigned num_shader_pipes_per_simd;
+ unsigned num_tile_pipes;
+ unsigned num_simds_per_se;
+ unsigned num_backends_per_se;
+ unsigned backend_disable_mask_per_asic;
+ unsigned backend_map;
+ unsigned num_texture_channel_caches;
+ unsigned mem_max_burst_length_bytes;
+ unsigned mem_row_size_in_kb;
+ unsigned shader_engine_tile_size;
+ unsigned num_gpus;
+ unsigned multi_gpu_tile_size;
+
+ unsigned tile_config;
+ struct r100_gpu_lockup lockup;
+};
+
union radeon_asic_config {
struct r300_asic r300;
struct r100_asic r100;
struct r600_asic r600;
struct rv770_asic rv770;
struct evergreen_asic evergreen;
+ struct cayman_asic cayman;
};
/*
@@ -1133,6 +1187,9 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv;
struct radeon_cp cp;
+ /* cayman compute rings */
+ struct radeon_cp cp1;
+ struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
@@ -1185,19 +1242,6 @@ int radeon_device_init(struct radeon_device *rdev,
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-/* evergreen blit */
-int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
-void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
-void evergreen_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- int size_bytes);
-
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
@@ -1449,62 +1493,15 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
-/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern bool r600_card_posted(struct radeon_device *rdev);
-extern void r600_cp_stop(struct radeon_device *rdev);
-extern int r600_cp_start(struct radeon_device *rdev);
-extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_cp_resume(struct radeon_device *rdev);
-extern void r600_cp_fini(struct radeon_device *rdev);
-extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
-extern int r600_pcie_gart_init(struct radeon_device *rdev);
-extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
-extern int r600_ib_test(struct radeon_device *rdev);
-extern int r600_ring_test(struct radeon_device *rdev);
-extern void r600_scratch_init(struct radeon_device *rdev);
-extern int r600_blit_init(struct radeon_device *rdev);
-extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_init_microcode(struct radeon_device *rdev);
-extern int r600_asic_reset(struct radeon_device *rdev);
-/* r600 irq */
-extern int r600_irq_init(struct radeon_device *rdev);
-extern void r600_irq_fini(struct radeon_device *rdev);
-extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
-extern int r600_irq_set(struct radeon_device *rdev);
-extern void r600_irq_suspend(struct radeon_device *rdev);
-extern void r600_disable_interrupts(struct radeon_device *rdev);
-extern void r600_rlc_stop(struct radeon_device *rdev);
-/* r600 audio */
-extern int r600_audio_init(struct radeon_device *rdev);
-extern int r600_audio_tmds_index(struct drm_encoder *encoder);
-extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
-extern int r600_audio_channels(struct radeon_device *rdev);
-extern int r600_audio_bits_per_sample(struct radeon_device *rdev);
-extern int r600_audio_rate(struct radeon_device *rdev);
-extern uint8_t r600_audio_status_bits(struct radeon_device *rdev);
-extern uint8_t r600_audio_category_code(struct radeon_device *rdev);
-extern void r600_audio_schedule_polling(struct radeon_device *rdev);
-extern void r600_audio_enable_polling(struct drm_encoder *encoder);
-extern void r600_audio_disable_polling(struct drm_encoder *encoder);
-extern void r600_audio_fini(struct radeon_device *rdev);
-extern void r600_hdmi_init(struct drm_encoder *encoder);
+/*
+ * r600 functions used by radeon_encoder.c
+ */
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
-extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-
-extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void r700_cp_stop(struct radeon_device *rdev);
-extern void r700_cp_fini(struct radeon_device *rdev);
-extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-extern int evergreen_irq_set(struct radeon_device *rdev);
-extern int evergreen_blit_init(struct radeon_device *rdev);
-extern void evergreen_blit_fini(struct radeon_device *rdev);
extern int ni_init_microcode(struct radeon_device *rdev);
-extern int btc_mc_load_microcode(struct radeon_device *rdev);
+extern int ni_mc_load_microcode(struct radeon_device *rdev);
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
@@ -1513,14 +1510,6 @@ extern int radeon_acpi_init(struct radeon_device *rdev);
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif
-/* evergreen */
-struct evergreen_mc_save {
- u32 vga_control[6];
- u32 vga_render_control;
- u32 vga_hdp_control;
- u32 crtc_control[6];
-};
-
#include "radeon_object.h"
#endif
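
Embedding gem_base in struct radeon_bo (instead of pointing at a separately
allocated GEM object) is what makes the new gem_to_radeon_bo() work: it is a
plain container_of(), i.e. constant-offset pointer arithmetic with no
driver_private indirection. A simplified stand-alone illustration of the
pattern (toy struct layouts, not the kernel's):

#include <assert.h>
#include <stddef.h>

struct drm_gem_object { unsigned long size; }; /* stand-in */

struct radeon_bo {
	int surface_reg;
	struct drm_gem_object gem_base; /* embedded, no separate allocation */
};

/* container_of() spelled out: subtract the member offset. */
#define gem_to_radeon_bo(gobj) \
	((struct radeon_bo *)((char *)(gobj) - offsetof(struct radeon_bo, gem_base)))

int main(void)
{
	struct radeon_bo bo = { .surface_reg = -1 };
	struct drm_gem_object *gobj = &bo.gem_base;

	/* Previously: gobj->driver_private; now just pointer math. */
	assert(gem_to_radeon_bo(gobj) == &bo);
	return 0;
}
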
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 793c5e6026ad..eb888ee5f674 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -885,6 +885,52 @@ static struct radeon_asic btc_asic = {
.post_page_flip = &evergreen_post_page_flip,
};
+static struct radeon_asic cayman_asic = {
+ .init = &cayman_init,
+ .fini = &cayman_fini,
+ .suspend = &cayman_suspend,
+ .resume = &cayman_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_is_lockup = &cayman_gpu_is_lockup,
+ .asic_reset = &cayman_asic_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &evergreen_ring_ib_execute,
+ .irq_set = &evergreen_irq_set,
+ .irq_process = &evergreen_irq_process,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .copy_blit = NULL,
+ .copy_dma = NULL,
+ .copy = NULL,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+ .gui_idle = &r600_gui_idle,
+ .pm_misc = &evergreen_pm_misc,
+ .pm_prepare = &evergreen_pm_prepare,
+ .pm_finish = &evergreen_pm_finish,
+ .pm_init_profile = &r600_pm_init_profile,
+ .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+ .pre_page_flip = &evergreen_pre_page_flip,
+ .page_flip = &evergreen_page_flip,
+ .post_page_flip = &evergreen_post_page_flip,
+};
+
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
@@ -977,6 +1023,9 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_CAICOS:
rdev->asic = &btc_asic;
break;
+ case CHIP_CAYMAN:
+ rdev->asic = &cayman_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c59bd98a2029..3d7a0d7c6a9a 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -57,8 +57,6 @@ int r100_init(struct radeon_device *rdev);
void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
int r100_asic_reset(struct radeon_device *rdev);
@@ -164,8 +162,6 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,7 +204,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
-
/*
* rs600.
*/
@@ -270,8 +265,6 @@ void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
-uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
-void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
@@ -307,14 +300,13 @@ void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-int r600_irq_process(struct radeon_device *rdev);
-int r600_irq_set(struct radeon_device *rdev);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+int r600_ib_test(struct radeon_device *rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
@@ -333,6 +325,50 @@ extern void rs780_pm_init_profile(struct radeon_device *rdev);
extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int r600_get_pcie_lanes(struct radeon_device *rdev);
+bool r600_card_posted(struct radeon_device *rdev);
+void r600_cp_stop(struct radeon_device *rdev);
+int r600_cp_start(struct radeon_device *rdev);
+void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_cp_resume(struct radeon_device *rdev);
+void r600_cp_fini(struct radeon_device *rdev);
+int r600_count_pipe_bits(uint32_t val);
+int r600_mc_wait_for_idle(struct radeon_device *rdev);
+int r600_pcie_gart_init(struct radeon_device *rdev);
+void r600_scratch_init(struct radeon_device *rdev);
+int r600_blit_init(struct radeon_device *rdev);
+void r600_blit_fini(struct radeon_device *rdev);
+int r600_init_microcode(struct radeon_device *rdev);
+/* r600 irq */
+int r600_irq_process(struct radeon_device *rdev);
+int r600_irq_init(struct radeon_device *rdev);
+void r600_irq_fini(struct radeon_device *rdev);
+void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+int r600_irq_set(struct radeon_device *rdev);
+void r600_irq_suspend(struct radeon_device *rdev);
+void r600_disable_interrupts(struct radeon_device *rdev);
+void r600_rlc_stop(struct radeon_device *rdev);
+/* r600 audio */
+int r600_audio_init(struct radeon_device *rdev);
+int r600_audio_tmds_index(struct drm_encoder *encoder);
+void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
+int r600_audio_channels(struct radeon_device *rdev);
+int r600_audio_bits_per_sample(struct radeon_device *rdev);
+int r600_audio_rate(struct radeon_device *rdev);
+uint8_t r600_audio_status_bits(struct radeon_device *rdev);
+uint8_t r600_audio_category_code(struct radeon_device *rdev);
+void r600_audio_schedule_polling(struct radeon_device *rdev);
+void r600_audio_enable_polling(struct drm_encoder *encoder);
+void r600_audio_disable_polling(struct drm_encoder *encoder);
+void r600_audio_fini(struct radeon_device *rdev);
+void r600_hdmi_init(struct drm_encoder *encoder);
+int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
+void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
+/* r600 blit */
+int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void r600_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
/*
* rv770,rv730,rv710,rv740
@@ -341,12 +377,21 @@ int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
-extern void rv770_pm_misc(struct radeon_device *rdev);
-extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void rv770_pm_misc(struct radeon_device *rdev);
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+void r700_cp_stop(struct radeon_device *rdev);
+void r700_cp_fini(struct radeon_device *rdev);
/*
* evergreen
*/
+struct evergreen_mc_save {
+ u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 crtc_control[6];
+};
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -374,5 +419,25 @@ extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
+void evergreen_disable_interrupt_state(struct radeon_device *rdev);
+int evergreen_blit_init(struct radeon_device *rdev);
+void evergreen_blit_fini(struct radeon_device *rdev);
+/* evergreen blit */
+int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
+void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
+void evergreen_kms_blit_copy(struct radeon_device *rdev,
+ u64 src_gpu_addr, u64 dst_gpu_addr,
+ int size_bytes);
+
+/*
+ * cayman
+ */
+void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
+int cayman_init(struct radeon_device *rdev);
+void cayman_fini(struct radeon_device *rdev);
+int cayman_suspend(struct radeon_device *rdev);
+int cayman_resume(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+int cayman_asic_reset(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index c558685cc637..10191d9372d8 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
size = bsize;
n = 1024;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
if (r) {
goto out_cleanup;
}
@@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
if (r) {
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj);
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 22b7e3dc0eca..3f3c9aac46cc 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -972,7 +972,16 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
- else
+ else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
+ if (ASIC_IS_DCE3(rdev)) {
+ /* HDMI 1.3+ supports max clock of 340 MHz */
+ if (mode->clock > 340000)
+ return MODE_CLOCK_HIGH;
+ else
+ return MODE_OK;
+ } else
+ return MODE_CLOCK_HIGH;
+ } else
return MODE_CLOCK_HIGH;
}
return MODE_OK;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index eb6b9eed7349..3d599e33b9cc 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2113,9 +2113,9 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
break;
}
- if (drm_device_is_agp(dev))
+ if (drm_pci_device_is_agp(dev))
dev_priv->flags |= RADEON_IS_AGP;
- else if (drm_device_is_pcie(dev))
+ else if (drm_pci_device_is_pcie(dev))
dev_priv->flags |= RADEON_IS_PCIE;
else
dev_priv->flags |= RADEON_IS_PCI;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 35b5eb8fbe2a..8c1916941871 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -75,7 +75,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
return -ENOENT;
}
p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = p->relocs[i].gobj->driver_private;
+ p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
p->relocs[i].lobj.bo = p->relocs[i].robj;
p->relocs[i].lobj.wdomain = r->write_domain;
p->relocs[i].lobj.rdomain = r->read_domains;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4954e2d6ffa2..f0209be7a34b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -85,6 +85,7 @@ static const char radeon_family_name[][16] = {
"BARTS",
"TURKS",
"CAICOS",
+ "CAYMAN",
"LAST",
};
@@ -184,7 +185,7 @@ int radeon_wb_init(struct radeon_device *rdev)
int r;
if (rdev->wb.wb_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
@@ -860,7 +861,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (rfb == NULL || rfb->obj == NULL) {
continue;
}
- robj = rfb->obj->driver_private;
+ robj = gem_to_radeon_bo(rfb->obj);
/* don't unpin kernel fb objects */
if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
r = radeon_bo_reserve(robj, false);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 3e7e7f9eb781..4be58793dc17 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -371,7 +371,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -391,7 +391,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
/* pin the new buffer */
obj = new_radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 275b26a708d6..63d2de8771dc 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -49,9 +49,10 @@
* - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
* 2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
* 2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
+ * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 8
+#define KMS_DRIVER_MINOR 9
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,16 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
extern struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor);
void radeon_debugfs_cleanup(struct drm_minor *minor);
@@ -228,11 +239,6 @@ static struct drm_driver driver_old = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -322,6 +328,9 @@ static struct drm_driver kms_driver = {
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
.dma_ioctl = radeon_dma_ioctl_kms,
+ .dumb_create = radeon_mode_dumb_create,
+ .dumb_map_offset = radeon_mode_dumb_mmap,
+ .dumb_destroy = radeon_mode_dumb_destroy,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -336,15 +345,6 @@ static struct drm_driver kms_driver = {
#endif
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
- .suspend = radeon_pci_suspend,
- .resume = radeon_pci_resume,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -354,15 +354,32 @@ static struct drm_driver kms_driver = {
};
static struct drm_driver *driver;
+static struct pci_driver *pdriver;
+
+static struct pci_driver radeon_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
+static struct pci_driver radeon_kms_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = radeon_pci_probe,
+ .remove = radeon_pci_remove,
+ .suspend = radeon_pci_suspend,
+ .resume = radeon_pci_resume,
+};
static int __init radeon_init(void)
{
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->num_ioctls = radeon_max_ioctl;
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && radeon_modeset == -1) {
DRM_INFO("VGACON disable radeon kernel modesetting.\n");
driver = &driver_old;
+ pdriver = &radeon_pci_driver;
driver->driver_features &= ~DRIVER_MODESET;
radeon_modeset = 0;
}
@@ -380,18 +397,19 @@ static int __init radeon_init(void)
if (radeon_modeset == 1) {
DRM_INFO("radeon kernel modesetting enabled.\n");
driver = &kms_driver;
+ pdriver = &radeon_kms_pci_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
radeon_register_atpx_handler();
}
/* if the vga console setting is enabled still
* let modprobe override it */
- return drm_init(driver);
+ return drm_pci_init(driver, pdriver);
}
static void __exit radeon_exit(void)
{
- drm_exit(driver);
+ drm_pci_exit(driver, pdriver);
radeon_unregister_atpx_handler();
}
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 1ca55eb09ad3..6f1d9e563e77 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -84,6 +84,7 @@ enum radeon_family {
CHIP_BARTS,
CHIP_TURKS,
CHIP_CAICOS,
+ CHIP_CAYMAN,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc44bdfec80f..0b7b486c97e8 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -64,7 +64,7 @@ static struct fb_ops radeonfb_ops = {
};
-static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
@@ -90,7 +90,7 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
- struct radeon_bo *rbo = gobj->driver_private;
+ struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
int ret;
ret = radeon_bo_reserve(rbo, false);
@@ -131,7 +131,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size);
return -ENOMEM;
}
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
@@ -205,7 +205,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
mode_cmd.depth = sizes->surface_depth;
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
@@ -406,14 +406,14 @@ int radeon_fbdev_total_size(struct radeon_device *rdev)
struct radeon_bo *robj;
int size = 0;
- robj = rdev->mode_info.rfbdev->rfb.obj->driver_private;
+ robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
size += radeon_bo_size(robj);
return size;
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
- if (robj == rdev->mode_info.rfbdev->rfb.obj->driver_private)
+ if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 171b0b2e3a64..9e59868d354e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -60,8 +60,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
- list_del(&fence->list);
- list_add_tail(&fence->list, &rdev->fence_drv.emited);
+ list_move_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
}
@@ -121,8 +120,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
- list_del(i);
- list_add_tail(i, &rdev->fence_drv.signaled);
+ list_move_tail(i, &rdev->fence_drv.signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 65016117d95f..f0534ef2f331 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,7 +78,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
int r;
if (rdev->gart.table.vram.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+ r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj);
if (r) {
@@ -149,8 +149,9 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
- pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (!rdev->gart.ttm_alloced[p])
+ pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
@@ -165,7 +166,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
}
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
- int pages, struct page **pagelist)
+ int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
unsigned t;
unsigned p;
@@ -180,15 +181,22 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* we need to support large memory configurations */
- /* assume that unbind have already been call on the range */
- rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
+ /* On the TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
+ * is requested. */
+ if (dma_addr[i] != DMA_ERROR_CODE) {
+ rdev->gart.ttm_alloced[p] = true;
+ rdev->gart.pages_addr[p] = dma_addr[i];
+ } else {
+ /* we need to support large memory configurations */
+ /* assume that unbind has already been called on the range */
+ rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
- /* FIXME: failed to map page (return -ENOMEM?) */
- radeon_gart_unbind(rdev, offset, pages);
- return -ENOMEM;
+ if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
+ /* FIXME: failed to map page (return -ENOMEM?) */
+ radeon_gart_unbind(rdev, offset, pages);
+ return -ENOMEM;
+ }
}
rdev->gart.pages[p] = pagelist[i];
page_base = rdev->gart.pages_addr[p];
@@ -251,6 +259,12 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
+ rdev->gart.num_cpu_pages, GFP_KERNEL);
+ if (rdev->gart.ttm_alloced == NULL) {
+ radeon_gart_fini(rdev);
+ return -ENOMEM;
+ }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -267,6 +281,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
+ kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
+ rdev->gart.ttm_alloced = NULL;
}
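
The new dma_addr argument carries per-page bus addresses from the TTM backend down to the GART code: a slot holding DMA_ERROR_CODE means the page was not allocated coherently, so radeon_gart_bind() keeps the old pci_map_page() path for it; any other value is used verbatim and remembered in gart.ttm_alloced[] so radeon_gart_unbind() knows not to pci_unmap_page() it. A hedged sketch of the caller-side contract (page_was_dma_allocated() is a hypothetical helper, not from this patch):

static int foo_bind_range(struct radeon_device *rdev, unsigned gpu_offset,
			  int npages, struct page **pages,
			  dma_addr_t *dma_addr)
{
	int i;

	for (i = 0; i < npages; i++)
		if (!page_was_dma_allocated(i))		/* hypothetical helper */
			dma_addr[i] = DMA_ERROR_CODE;	/* forces the pci_map_page() path */

	/* on a mapping failure, bind unwinds its own mappings via unbind */
	return radeon_gart_bind(rdev, gpu_offset, npages, pages, dma_addr);
}
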
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 1fe95dfe48c9..aa1ca2dea42f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -32,21 +32,18 @@
int radeon_gem_object_init(struct drm_gem_object *obj)
{
- /* we do nothings here */
+ BUG();
+
return 0;
}
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
- struct radeon_bo *robj = gobj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(gobj);
- gobj->driver_private = NULL;
if (robj) {
radeon_bo_unref(&robj);
}
-
- drm_gem_object_release(gobj);
- kfree(gobj);
}
int radeon_gem_object_create(struct radeon_device *rdev, int size,
@@ -54,36 +51,34 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
bool discardable, bool kernel,
struct drm_gem_object **obj)
{
- struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
*obj = NULL;
- gobj = drm_gem_object_alloc(rdev->ddev, size);
- if (!gobj) {
- return -ENOMEM;
- }
/* At least align on page size */
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
- r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj);
+ r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
- drm_gem_object_unreference_unlocked(gobj);
return r;
}
- gobj->driver_private = robj;
- *obj = gobj;
+ *obj = &robj->gem_base;
+
+ mutex_lock(&rdev->gem.mutex);
+ list_add_tail(&robj->list, &rdev->gem.objects);
+ mutex_unlock(&rdev->gem.mutex);
+
return 0;
}
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -96,7 +91,7 @@ int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
- struct radeon_bo *robj = obj->driver_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
int r;
r = radeon_bo_reserve(robj, false);
@@ -114,7 +109,7 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
int r;
/* FIXME: reimplement */
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
/* work out where to validate the buffer to */
domain = wdomain;
if (!domain) {
@@ -231,7 +226,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
@@ -239,23 +234,31 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
- struct drm_radeon_gem_mmap *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
- gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ gobj = drm_gem_object_lookup(dev, filp, handle);
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
- args->addr_ptr = radeon_bo_mmap_offset(robj);
+ robj = gem_to_radeon_bo(gobj);
+ *offset_p = radeon_bo_mmap_offset(robj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
+int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_mmap *args = data;
+
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
+}
+
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -269,7 +272,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, &cur_placement, true);
switch (cur_placement) {
case TTM_PL_VRAM:
@@ -299,7 +302,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (gobj == NULL) {
return -ENOENT;
}
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
@@ -320,7 +323,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- robj = gobj->driver_private;
+ robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
return r;
@@ -338,7 +341,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
return -ENOENT;
- rbo = gobj->driver_private;
+ rbo = gem_to_radeon_bo(gobj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
goto out;
@@ -348,3 +351,40 @@ out:
drm_gem_object_unreference_unlocked(gobj);
return r;
}
+
+int radeon_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+
+ args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
+ args->size = args->pitch * args->height;
+ args->size = ALIGN(args->size, PAGE_SIZE);
+
+ r = radeon_gem_object_create(rdev, args->size, 0,
+ RADEON_GEM_DOMAIN_VRAM,
+ false, ttm_bo_type_device,
+ &gobj);
+ if (r)
+ return -ENOMEM;
+
+ r = drm_gem_handle_create(file_priv, gobj, &handle);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(gobj);
+ if (r) {
+ return r;
+ }
+ args->handle = handle;
+ return 0;
+}
+
+int radeon_mode_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
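
Together with the .dumb_create/.dumb_map_offset/.dumb_destroy hooks wired up in radeon_drv.c above, this implements the generic KMS dumb-buffer ABI: userspace can obtain a CPU-mappable scanout buffer through driver-independent ioctls instead of radeon's own GEM calls. A minimal userspace sketch of that path (standard DRM ioctls; error handling trimmed; fd is assumed to be an open DRM node such as /dev/dri/card0):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>

/* returns the GEM handle and maps the buffer at *map; -1 on error */
static int create_dumb(int fd, uint32_t w, uint32_t h, void **map)
{
	struct drm_mode_create_dumb creq;
	struct drm_mode_map_dumb mreq;

	memset(&creq, 0, sizeof(creq));
	creq.width = w;
	creq.height = h;
	creq.bpp = 32;
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))	/* -> radeon_mode_dumb_create */
		return -1;

	memset(&mreq, 0, sizeof(mreq));
	mreq.handle = creq.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))		/* -> radeon_mode_dumb_mmap */
		return -1;

	*map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, mreq.offset);
	return *map == MAP_FAILED ? -1 : (int)creq.handle;
}

DRM_IOCTL_MODE_DESTROY_DUMB (backed here by radeon_mode_dumb_destroy) drops the handle when the buffer is no longer needed.
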
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8387d32caaa7..bf7d4c061451 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -58,9 +58,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)rdev;
/* update BUS flag */
- if (drm_device_is_agp(dev)) {
+ if (drm_pci_device_is_agp(dev)) {
flags |= RADEON_IS_AGP;
- } else if (drm_device_is_pcie(dev)) {
+ } else if (drm_pci_device_is_pcie(dev)) {
flags |= RADEON_IS_PCIE;
} else {
flags |= RADEON_IS_PCI;
@@ -169,7 +169,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
value = rdev->accel_working;
break;
case RADEON_INFO_TILING_CONFIG:
- if (rdev->family >= CHIP_CEDAR)
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.tile_config;
+ else if (rdev->family >= CHIP_CEDAR)
value = rdev->config.evergreen.tile_config;
else if (rdev->family >= CHIP_RV770)
value = rdev->config.rv770.tile_config;
@@ -205,6 +207,20 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
/* return clock value in KHz */
value = rdev->clock.spll.reference_freq * 10;
break;
+ case RADEON_INFO_NUM_BACKENDS:
+ if (rdev->family >= CHIP_CAYMAN)
+ value = rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines;
+ else if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.max_backends;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.max_backends;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.max_backends;
+ else {
+ return -EINVAL;
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
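
The new RADEON_INFO_NUM_BACKENDS request exposes the render backend count to userspace (on Cayman, backends per shader engine times the number of shader engines). The info ioctl treats drm_radeon_info.value as a user pointer and copies a uint32_t through it, so a query looks roughly like this (a sketch; assumes the installed radeon_drm.h defines the new request):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

static uint32_t query_num_backends(int fd)	/* fd: open radeon DRM node */
{
	uint32_t num_backends = 0;
	struct drm_radeon_info info = {
		.request = RADEON_INFO_NUM_BACKENDS,
		.value   = (uintptr_t)&num_backends,	/* kernel copies the u32 here */
	};

	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info))
		return 0;
	return num_backends;
}
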
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 78968b738e88..66c9af1b3d96 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -415,7 +415,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
/* Pin framebuffer & get tiling information */
obj = radeon_fb->obj;
- rbo = obj->driver_private;
+ rbo = gem_to_radeon_bo(obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
@@ -521,7 +521,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
if (!atomic && fb && fb != crtc->fb) {
radeon_fb = to_radeon_framebuffer(fb);
- rbo = radeon_fb->obj->driver_private;
+ rbo = gem_to_radeon_bo(radeon_fb->obj);
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index a670caaee29e..5067d18d0009 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -676,4 +676,5 @@ void radeon_fb_output_poll_changed(struct radeon_device *rdev);
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d6b8e88f746..976c3b1b1b6e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -55,6 +55,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
+ drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -86,7 +87,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placement.num_busy_placement = c;
}
-int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
{
@@ -96,6 +97,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
unsigned long max_size = 0;
int r;
+ size = ALIGN(size, PAGE_SIZE);
+
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
}
@@ -118,8 +121,13 @@ retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
+ r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
+ if (unlikely(r)) {
+ kfree(bo);
+ return r;
+ }
bo->rdev = rdev;
- bo->gobj = gobj;
+ bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
radeon_ttm_placement_from_domain(bo, domain);
@@ -142,12 +150,9 @@ retry:
return r;
}
*bo_ptr = bo;
- if (gobj) {
- mutex_lock(&bo->rdev->gem.mutex);
- list_add_tail(&bo->list, &rdev->gem.objects);
- mutex_unlock(&bo->rdev->gem.mutex);
- }
+
trace_radeon_bo_create(bo);
+
return 0;
}
@@ -260,7 +265,6 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
void radeon_bo_force_delete(struct radeon_device *rdev)
{
struct radeon_bo *bo, *n;
- struct drm_gem_object *gobj;
if (list_empty(&rdev->gem.objects)) {
return;
@@ -268,16 +272,14 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
mutex_lock(&rdev->ddev->struct_mutex);
- gobj = bo->gobj;
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
- gobj, bo, (unsigned long)gobj->size,
- *((unsigned long *)&gobj->refcount));
+ &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
+ *((unsigned long *)&bo->gem_base.refcount));
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
- radeon_bo_unref(&bo);
- gobj->driver_private = NULL;
- drm_gem_object_unreference(gobj);
+ /* this should unref the ttm bo */
+ drm_gem_object_unreference(&bo->gem_base);
mutex_unlock(&rdev->ddev->struct_mutex);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 22d4c237dea5..7f8e778dba46 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -137,10 +137,9 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
}
extern int radeon_bo_create(struct radeon_device *rdev,
- struct drm_gem_object *gobj, unsigned long size,
- int byte_align,
- bool kernel, u32 domain,
- struct radeon_bo **bo_ptr);
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain,
+ struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 06e79822a2bf..992d99d13fc5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -175,7 +175,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+ r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
if (r) {
@@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
rdev->cp.ring_size = ring_size;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 5b44f652145c..dee4a0c1b4b2 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev)
goto out_cleanup;
}
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
@@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev)
void **gtt_start, **gtt_end;
void **vram_start, **vram_end;
- r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8389b4c63d12..60125ddba1e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
- r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true,
+ r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
&rdev->stollen_vga_memory);
if (r) {
@@ -661,6 +661,7 @@ struct radeon_ttm_backend {
unsigned long num_pages;
struct page **pages;
struct page *dummy_read_page;
+ dma_addr_t *dma_addrs;
bool populated;
bool bound;
unsigned offset;
@@ -669,12 +670,14 @@ struct radeon_ttm_backend {
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct radeon_ttm_backend *gtt;
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = pages;
+ gtt->dma_addrs = dma_addrs;
gtt->num_pages = num_pages;
gtt->dummy_read_page = dummy_read_page;
gtt->populated = true;
@@ -687,6 +690,7 @@ static void radeon_ttm_backend_clear(struct ttm_backend *backend)
gtt = container_of(backend, struct radeon_ttm_backend, backend);
gtt->pages = NULL;
+ gtt->dma_addrs = NULL;
gtt->num_pages = 0;
gtt->dummy_read_page = NULL;
gtt->populated = false;
@@ -707,7 +711,7 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend,
gtt->num_pages, bo_mem, backend);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
- gtt->num_pages, gtt->pages);
+ gtt->num_pages, gtt->pages, gtt->dma_addrs);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
gtt->num_pages, gtt->offset);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman
new file mode 100644
index 000000000000..6334f8ac1209
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/cayman
@@ -0,0 +1,619 @@
+cayman 0x9400
+0x0000802C GRBM_GFX_INDEX
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x000088C4 VGT_CACHE_INVALIDATION
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00008958 VGT_PRIMITIVE_TYPE
+0x0000895C VGT_INDEX_TYPE
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00008990 VGT_COMPUTE_DIM_X
+0x00008994 VGT_COMPUTE_DIM_Y
+0x00008998 VGT_COMPUTE_DIM_Z
+0x0000899C VGT_COMPUTE_START_X
+0x000089A0 VGT_COMPUTE_START_Y
+0x000089A4 VGT_COMPUTE_START_Z
+0x000089A8 VGT_COMPUTE_INDEX
+0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
+0x000089B0 VGT_HS_OFFCHIP_PARAM
+0x00008A14 PA_CL_ENHANCE
+0x00008A60 PA_SC_LINE_STIPPLE_VALUE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00008BF0 PA_SC_ENHANCE
+0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
+0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
+0x00008DF8 SQ_CONST_MEM_BASE
+0x00008E20 SQ_STATIC_THREAD_MGMT_1
+0x00008E24 SQ_STATIC_THREAD_MGMT_2
+0x00008E28 SQ_STATIC_THREAD_MGMT_3
+0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x00009830 DB_DEBUG
+0x00009834 DB_DEBUG2
+0x00009838 DB_DEBUG3
+0x0000983C DB_DEBUG4
+0x00009854 DB_WATERMARKS
+0x0000A400 TD_PS_BORDER_COLOR_INDEX
+0x0000A404 TD_PS_BORDER_COLOR_RED
+0x0000A408 TD_PS_BORDER_COLOR_GREEN
+0x0000A40C TD_PS_BORDER_COLOR_BLUE
+0x0000A410 TD_PS_BORDER_COLOR_ALPHA
+0x0000A414 TD_VS_BORDER_COLOR_INDEX
+0x0000A418 TD_VS_BORDER_COLOR_RED
+0x0000A41C TD_VS_BORDER_COLOR_GREEN
+0x0000A420 TD_VS_BORDER_COLOR_BLUE
+0x0000A424 TD_VS_BORDER_COLOR_ALPHA
+0x0000A428 TD_GS_BORDER_COLOR_INDEX
+0x0000A42C TD_GS_BORDER_COLOR_RED
+0x0000A430 TD_GS_BORDER_COLOR_GREEN
+0x0000A434 TD_GS_BORDER_COLOR_BLUE
+0x0000A438 TD_GS_BORDER_COLOR_ALPHA
+0x0000A43C TD_HS_BORDER_COLOR_INDEX
+0x0000A440 TD_HS_BORDER_COLOR_RED
+0x0000A444 TD_HS_BORDER_COLOR_GREEN
+0x0000A448 TD_HS_BORDER_COLOR_BLUE
+0x0000A44C TD_HS_BORDER_COLOR_ALPHA
+0x0000A450 TD_LS_BORDER_COLOR_INDEX
+0x0000A454 TD_LS_BORDER_COLOR_RED
+0x0000A458 TD_LS_BORDER_COLOR_GREEN
+0x0000A45C TD_LS_BORDER_COLOR_BLUE
+0x0000A460 TD_LS_BORDER_COLOR_ALPHA
+0x0000A464 TD_CS_BORDER_COLOR_INDEX
+0x0000A468 TD_CS_BORDER_COLOR_RED
+0x0000A46C TD_CS_BORDER_COLOR_GREEN
+0x0000A470 TD_CS_BORDER_COLOR_BLUE
+0x0000A474 TD_CS_BORDER_COLOR_ALPHA
+0x00028000 DB_RENDER_CONTROL
+0x00028004 DB_COUNT_CONTROL
+0x0002800C DB_RENDER_OVERRIDE
+0x00028010 DB_RENDER_OVERRIDE2
+0x00028028 DB_STENCIL_CLEAR
+0x0002802C DB_DEPTH_CLEAR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x0002805C DB_DEPTH_SLICE
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028230 PA_SC_EDGERULE
+0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x00028350 SX_MISC
+0x00028354 SX_SURFACE_SYNC
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x00028400 VGT_MAX_VTX_INDX
+0x00028404 VGT_MIN_VTX_INDX
+0x00028408 VGT_INDX_OFFSET
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028414 CB_BLEND_RED
+0x00028418 CB_BLEND_GREEN
+0x0002841C CB_BLEND_BLUE
+0x00028420 CB_BLEND_ALPHA
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028438 SX_ALPHA_REF
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x000285BC PA_CL_UCP_0_X
+0x000285C0 PA_CL_UCP_0_Y
+0x000285C4 PA_CL_UCP_0_Z
+0x000285C8 PA_CL_UCP_0_W
+0x000285CC PA_CL_UCP_1_X
+0x000285D0 PA_CL_UCP_1_Y
+0x000285D4 PA_CL_UCP_1_Z
+0x000285D8 PA_CL_UCP_1_W
+0x000285DC PA_CL_UCP_2_X
+0x000285E0 PA_CL_UCP_2_Y
+0x000285E4 PA_CL_UCP_2_Z
+0x000285E8 PA_CL_UCP_2_W
+0x000285EC PA_CL_UCP_3_X
+0x000285F0 PA_CL_UCP_3_Y
+0x000285F4 PA_CL_UCP_3_Z
+0x000285F8 PA_CL_UCP_3_W
+0x000285FC PA_CL_UCP_4_X
+0x00028600 PA_CL_UCP_4_Y
+0x00028604 PA_CL_UCP_4_Z
+0x00028608 PA_CL_UCP_4_W
+0x0002860C PA_CL_UCP_5_X
+0x00028610 PA_CL_UCP_5_Y
+0x00028614 PA_CL_UCP_5_Z
+0x00028618 PA_CL_UCP_5_W
+0x0002861C SPI_VS_OUT_ID_0
+0x00028620 SPI_VS_OUT_ID_1
+0x00028624 SPI_VS_OUT_ID_2
+0x00028628 SPI_VS_OUT_ID_3
+0x0002862C SPI_VS_OUT_ID_4
+0x00028630 SPI_VS_OUT_ID_5
+0x00028634 SPI_VS_OUT_ID_6
+0x00028638 SPI_VS_OUT_ID_7
+0x0002863C SPI_VS_OUT_ID_8
+0x00028640 SPI_VS_OUT_ID_9
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286C4 SPI_VS_OUT_CONFIG
+0x000286C8 SPI_THREAD_GROUPING
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286D4 SPI_INTERP_CONTROL_0
+0x000286D8 SPI_INPUT_Z
+0x000286DC SPI_FOG_CNTL
+0x000286E0 SPI_BARYC_CNTL
+0x000286E4 SPI_PS_IN_CONTROL_2
+0x000286E8 SPI_COMPUTE_INPUT_CNTL
+0x000286EC SPI_COMPUTE_NUM_THREAD_X
+0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
+0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
+0x000286F8 SPI_GPR_MGMT
+0x000286FC SPI_LDS_MGMT
+0x00028700 SPI_STACK_MGMT
+0x00028704 SPI_WAVE_MGMT_1
+0x00028708 SPI_WAVE_MGMT_2
+0x00028724 GDS_ADDR_SIZE
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x000287CC CS_COPY_STATE
+0x000287D0 GFX_COPY_STATE
+0x000287D4 PA_CL_POINT_X_RAD
+0x000287D8 PA_CL_POINT_Y_RAD
+0x000287DC PA_CL_POINT_SIZE
+0x000287E0 PA_CL_POINT_CULL_RAD
+0x00028808 CB_COLOR_CONTROL
+0x0002880C DB_SHADER_CONTROL
+0x00028810 PA_CL_CLIP_CNTL
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028820 PA_CL_NANINF_CNTL
+0x00028824 PA_SU_LINE_STIPPLE_CNTL
+0x00028828 PA_SU_LINE_STIPPLE_SCALE
+0x0002882C PA_SU_PRIM_FILTER_CNTL
+0x00028844 SQ_PGM_RESOURCES_PS
+0x00028848 SQ_PGM_RESOURCES_2_PS
+0x0002884C SQ_PGM_EXPORTS_PS
+0x00028860 SQ_PGM_RESOURCES_VS
+0x00028864 SQ_PGM_RESOURCES_2_VS
+0x00028878 SQ_PGM_RESOURCES_GS
+0x0002887C SQ_PGM_RESOURCES_2_GS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x00028894 SQ_PGM_RESOURCES_2_ES
+0x000288A8 SQ_PGM_RESOURCES_FS
+0x000288BC SQ_PGM_RESOURCES_HS
+0x000288C0 SQ_PGM_RESOURCES_2_HS
+0x000288D4 SQ_PGM_RESOURCES_LS
+0x000288D8 SQ_PGM_RESOURCES_2_LS
+0x000288E8 SQ_LDS_ALLOC
+0x000288EC SQ_LDS_ALLOC_PS
+0x000288F0 SQ_VTX_SEMANTIC_CLEAR
+0x00028A00 PA_SU_POINT_SIZE
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A2C VGT_GROUP_DECR
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A48 PA_SC_MODE_CNTL_0
+0x00028A4C PA_SC_MODE_CNTL_1
+0x00028A50 VGT_ENHANCE
+0x00028A54 VGT_GS_PER_ES
+0x00028A58 VGT_ES_PER_GS
+0x00028A5C VGT_GS_PER_VS
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028A70 IA_ENHANCE
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x00028AA8 IA_MULTI_VGT_PARAM
+0x00028AB4 VGT_REUSE_OFF
+0x00028AB8 VGT_VTX_CNT_EN
+0x00028ABC DB_HTILE_SURFACE
+0x00028AC0 DB_SRESULTS_COMPARE_STATE0
+0x00028AC4 DB_SRESULTS_COMPARE_STATE1
+0x00028AC8 DB_PRELOAD_CONTROL
+0x00028B38 VGT_GS_MAX_VERT_OUT
+0x00028B54 VGT_SHADER_STAGES_EN
+0x00028B58 VGT_LS_HS_CONFIG
+0x00028B6C VGT_TF_PARAM
+0x00028B70 DB_ALPHA_TO_MASK
+0x00028B74 VGT_DISPATCH_INITIATOR
+0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028B7C PA_SU_POLY_OFFSET_CLAMP
+0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028B74 VGT_GS_INSTANCE_CNT
+0x00028BD4 PA_SC_CENTROID_PRIORITY_0
+0x00028BD8 PA_SC_CENTROID_PRIORITY_1
+0x00028BDC PA_SC_LINE_CNTL
+0x00028BE4 PA_SU_VTX_CNTL
+0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
+0x00028BEC PA_CL_GB_VERT_DISC_ADJ
+0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
+0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
+0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
+0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
+0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
+0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
+0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
+0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
+0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
+0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
+0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
+0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
+0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
+0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
+0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
+0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
+0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
+0x00028C8C CB_COLOR0_CLEAR_WORD0
+0x00028C90 CB_COLOR0_CLEAR_WORD1
+0x00028C94 CB_COLOR0_CLEAR_WORD2
+0x00028C98 CB_COLOR0_CLEAR_WORD3
+0x00028CC8 CB_COLOR1_CLEAR_WORD0
+0x00028CCC CB_COLOR1_CLEAR_WORD1
+0x00028CD0 CB_COLOR1_CLEAR_WORD2
+0x00028CD4 CB_COLOR1_CLEAR_WORD3
+0x00028D04 CB_COLOR2_CLEAR_WORD0
+0x00028D08 CB_COLOR2_CLEAR_WORD1
+0x00028D0C CB_COLOR2_CLEAR_WORD2
+0x00028D10 CB_COLOR2_CLEAR_WORD3
+0x00028D40 CB_COLOR3_CLEAR_WORD0
+0x00028D44 CB_COLOR3_CLEAR_WORD1
+0x00028D48 CB_COLOR3_CLEAR_WORD2
+0x00028D4C CB_COLOR3_CLEAR_WORD3
+0x00028D7C CB_COLOR4_CLEAR_WORD0
+0x00028D80 CB_COLOR4_CLEAR_WORD1
+0x00028D84 CB_COLOR4_CLEAR_WORD2
+0x00028D88 CB_COLOR4_CLEAR_WORD3
+0x00028DB8 CB_COLOR5_CLEAR_WORD0
+0x00028DBC CB_COLOR5_CLEAR_WORD1
+0x00028DC0 CB_COLOR5_CLEAR_WORD2
+0x00028DC4 CB_COLOR5_CLEAR_WORD3
+0x00028DF4 CB_COLOR6_CLEAR_WORD0
+0x00028DF8 CB_COLOR6_CLEAR_WORD1
+0x00028DFC CB_COLOR6_CLEAR_WORD2
+0x00028E00 CB_COLOR6_CLEAR_WORD3
+0x00028E30 CB_COLOR7_CLEAR_WORD0
+0x00028E34 CB_COLOR7_CLEAR_WORD1
+0x00028E38 CB_COLOR7_CLEAR_WORD2
+0x00028E3C CB_COLOR7_CLEAR_WORD3
+0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
+0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
+0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
+0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
+0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
+0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
+0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
+0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
+0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
+0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
+0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
+0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
+0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
+0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
+0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
+0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
+0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
+0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
+0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
+0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
+0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
+0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
+0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
+0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
+0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
+0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
+0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
+0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
+0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
+0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
+0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
+0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
+0x0003CFF0 SQ_VTX_BASE_VTX_LOC
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003FF00 SQ_TEX_SAMPLER_CLEAR
+0x0003FF04 SQ_TEX_RESOURCE_CLEAR
+0x0003FF08 SQ_LOOP_BOOL_CLEAR
diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen
index 9177f9191837..7e1637176e08 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/evergreen
+++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen
@@ -1,4 +1,5 @@
evergreen 0x9400
+0x0000802C GRBM_GFX_INDEX
0x00008040 WAIT_UNTIL
0x00008044 WAIT_UNTIL_POLL_CNTL
0x00008048 WAIT_UNTIL_POLL_MASK
@@ -220,6 +221,7 @@ evergreen 0x9400
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028350 SX_MISC
+0x00028354 SX_SURFACE_SYNC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 714ad45757d0..4cc7b717fedd 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1003,7 +1003,7 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev)
u64 gpu_addr;
if (rdev->vram_scratch.robj == NULL) {
- r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
+ r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->vram_scratch.robj);
if (r) {
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index fa64d25d4248..6464490b240b 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -55,11 +55,6 @@ static struct drm_driver driver = {
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -68,15 +63,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver savage_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init savage_init(void)
{
driver.num_ioctls = savage_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &savage_pci_driver);
}
static void __exit savage_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &savage_pci_driver);
}
module_init(savage_init);
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4caf5d01cfd3..46d5be6e97e5 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -82,10 +82,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -95,15 +91,20 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver sis_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init sis_init(void)
{
driver.num_ioctls = sis_max_ioctl;
- return drm_init(&driver);
+ return drm_pci_init(&driver, &sis_pci_driver);
}
static void __exit sis_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &sis_pci_driver);
}
module_init(sis_init);
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index b70fa91d761a..8bf98810a8d6 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -52,10 +52,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -65,14 +61,19 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver tdfx_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init tdfx_init(void)
{
- return drm_init(&driver);
+ return drm_pci_init(&driver, &tdfx_pci_driver);
}
static void __exit tdfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &tdfx_pci_driver);
}
module_init(tdfx_init);
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index f999e36f30b4..1c4a72f681c1 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -47,7 +47,8 @@ struct ttm_agp_backend {
static int ttm_agp_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct ttm_agp_backend *agp_be =
container_of(backend, struct ttm_agp_backend, backend);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index af61fc29e843..0b6a55ac2f87 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -406,11 +406,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ if (bdev->driver->move_notify)
+ bdev->driver->move_notify(bo, mem);
bo->mem = *mem;
mem->mm_node = NULL;
goto moved;
}
-
}
if (bdev->driver->move_notify)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index b1e02fffd3cc..737a2a2e46a5 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -38,6 +38,7 @@
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <asm/atomic.h>
@@ -662,7 +663,8 @@ out:
* cached pages.
*/
int ttm_get_pages(struct list_head *pages, int flags,
- enum ttm_caching_state cstate, unsigned count)
+ enum ttm_caching_state cstate, unsigned count,
+ dma_addr_t *dma_address)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
@@ -681,14 +683,22 @@ int ttm_get_pages(struct list_head *pages, int flags,
gfp_flags |= GFP_HIGHUSER;
for (r = 0; r < count; ++r) {
- p = alloc_page(gfp_flags);
+ if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+ void *addr;
+ addr = dma_alloc_coherent(NULL, PAGE_SIZE,
+ &dma_address[r],
+ gfp_flags);
+ if (addr == NULL)
+ return -ENOMEM;
+ p = virt_to_page(addr);
+ } else
+ p = alloc_page(gfp_flags);
if (!p) {
printk(KERN_ERR TTM_PFX
"Unable to allocate page.");
return -ENOMEM;
}
-
list_add(&p->lru, pages);
}
return 0;
@@ -720,7 +730,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, 0, flags, cstate);
+ ttm_put_pages(pages, 0, flags, cstate, NULL);
return r;
}
}
@@ -731,17 +741,29 @@ int ttm_get_pages(struct list_head *pages, int flags,
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
- enum ttm_caching_state cstate)
+ enum ttm_caching_state cstate, dma_addr_t *dma_address)
{
unsigned long irq_flags;
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p, *tmp;
+ unsigned r;
if (pool == NULL) {
/* No pool for this memory type so free the pages */
+ r = page_count-1;
list_for_each_entry_safe(p, tmp, pages, lru) {
- __free_page(p);
+ if ((flags & TTM_PAGE_FLAG_DMA32) && dma_address) {
+ void *addr = page_address(p);
+ WARN_ON(!addr || !dma_address[r]);
+ if (addr)
+ dma_free_coherent(NULL, PAGE_SIZE,
+ addr,
+ dma_address[r]);
+ dma_address[r] = 0;
+ } else
+ __free_page(p);
+ r--;
}
/* Make the pages list empty */
INIT_LIST_HEAD(pages);
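
When TTM_PAGE_FLAG_DMA32 is set and a dma_address array is supplied, ttm_get_pages() now allocates each page with dma_alloc_coherent() and records its bus address; the same array must be handed back to ttm_put_pages() so those pages are released with dma_free_coherent() rather than __free_page(). A minimal sketch of the symmetric calls (single cached page; in-tree users go through the TTM backend rather than calling these directly):

static int foo_alloc_one_dma32_page(dma_addr_t *dma_address)
{
	LIST_HEAD(pages);
	int r;

	r = ttm_get_pages(&pages, TTM_PAGE_FLAG_DMA32, tt_cached, 1, dma_address);
	if (r)
		return r;

	/* ... use the page; dma_address[0] now holds its bus address ... */

	ttm_put_pages(&pages, 1, TTM_PAGE_FLAG_DMA32, tt_cached, dma_address);
	return 0;
}
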
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index af789dc869b9..86d5b1745a45 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -49,12 +49,16 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
+ ttm->dma_address = drm_calloc_large(ttm->num_pages,
+ sizeof(*ttm->dma_address));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
drm_free_large(ttm->pages);
ttm->pages = NULL;
+ drm_free_large(ttm->dma_address);
+ ttm->dma_address = NULL;
}
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
@@ -105,7 +109,8 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
INIT_LIST_HEAD(&h);
- ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1);
+ ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
+ &ttm->dma_address[index]);
if (ret != 0)
return NULL;
@@ -164,7 +169,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
}
be->func->populate(be, ttm->num_pages, ttm->pages,
- ttm->dummy_read_page);
+ ttm->dummy_read_page, ttm->dma_address);
ttm->state = tt_unbound;
return 0;
}
@@ -298,7 +303,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
count++;
}
}
- ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state);
+ ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
+ ttm->dma_address);
ttm->state = tt_unpopulated;
ttm->first_himem_page = ttm->num_pages;
ttm->last_lomem_page = -1;
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index e1ff4e7a6eb0..920a55214bcf 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -62,10 +62,6 @@ static struct drm_driver driver = {
.fasync = drm_fasync,
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- },
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -75,16 +71,21 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct pci_driver via_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+};
+
static int __init via_init(void)
{
driver.num_ioctls = via_max_ioctl;
via_init_command_verifier();
- return drm_init(&driver);
+ return drm_pci_init(&driver, &via_pci_driver);
}
static void __exit via_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &via_pci_driver);
}
module_init(via_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 80bc37b274e7..87e43e0733bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -102,7 +102,8 @@ struct vmw_ttm_backend {
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 10ca97ee0206..96949b93d920 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -909,15 +909,6 @@ static struct drm_driver driver = {
#endif
.llseek = noop_llseek,
},
- .pci_driver = {
- .name = VMWGFX_DRIVER_NAME,
- .id_table = vmw_pci_id_list,
- .probe = vmw_probe,
- .remove = vmw_remove,
- .driver = {
- .pm = &vmw_pm_ops
- }
- },
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
@@ -926,6 +917,16 @@ static struct drm_driver driver = {
.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};
+static struct pci_driver vmw_pci_driver = {
+ .name = VMWGFX_DRIVER_NAME,
+ .id_table = vmw_pci_id_list,
+ .probe = vmw_probe,
+ .remove = vmw_remove,
+ .driver = {
+ .pm = &vmw_pm_ops
+ }
+};
+
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
return drm_get_pci_dev(pdev, ent, &driver);
@@ -934,7 +935,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
static int __init vmwgfx_init(void)
{
int ret;
- ret = drm_init(&driver);
+ ret = drm_pci_init(&driver, &vmw_pci_driver);
if (ret)
DRM_ERROR("Failed initializing DRM.\n");
return ret;
@@ -942,7 +943,7 @@ static int __init vmwgfx_init(void)
static void __exit vmwgfx_exit(void)
{
- drm_exit(&driver);
+ drm_pci_exit(&driver, &vmw_pci_driver);
}
module_init(vmwgfx_init);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 29113c9b26a8..b3a2cd5118d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -345,7 +345,7 @@ static enum drm_connector_status
return connector_status_disconnected;
}
-static struct drm_display_mode vmw_ldu_connector_builtin[] = {
+static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
@@ -429,7 +429,6 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_display_mode *mode = NULL;
- struct drm_display_mode *bmode;
struct drm_display_mode prefmode = { DRM_MODE("preferred",
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -459,6 +458,8 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
}
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
+ const struct drm_display_mode *bmode;
+
bmode = &vmw_ldu_connector_builtin[i];
if (bmode->hdisplay > max_width ||
bmode->vdisplay > max_height)
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 297bc9a7d6e6..1bfb4439e4e1 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -467,6 +467,17 @@ config SENSORS_JC42
This driver can also be built as a module. If so, the module
will be called jc42.
+config SENSORS_LINEAGE
+ tristate "Lineage Compact Power Line Power Entry Module"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Lineage Compact Power Line
+ series of DC/DC and AC/DC converters such as CP1800, CP2000AC,
+ CP2000DC, CP2725, and others.
+
+ This driver can also be built as a module. If so, the module
+ will be called lineage-pem.
+
config SENSORS_LM63
tristate "National Semiconductor LM63 and LM64"
depends on I2C
@@ -625,6 +636,17 @@ config SENSORS_LM93
This driver can also be built as a module. If so, the module
will be called lm93.
+config SENSORS_LTC4151
+ tristate "Linear Technology LTC4151"
+ depends on I2C
+ default n
+ help
+ If you say yes here you get support for Linear Technology LTC4151
+ High Voltage I2C Current and Voltage Monitor interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called ltc4151.
+
config SENSORS_LTC4215
tristate "Linear Technology LTC4215"
depends on I2C && EXPERIMENTAL
@@ -685,6 +707,16 @@ config SENSORS_MAX1619
This driver can also be built as a module. If so, the module
will be called max1619.
+config SENSORS_MAX6639
+ tristate "Maxim MAX6639 sensor chip"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the MAX6639
+ sensor chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6639.
+
config SENSORS_MAX6650
tristate "Maxim MAX6650 sensor chip"
depends on I2C && EXPERIMENTAL
@@ -735,6 +767,61 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config PMBUS
+ tristate "PMBus support"
+ depends on I2C && EXPERIMENTAL
+ default n
+ help
+ Say yes here if you want to enable PMBus support.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus_core.
+
+if PMBUS
+
+config SENSORS_PMBUS
+ tristate "Generic PMBus devices"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for generic
+ PMBus devices, including but not limited to BMR450, BMR451, BMR453,
+ BMR454, and LTC2978.
+
+ This driver can also be built as a module. If so, the module will
+ be called pmbus.
+
+config SENSORS_MAX16064
+ tristate "Maxim MAX16064"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX16064.
+
+ This driver can also be built as a module. If so, the module will
+ be called max16064.
+
+config SENSORS_MAX34440
+ tristate "Maxim MAX34440/MAX34441"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX34440 and MAX34441.
+
+ This driver can also be built as a module. If so, the module will
+ be called max34440.
+
+config SENSORS_MAX8688
+ tristate "Maxim MAX8688"
+ default n
+ help
+ If you say yes here you get hardware monitoring support for Maxim
+ MAX8688.
+
+ This driver can also be built as a module. If so, the module will
+ be called max8688.
+
+endif # PMBUS
+
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
depends on GENERIC_GPIO
@@ -1083,7 +1170,7 @@ config SENSORS_W83627HF
will be called w83627hf.
config SENSORS_W83627EHF
- tristate "Winbond W83627EHF/EHG/DHG, W83667HG"
+ tristate "Winbond W83627EHF/EHG/DHG, W83667HG, NCT6775F, NCT6776F"
select HWMON_VID
help
If you say yes here you get support for the hardware
@@ -1094,7 +1181,8 @@ config SENSORS_W83627EHF
chip suited for specific Intel processors that use PECI such as
the Core 2 Duo.
- This driver also supports the W83667HG chip.
+ This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F
+ (also known as W83667HG-I), and NCT6776F.
This driver can also be built as a module. If so, the module
will be called w83627ehf.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index dde02d99c238..bd0410e4b44f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_JC42) += jc42.o
obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o
obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o
obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o
+obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o
obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o
obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o
obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o
@@ -79,11 +80,13 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o
obj-$(CONFIG_SENSORS_LM92) += lm92.o
obj-$(CONFIG_SENSORS_LM93) += lm93.o
obj-$(CONFIG_SENSORS_LM95241) += lm95241.o
+obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o
obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o
obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX1619) += max1619.o
+obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
obj-$(CONFIG_SENSORS_PC87360) += pc87360.o
@@ -112,6 +115,13 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
+# PMBus drivers
+obj-$(CONFIG_PMBUS) += pmbus_core.o
+obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o
+obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
+obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
+obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
+
ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y)
EXTRA_CFLAGS += -DDEBUG
endif
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 6e06019015a5..a4d430ee7e20 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1,6 +1,6 @@
/***************************************************************************
* Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> *
- * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> *
+ * Copyright (C) 2007-2011 Hans de Goede <hdegoede@redhat.com> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
@@ -47,22 +47,23 @@
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
#define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */
+#define SIO_F71808E_ID 0x0901 /* Chipset ID */
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
+#define SIO_F71869_ID 0x0814 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
+#define SIO_F71889E_ID 0x0909 /* Chipset ID */
#define SIO_F8000_ID 0x0581 /* Chipset ID */
#define REGION_LENGTH 8
#define ADDR_REG_OFFSET 5
#define DATA_REG_OFFSET 6
-#define F71882FG_REG_PECI 0x0A
-
-#define F71882FG_REG_IN_STATUS 0x12 /* f71882fg only */
-#define F71882FG_REG_IN_BEEP 0x13 /* f71882fg only */
+#define F71882FG_REG_IN_STATUS 0x12 /* f7188x only */
+#define F71882FG_REG_IN_BEEP 0x13 /* f7188x only */
#define F71882FG_REG_IN(nr) (0x20 + (nr))
-#define F71882FG_REG_IN1_HIGH 0x32 /* f71882fg only */
+#define F71882FG_REG_IN1_HIGH 0x32 /* f7188x only */
#define F71882FG_REG_FAN(nr) (0xA0 + (16 * (nr)))
#define F71882FG_REG_FAN_TARGET(nr) (0xA2 + (16 * (nr)))
@@ -86,28 +87,71 @@
#define F71882FG_REG_FAN_HYST(nr) (0x98 + (nr))
+#define F71882FG_REG_FAN_FAULT_T 0x9F
+#define F71882FG_FAN_NEG_TEMP_EN 0x20
+#define F71882FG_FAN_PROG_SEL 0x80
+
#define F71882FG_REG_POINT_PWM(pwm, point) (0xAA + (point) + (16 * (pwm)))
#define F71882FG_REG_POINT_TEMP(pwm, point) (0xA6 + (point) + (16 * (pwm)))
#define F71882FG_REG_POINT_MAPPING(nr) (0xAF + 16 * (nr))
#define F71882FG_REG_START 0x01
+#define F71882FG_MAX_INS 9
+
#define FAN_MIN_DETECT 366 /* Lowest detectable fan speed */
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 };
+enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
+ f71889ed, f8000 };
static const char *f71882fg_names[] = {
+ "f71808e",
"f71858fg",
"f71862fg",
+ "f71869", /* Both f71869f and f71869e, reg. compatible and same id */
"f71882fg",
"f71889fg",
+ "f71889ed",
"f8000",
};
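+
+/* 1 = voltage input present; rows indexed by enum chips, columns are in0-in8 */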
+static const char f71882fg_has_in[8][F71882FG_MAX_INS] = {
+ { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, /* f71808e */
+ { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f71858fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71862fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71869 */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71882fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889fg */
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889ed */
+ { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f8000 */
+};
+
+static const char f71882fg_has_in1_alarm[8] = {
+ 0, /* f71808e */
+ 0, /* f71858fg */
+ 0, /* f71862fg */
+ 0, /* f71869 */
+ 1, /* f71882fg */
+ 1, /* f71889fg */
+ 1, /* f71889ed */
+ 0, /* f8000 */
+};
+
+static const char f71882fg_has_beep[8] = {
+ 0, /* f71808e */
+ 0, /* f71858fg */
+ 1, /* f71862fg */
+ 1, /* f71869 */
+ 1, /* f71882fg */
+ 1, /* f71889fg */
+ 1, /* f71889ed */
+ 0, /* f8000 */
+};
+
static struct platform_device *f71882fg_pdev;
/* Super-I/O Function prototypes */
@@ -129,11 +173,12 @@ struct f71882fg_data {
struct mutex update_lock;
int temp_start; /* temp numbering start (0 or 1) */
char valid; /* !=0 if following fields are valid */
+ char auto_point_temp_signed;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
/* Register Values */
- u8 in[9];
+ u8 in[F71882FG_MAX_INS];
u8 in1_max;
u8 in_status;
u8 in_beep;
@@ -142,7 +187,7 @@ struct f71882fg_data {
u16 fan_full_speed[4];
u8 fan_status;
u8 fan_beep;
- /* Note: all models have only 3 temperature channels, but on some
+ /* Note: all models have at most 3 temperature channels, but on some
they are addressed as 0-2 and on others as 1-3, so for coding
convenience we reserve space for 4 channels */
u16 temp[4];
@@ -262,13 +307,9 @@ static struct platform_driver f71882fg_driver = {
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-/* Temp and in attr for the f71858fg, the f71858fg is special as it
- has its temperature indexes start at 0 (the others start at 1) and
- it only has 3 voltage inputs */
-static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+/* Temp attr for the f71858fg; it is special in that its temperature
+ indexes start at 0 (the others start at 1) */
+static struct sensor_device_attribute_2 f71858fg_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 0),
@@ -292,7 +333,6 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 1),
SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
@@ -308,17 +348,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
-/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */
-static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
- SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
- SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
- SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
- SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
- SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
- SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+/* Temp attr for the standard models, split per temp channel so that only
+ nr_temps channels worth of attributes get registered */
+static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
@@ -328,17 +359,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
the max and crit alarms separately and lm_sensors v2 depends on the
presence of temp#_alarm files. The same goes for temp2/3 _alarm. */
SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1),
- SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 1),
SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 1),
SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 1),
SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 5),
SENSOR_ATTR_2(temp1_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
+}, {
SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 2),
@@ -346,17 +374,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
store_temp_max_hyst, 0, 2),
/* Should be temp2_max_alarm, see temp1_alarm note */
SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2),
- SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 2),
SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 2),
SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 2),
SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6),
- SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 6),
SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
+}, {
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 3),
@@ -364,37 +389,39 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = {
store_temp_max_hyst, 0, 3),
/* Should be temp3_max_alarm, see temp1_alarm note */
SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 3),
- SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 3),
SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 3),
SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL,
0, 3),
SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 7),
- SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
- store_temp_beep, 0, 7),
SENSOR_ATTR_2(temp3_type, S_IRUGO, show_temp_type, NULL, 0, 3),
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3),
-};
+} };
-/* For models with in1 alarm capability */
-static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
- SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
- 0, 1),
- SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
- 0, 1),
- SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
-};
+/* Temp attr for models which can beep on temp alarm */
+static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { {
+ SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 1),
+ SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 5),
+}, {
+ SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 2),
+ SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 6),
+}, {
+ SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 3),
+ SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep,
+ store_temp_beep, 0, 7),
+} };
-/* Temp and in attr for the f8000
+/* Temp attr for the f8000
Note: on the f8000, temp_ovt (crit) is used as max, and temp_high (max)
is used as the hysteresis value to clear alarms
Also like the f71858fg its temperature indexes start at 0
*/
-static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
- SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
- SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
- SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+static struct sensor_device_attribute_2 f8000_temp_attr[] = {
SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_crit,
store_temp_crit, 0, 0),
@@ -408,7 +435,6 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max,
store_temp_max, 0, 1),
SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5),
- SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1),
SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1),
SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2),
SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit,
@@ -419,6 +445,28 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = {
SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2),
};
+/* in attr for all models */
+static struct sensor_device_attribute_2 fxxxx_in_attr[] = {
+ SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0),
+ SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1),
+ SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2),
+ SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3),
+ SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4),
+ SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5),
+ SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6),
+ SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7),
+ SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8),
+};
+
+/* For models with in1 alarm capability */
+static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = {
+ SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max,
+ 0, 1),
+ SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep,
+ 0, 1),
+ SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1),
+};
+
/* Fan / PWM attr common to all models */
static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { {
SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0),
@@ -479,7 +527,7 @@ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = {
};
/* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the
- f71858fg / f71882fg / f71889fg */
+ standard models */
static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
@@ -548,7 +596,87 @@ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = {
show_pwm_auto_point_temp_hyst, NULL, 3, 2),
};
-/* PWM attr common to the f71858fg, f71882fg and f71889fg */
+/* PWM attr for the f71808e/f71869, almost identical to the f71862fg, but the
+ pwm setting when the temperature is above the pwmX_auto_point1_temp can be
+ programmed instead of being hardcoded to 0xff */
+static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = {
+ SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 0),
+ SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 0),
+ SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 0),
+ SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 0),
+
+ SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 1),
+ SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 1),
+ SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 1),
+ SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 1),
+
+ SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_channel,
+ store_pwm_auto_point_channel, 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 1, 2),
+ SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_pwm, store_pwm_auto_point_pwm,
+ 4, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp, store_pwm_auto_point_temp,
+ 3, 2),
+ SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR,
+ show_pwm_auto_point_temp_hyst,
+ store_pwm_auto_point_temp_hyst,
+ 0, 2),
+ SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO,
+ show_pwm_auto_point_temp_hyst, NULL, 3, 2),
+};
+
+/* PWM attr for the standard models */
static struct sensor_device_attribute_2 fxxxx_auto_pwm_attr[4][14] = { {
SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR,
show_pwm_auto_point_channel,
@@ -943,16 +1071,16 @@ static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr)
static struct f71882fg_data *f71882fg_update_device(struct device *dev)
{
struct f71882fg_data *data = dev_get_drvdata(dev);
- int nr, reg = 0, reg2;
+ int nr, reg, point;
int nr_fans = (data->type == f71882fg) ? 4 : 3;
- int nr_ins = (data->type == f71858fg || data->type == f8000) ? 3 : 9;
+ int nr_temps = (data->type == f71808e) ? 2 : 3;
mutex_lock(&data->update_lock);
/* Update once every 60 seconds */
if (time_after(jiffies, data->last_limits + 60 * HZ) ||
!data->valid) {
- if (data->type == f71882fg || data->type == f71889fg) {
+ if (f71882fg_has_in1_alarm[data->type]) {
data->in1_max =
f71882fg_read8(data, F71882FG_REG_IN1_HIGH);
data->in_beep =
@@ -960,7 +1088,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
}
/* Get High & boundary temps*/
- for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) {
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++) {
data->temp_ovt[nr] = f71882fg_read8(data,
F71882FG_REG_TEMP_OVT(nr));
data->temp_high[nr] = f71882fg_read8(data,
@@ -973,44 +1102,19 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->temp_hyst[1] = f71882fg_read8(data,
F71882FG_REG_TEMP_HYST(1));
}
+ /* All but the f71858fg / f8000 have this register */
+ if ((data->type != f71858fg) && (data->type != f8000)) {
+ reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
+ data->temp_type[1] = (reg & 0x02) ? 2 : 4;
+ data->temp_type[2] = (reg & 0x04) ? 2 : 4;
+ data->temp_type[3] = (reg & 0x08) ? 2 : 4;
+ }
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg) {
+ if (f71882fg_has_beep[data->type]) {
data->fan_beep = f71882fg_read8(data,
F71882FG_REG_FAN_BEEP);
data->temp_beep = f71882fg_read8(data,
F71882FG_REG_TEMP_BEEP);
- /* Have to hardcode type, because temp1 is special */
- reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE);
- data->temp_type[2] = (reg & 0x04) ? 2 : 4;
- data->temp_type[3] = (reg & 0x08) ? 2 : 4;
- }
- /* Determine temp index 1 sensor type */
- if (data->type == f71889fg) {
- reg2 = f71882fg_read8(data, F71882FG_REG_START);
- switch ((reg2 & 0x60) >> 5) {
- case 0x00: /* BJT / Thermistor */
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- break;
- case 0x01: /* AMDSI */
- data->temp_type[1] = 5;
- break;
- case 0x02: /* PECI */
- case 0x03: /* Ibex Peak ?? Report as PECI for now */
- data->temp_type[1] = 6;
- break;
- }
- } else {
- reg2 = f71882fg_read8(data, F71882FG_REG_PECI);
- if ((reg2 & 0x03) == 0x01)
- data->temp_type[1] = 6; /* PECI */
- else if ((reg2 & 0x03) == 0x02)
- data->temp_type[1] = 5; /* AMDSI */
- else if (data->type == f71862fg ||
- data->type == f71882fg)
- data->temp_type[1] = (reg & 0x02) ? 2 : 4;
- else /* f71858fg and f8000 only support BJT */
- data->temp_type[1] = 2;
}
data->pwm_enable = f71882fg_read8(data,
@@ -1025,8 +1129,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
f71882fg_read8(data,
F71882FG_REG_POINT_MAPPING(nr));
- if (data->type != f71862fg) {
- int point;
+ switch (data->type) {
+ default:
for (point = 0; point < 5; point++) {
data->pwm_auto_point_pwm[nr][point] =
f71882fg_read8(data,
@@ -1039,7 +1143,14 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_POINT_TEMP
(nr, point));
}
- } else {
+ break;
+ case f71808e:
+ case f71869:
+ data->pwm_auto_point_pwm[nr][0] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_PWM(nr, 0));
+ /* Fall through */
+ case f71862fg:
data->pwm_auto_point_pwm[nr][1] =
f71882fg_read8(data,
F71882FG_REG_POINT_PWM
@@ -1056,6 +1167,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
f71882fg_read8(data,
F71882FG_REG_POINT_TEMP
(nr, 3));
+ break;
}
}
data->last_limits = jiffies;
@@ -1067,7 +1179,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_TEMP_STATUS);
data->temp_diode_open = f71882fg_read8(data,
F71882FG_REG_TEMP_DIODE_OPEN);
- for (nr = data->temp_start; nr < 3 + data->temp_start; nr++)
+ for (nr = data->temp_start; nr < nr_temps + data->temp_start;
+ nr++)
data->temp[nr] = f71882fg_read_temp(data, nr);
data->fan_status = f71882fg_read8(data,
@@ -1083,17 +1196,18 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
data->pwm[nr] =
f71882fg_read8(data, F71882FG_REG_PWM(nr));
}
-
/* The f8000 can monitor 1 more fan, but has no pwm for it */
if (data->type == f8000)
data->fan[3] = f71882fg_read16(data,
F71882FG_REG_FAN(3));
- if (data->type == f71882fg || data->type == f71889fg)
+
+ if (f71882fg_has_in1_alarm[data->type])
data->in_status = f71882fg_read8(data,
F71882FG_REG_IN_STATUS);
- for (nr = 0; nr < nr_ins; nr++)
- data->in[nr] = f71882fg_read8(data,
- F71882FG_REG_IN(nr));
+ for (nr = 0; nr < F71882FG_MAX_INS; nr++)
+ if (f71882fg_has_in[data->type][nr])
+ data->in[nr] = f71882fg_read8(data,
+ F71882FG_REG_IN(nr));
data->last_updated = jiffies;
data->valid = 1;
@@ -1882,7 +1996,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
val /= 1000;
- if (data->type == f71889fg)
+ if (data->auto_point_temp_signed)
val = SENSORS_LIMIT(val, -128, 127);
else
val = SENSORS_LIMIT(val, 0, 127);
@@ -1929,7 +2043,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
struct f71882fg_data *data;
struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3;
- u8 start_reg;
+ int nr_temps = (sio_data->type == f71808e) ? 2 : 3;
+ u8 start_reg, reg;
data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL);
if (!data)
@@ -1968,37 +2083,72 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
/* The f71858fg temperature alarms behave as
the f8000 alarms in this mode */
err = f71882fg_create_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
else
err = f71882fg_create_sysfs_files(pdev,
- f71858fg_in_temp_attr,
- ARRAY_SIZE(f71858fg_in_temp_attr));
- break;
- case f71882fg:
- case f71889fg:
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- if (err)
- goto exit_unregister_sysfs;
- /* fall through! */
- case f71862fg:
- err = f71882fg_create_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
break;
+ default:
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
}
if (err)
goto exit_unregister_sysfs;
+
+ if (f71882fg_has_beep[data->type]) {
+ err = f71882fg_create_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0])
+ * nr_temps);
+ if (err)
+ goto exit_unregister_sysfs;
+ }
+
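+ /* Register only the voltage inputs present on this model */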
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ err = device_create_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ if (err)
+ goto exit_unregister_sysfs;
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ err = f71882fg_create_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
+ if (err)
+ goto exit_unregister_sysfs;
+ }
}
if (start_reg & 0x02) {
+ switch (data->type) {
+ case f71808e:
+ case f71869:
+ /* These always have signed auto point temps */
+ data->auto_point_temp_signed = 1;
+ /* Fall through to select correct fan/pwm reg bank! */
+ case f71889fg:
+ case f71889ed:
+ reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T);
+ if (reg & F71882FG_FAN_NEG_TEMP_EN)
+ data->auto_point_temp_signed = 1;
+ /* Ensure banked pwm registers point to right bank */
+ reg &= ~F71882FG_FAN_PROG_SEL;
+ f71882fg_write8(data, F71882FG_REG_FAN_FAULT_T, reg);
+ break;
+ default:
+ break;
+ }
+
data->pwm_enable =
f71882fg_read8(data, F71882FG_REG_PWM_ENABLE);
@@ -2013,8 +2163,11 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71862fg:
err = (data->pwm_enable & 0x15) != 0x15;
break;
+ case f71808e:
+ case f71869:
case f71882fg:
case f71889fg:
+ case f71889ed:
err = 0;
break;
case f8000:
@@ -2034,8 +2187,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
if (err)
goto exit_unregister_sysfs;
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg) {
+ if (f71882fg_has_beep[data->type]) {
err = f71882fg_create_sysfs_files(pdev,
fxxxx_fan_beep_attr, nr_fans);
if (err)
@@ -2043,11 +2195,42 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
}
switch (data->type) {
+ case f71808e:
+ case f71869:
+ case f71889fg:
+ case f71889ed:
+ for (i = 0; i < nr_fans; i++) {
+ data->pwm_auto_point_mapping[i] =
+ f71882fg_read8(data,
+ F71882FG_REG_POINT_MAPPING(i));
+ if ((data->pwm_auto_point_mapping[i] & 0x80) ||
+ (data->pwm_auto_point_mapping[i] & 3) == 0)
+ break;
+ }
+ if (i != nr_fans) {
+ dev_warn(&pdev->dev,
+ "Auto pwm controlled by raw digital "
+ "data, disabling pwm auto_point "
+ "sysfs attributes\n");
+ goto no_pwm_auto_point;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (data->type) {
case f71862fg:
err = f71882fg_create_sysfs_files(pdev,
f71862fg_auto_pwm_attr,
ARRAY_SIZE(f71862fg_auto_pwm_attr));
break;
+ case f71808e:
+ case f71869:
+ err = f71882fg_create_sysfs_files(pdev,
+ f71869_auto_pwm_attr,
+ ARRAY_SIZE(f71869_auto_pwm_attr));
+ break;
case f8000:
err = f71882fg_create_sysfs_files(pdev,
f8000_fan_attr,
@@ -2058,23 +2241,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
- case f71889fg:
- for (i = 0; i < nr_fans; i++) {
- data->pwm_auto_point_mapping[i] =
- f71882fg_read8(data,
- F71882FG_REG_POINT_MAPPING(i));
- if (data->pwm_auto_point_mapping[i] & 0x80)
- break;
- }
- if (i != nr_fans) {
- dev_warn(&pdev->dev,
- "Auto pwm controlled by raw digital "
- "data, disabling pwm auto_point "
- "sysfs attributes\n");
- break;
- }
- /* fall through */
- default: /* f71858fg / f71882fg */
+ default:
err = f71882fg_create_sysfs_files(pdev,
&fxxxx_auto_pwm_attr[0][0],
ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
@@ -2082,6 +2249,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
if (err)
goto exit_unregister_sysfs;
+no_pwm_auto_point:
for (i = 0; i < nr_fans; i++)
dev_info(&pdev->dev, "Fan: %d is in %s mode\n", i + 1,
(data->pwm_enable & (1 << 2 * i)) ?
@@ -2108,7 +2276,8 @@ exit_free:
static int f71882fg_remove(struct platform_device *pdev)
{
struct f71882fg_data *data = platform_get_drvdata(pdev);
- int nr_fans = (data->type == f71882fg) ? 4 : 3;
+ int i, nr_fans = (data->type == f71882fg) ? 4 : 3;
+ int nr_temps = (data->type == f71808e) ? 2 : 3;
u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
if (data->hwmon_dev)
@@ -2121,29 +2290,39 @@ static int f71882fg_remove(struct platform_device *pdev)
case f71858fg:
if (data->temp_config & 0x10)
f71882fg_remove_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
else
f71882fg_remove_sysfs_files(pdev,
- f71858fg_in_temp_attr,
- ARRAY_SIZE(f71858fg_in_temp_attr));
- break;
- case f71882fg:
- case f71889fg:
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in1_alarm_attr,
- ARRAY_SIZE(fxxxx_in1_alarm_attr));
- /* fall through! */
- case f71862fg:
- f71882fg_remove_sysfs_files(pdev,
- fxxxx_in_temp_attr,
- ARRAY_SIZE(fxxxx_in_temp_attr));
+ f71858fg_temp_attr,
+ ARRAY_SIZE(f71858fg_temp_attr));
break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
- f8000_in_temp_attr,
- ARRAY_SIZE(f8000_in_temp_attr));
+ f8000_temp_attr,
+ ARRAY_SIZE(f8000_temp_attr));
break;
+ default:
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps);
+ }
+ if (f71882fg_has_beep[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ &fxxxx_temp_beep_attr[0][0],
+ ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps);
+ }
+
+ for (i = 0; i < F71882FG_MAX_INS; i++) {
+ if (f71882fg_has_in[data->type][i]) {
+ device_remove_file(&pdev->dev,
+ &fxxxx_in_attr[i].dev_attr);
+ }
+ }
+ if (f71882fg_has_in1_alarm[data->type]) {
+ f71882fg_remove_sysfs_files(pdev,
+ fxxxx_in1_alarm_attr,
+ ARRAY_SIZE(fxxxx_in1_alarm_attr));
}
}
@@ -2151,10 +2330,10 @@ static int f71882fg_remove(struct platform_device *pdev)
f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0],
ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans);
- if (data->type == f71862fg || data->type == f71882fg ||
- data->type == f71889fg)
+ if (f71882fg_has_beep[data->type]) {
f71882fg_remove_sysfs_files(pdev,
fxxxx_fan_beep_attr, nr_fans);
+ }
switch (data->type) {
case f71862fg:
@@ -2162,6 +2341,12 @@ static int f71882fg_remove(struct platform_device *pdev)
f71862fg_auto_pwm_attr,
ARRAY_SIZE(f71862fg_auto_pwm_attr));
break;
+ case f71808e:
+ case f71869:
+ f71882fg_remove_sysfs_files(pdev,
+ f71869_auto_pwm_attr,
+ ARRAY_SIZE(f71869_auto_pwm_attr));
+ break;
case f8000:
f71882fg_remove_sysfs_files(pdev,
f8000_fan_attr,
@@ -2170,7 +2355,7 @@ static int f71882fg_remove(struct platform_device *pdev)
f8000_auto_pwm_attr,
ARRAY_SIZE(f8000_auto_pwm_attr));
break;
- default: /* f71858fg / f71882fg / f71889fg */
+ default:
f71882fg_remove_sysfs_files(pdev,
&fxxxx_auto_pwm_attr[0][0],
ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans);
@@ -2200,18 +2385,27 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID);
switch (devid) {
+ case SIO_F71808E_ID:
+ sio_data->type = f71808e;
+ break;
case SIO_F71858_ID:
sio_data->type = f71858fg;
break;
case SIO_F71862_ID:
sio_data->type = f71862fg;
break;
+ case SIO_F71869_ID:
+ sio_data->type = f71869;
+ break;
case SIO_F71882_ID:
sio_data->type = f71882fg;
break;
case SIO_F71889_ID:
sio_data->type = f71889fg;
break;
+ case SIO_F71889E_ID:
+ sio_data->type = f71889ed;
+ break;
case SIO_F8000_ID:
sio_data->type = f8000;
break;
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
new file mode 100644
index 000000000000..58eded27f385
--- /dev/null
+++ b/drivers/hwmon/lineage-pem.c
@@ -0,0 +1,586 @@
+/*
+ * Driver for Lineage Compact Power Line series of power entry modules.
+ *
+ * Copyright (C) 2010, 2011 Ericsson AB.
+ *
+ * Documentation:
+ * http://www.lineagepower.com/oem/pdf/CPLI2C.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/*
+ * This driver supports various Lineage Compact Power Line DC/DC and AC/DC
+ * converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others.
+ *
+ * The devices are nominally PMBus compliant. However, most standard PMBus
+ * commands are not supported. Specifically, all hardware monitoring and
+ * status reporting commands are non-standard. For this reason, a standard
+ * PMBus driver cannot be used.
+ *
+ * All Lineage CPL devices have a built-in I2C bus master selector (PCA9541).
+ * To ensure device access, this driver should only be used as a client
+ * driver of the pca9541 I2C master selector driver.
+ */
+
+/* Command codes */
+#define PEM_OPERATION 0x01
+#define PEM_CLEAR_INFO_FLAGS 0x03
+#define PEM_VOUT_COMMAND 0x21
+#define PEM_VOUT_OV_FAULT_LIMIT 0x40
+#define PEM_READ_DATA_STRING 0xd0
+#define PEM_READ_INPUT_STRING 0xdc
+#define PEM_READ_FIRMWARE_REV 0xdd
+#define PEM_READ_RUN_TIMER 0xde
+#define PEM_FAN_HI_SPEED 0xdf
+#define PEM_FAN_NORMAL_SPEED 0xe0
+#define PEM_READ_FAN_SPEED 0xe1
+
+/* offsets in data string */
+#define PEM_DATA_STATUS_2 0
+#define PEM_DATA_STATUS_1 1
+#define PEM_DATA_ALARM_2 2
+#define PEM_DATA_ALARM_1 3
+#define PEM_DATA_VOUT_LSB 4
+#define PEM_DATA_VOUT_MSB 5
+#define PEM_DATA_CURRENT 6
+#define PEM_DATA_TEMP 7
+
+/* Virtual entries, to report constants */
+#define PEM_DATA_TEMP_MAX 10
+#define PEM_DATA_TEMP_CRIT 11
+
+/* offsets in input string */
+#define PEM_INPUT_VOLTAGE 0
+#define PEM_INPUT_POWER_LSB 1
+#define PEM_INPUT_POWER_MSB 2
+
+/* offsets in fan data */
+#define PEM_FAN_ADJUSTMENT 0
+#define PEM_FAN_FAN1 1
+#define PEM_FAN_FAN2 2
+#define PEM_FAN_FAN3 3
+
+/* Status register bits */
+#define STS1_OUTPUT_ON (1 << 0)
+#define STS1_LEDS_FLASHING (1 << 1)
+#define STS1_EXT_FAULT (1 << 2)
+#define STS1_SERVICE_LED_ON (1 << 3)
+#define STS1_SHUTDOWN_OCCURRED (1 << 4)
+#define STS1_INT_FAULT (1 << 5)
+#define STS1_ISOLATION_TEST_OK (1 << 6)
+
+#define STS2_ENABLE_PIN_HI (1 << 0)
+#define STS2_DATA_OUT_RANGE (1 << 1)
+#define STS2_RESTARTED_OK (1 << 2)
+#define STS2_ISOLATION_TEST_FAIL (1 << 3)
+#define STS2_HIGH_POWER_CAP (1 << 4)
+#define STS2_INVALID_INSTR (1 << 5)
+#define STS2_WILL_RESTART (1 << 6)
+#define STS2_PEC_ERR (1 << 7)
+
+/* Alarm register bits */
+#define ALRM1_VIN_OUT_LIMIT (1 << 0)
+#define ALRM1_VOUT_OUT_LIMIT (1 << 1)
+#define ALRM1_OV_VOLT_SHUTDOWN (1 << 2)
+#define ALRM1_VIN_OVERCURRENT (1 << 3)
+#define ALRM1_TEMP_WARNING (1 << 4)
+#define ALRM1_TEMP_SHUTDOWN (1 << 5)
+#define ALRM1_PRIMARY_FAULT (1 << 6)
+#define ALRM1_POWER_LIMIT (1 << 7)
+
+#define ALRM2_5V_OUT_LIMIT (1 << 1)
+#define ALRM2_TEMP_FAULT (1 << 2)
+#define ALRM2_OV_LOW (1 << 3)
+#define ALRM2_DCDC_TEMP_HIGH (1 << 4)
+#define ALRM2_PRI_TEMP_HIGH (1 << 5)
+#define ALRM2_NO_PRIMARY (1 << 6)
+#define ALRM2_FAN_FAULT (1 << 7)
+
+#define FIRMWARE_REV_LEN 4
+#define DATA_STRING_LEN 9
+#define INPUT_STRING_LEN 5 /* 4 for most devices */
+#define FAN_SPEED_LEN 5
+
+struct pem_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ bool fans_supported;
+ int input_length;
+ unsigned long last_updated; /* in jiffies */
+
+ u8 firmware_rev[FIRMWARE_REV_LEN];
+ u8 data_string[DATA_STRING_LEN];
+ u8 input_string[INPUT_STRING_LEN];
+ u8 fan_speed[FAN_SPEED_LEN];
+};
+
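+/*
+ * Read a data block and validate its length: i2c_smbus_read_block_data()
+ * returns the number of bytes received or a negative errno, and anything
+ * other than the expected length is treated as an I/O error.
+ */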
+static int pem_read_block(struct i2c_client *client, u8 command, u8 *data,
+ int data_len)
+{
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX];
+ int result;
+
+ result = i2c_smbus_read_block_data(client, command, block_buffer);
+ if (unlikely(result < 0))
+ goto abort;
+ if (unlikely(result == 0xff || result != data_len)) {
+ result = -EIO;
+ goto abort;
+ }
+ memcpy(data, block_buffer, data_len);
+ result = 0;
+abort:
+ return result;
+}
+
+static struct pem_data *pem_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pem_data *data = i2c_get_clientdata(client);
+ struct pem_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
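+ /* Cached readings are refreshed at most once per second */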
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int result;
+
+ /* Read data string */
+ result = pem_read_block(client, PEM_READ_DATA_STRING,
+ data->data_string,
+ sizeof(data->data_string));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+
+ /* Read input string */
+ if (data->input_length) {
+ result = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ data->input_length);
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ /* Read fan speeds */
+ if (data->fans_supported) {
+ result = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (unlikely(result < 0)) {
+ ret = ERR_PTR(result);
+ goto abort;
+ }
+ }
+
+ i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static long pem_get_data(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_DATA_VOUT_LSB:
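+ /* 16-bit little-endian value, scaled to the millivolts sysfs expects */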
+ val = (data[index] + (data[index+1] << 8)) * 5 / 2;
+ break;
+ case PEM_DATA_CURRENT:
+ val = data[index] * 200;
+ break;
+ case PEM_DATA_TEMP:
+ val = data[index] * 1000;
+ break;
+ case PEM_DATA_TEMP_MAX:
+ val = 97 * 1000; /* 97 degrees C per datasheet */
+ break;
+ case PEM_DATA_TEMP_CRIT:
+ val = 107 * 1000; /* 107 degrees C per datasheet */
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+static long pem_get_input(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_INPUT_VOLTAGE:
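+ /* 16-bit on devices with the longer input string, 8-bit otherwise */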
+ if (len == INPUT_STRING_LEN)
+ val = (data[index] + (data[index+1] << 8) - 75) * 1000;
+ else
+ val = (data[index] - 75) * 1000;
+ break;
+ case PEM_INPUT_POWER_LSB:
+ if (len == INPUT_STRING_LEN)
+ index++;
+ val = (data[index] + (data[index+1] << 8)) * 1000000L;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+static long pem_get_fan(u8 *data, int len, int index)
+{
+ long val;
+
+ switch (index) {
+ case PEM_FAN_FAN1:
+ case PEM_FAN_FAN2:
+ case PEM_FAN_FAN3:
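+ /* the registers hold the fan speed in units of 100 RPM */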
+ val = data[index] * 100;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ val = 0;
+ }
+ return val;
+}
+
+/*
+ * Show boolean, either a fault or an alarm.
+ * .nr points to the register, .index is the bit mask to check
+ */
+static ssize_t pem_show_bool(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da);
+ struct pem_data *data = pem_update_device(dev);
+ u8 status;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ status = data->data_string[attr->nr] & attr->index;
+ return snprintf(buf, PAGE_SIZE, "%d\n", !!status);
+}
+
+static ssize_t pem_show_data(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_data(data->data_string, sizeof(data->data_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_input(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_input(data->input_string, sizeof(data->input_string),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pem_data *data = pem_update_device(dev);
+ long value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = pem_get_fan(data->fan_speed, sizeof(data->fan_speed),
+ attr->index);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", value);
+}
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_VOUT_LSB);
+static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VOUT_OUT_LIMIT);
+static SENSOR_DEVICE_ATTR_2(in1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_OV_VOLT_SHUTDOWN);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_VOLTAGE);
+static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1,
+ ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT);
+
+/* Currents */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_CURRENT);
+static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_VIN_OVERCURRENT);
+
+/* Power */
+static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, pem_show_input, NULL,
+ PEM_INPUT_POWER_LSB);
+static SENSOR_DEVICE_ATTR_2(power1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_POWER_LIMIT);
+
+/* Fans */
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN1);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN2);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, pem_show_fan, NULL,
+ PEM_FAN_FAN3);
+static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_FAN_FAULT);
+
+/* Temperatures */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, pem_show_data, NULL,
+ PEM_DATA_TEMP_CRIT);
+static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_WARNING);
+static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_1, ALRM1_TEMP_SHUTDOWN);
+static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, pem_show_bool, NULL,
+ PEM_DATA_ALARM_2, ALRM2_TEMP_FAULT);
+
+static struct attribute *pem_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in1_alarm.dev_attr.attr,
+ &sensor_dev_attr_in1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_in2_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_power1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group pem_group = {
+ .attrs = pem_attributes,
+};
+
+static struct attribute *pem_input_attributes[] = {
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_power1_input.dev_attr.attr,
+};
+
+static const struct attribute_group pem_input_group = {
+ .attrs = pem_input_attributes,
+};
+
+static struct attribute *pem_fan_attributes[] = {
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan3_input.dev_attr.attr,
+};
+
+static const struct attribute_group pem_fan_group = {
+ .attrs = pem_fan_attributes,
+};
+
+static int pem_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct pem_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BLOCK_DATA
+ | I2C_FUNC_SMBUS_WRITE_BYTE))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * We use the next two commands to determine if the device is really
+ * there.
+ */
+ ret = pem_read_block(client, PEM_READ_FIRMWARE_REV,
+ data->firmware_rev, sizeof(data->firmware_rev));
+ if (ret < 0)
+ goto out_kfree;
+
+ ret = i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
+ if (ret < 0)
+ goto out_kfree;
+
+ dev_info(&client->dev, "Firmware revision %d.%d.%d\n",
+ data->firmware_rev[0], data->firmware_rev[1],
+ data->firmware_rev[2]);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &pem_group);
+ if (ret)
+ goto out_kfree;
+
+ /*
+ * Check if input readings are supported.
+ * This is the case if we can read input data,
+ * and if the returned data is not all zeros.
+ * Note that input alarms are always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string) - 1);
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2]))
+ data->input_length = sizeof(data->input_string) - 1;
+ else if (ret < 0) {
+ /* Input string is one byte longer for some devices */
+ ret = pem_read_block(client, PEM_READ_INPUT_STRING,
+ data->input_string,
+ sizeof(data->input_string));
+ if (!ret && (data->input_string[0] || data->input_string[1] ||
+ data->input_string[2] || data->input_string[3]))
+ data->input_length = sizeof(data->input_string);
+ }
+ ret = 0;
+ if (data->input_length) {
+ ret = sysfs_create_group(&client->dev.kobj, &pem_input_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ /*
+ * Check if fan speed readings are supported.
+ * This is the case if we can read fan speed data,
+ * and if the returned data is not all zeros.
+ * Note that the fan alarm is always supported.
+ */
+ ret = pem_read_block(client, PEM_READ_FAN_SPEED,
+ data->fan_speed,
+ sizeof(data->fan_speed));
+ if (!ret && (data->fan_speed[0] || data->fan_speed[1] ||
+ data->fan_speed[2] || data->fan_speed[3])) {
+ data->fans_supported = true;
+ ret = sysfs_create_group(&client->dev.kobj, &pem_fan_group);
+ if (ret)
+ goto out_remove_groups;
+ }
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_remove_groups;
+ }
+
+ return 0;
+
+out_remove_groups:
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+out_kfree:
+ kfree(data);
+ return ret;
+}
+
+static int pem_remove(struct i2c_client *client)
+{
+ struct pem_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+
+ sysfs_remove_group(&client->dev.kobj, &pem_input_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_fan_group);
+ sysfs_remove_group(&client->dev.kobj, &pem_group);
+
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id pem_id[] = {
+ {"lineage_pem", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pem_id);
+
+static struct i2c_driver pem_driver = {
+ .driver = {
+ .name = "lineage_pem",
+ },
+ .probe = pem_probe,
+ .remove = pem_remove,
+ .id_table = pem_id,
+};
+
+static int __init pem_init(void)
+{
+ return i2c_add_driver(&pem_driver);
+}
+
+static void __exit pem_exit(void)
+{
+ i2c_del_driver(&pem_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
+MODULE_LICENSE("GPL");
+
+module_init(pem_init);
+module_exit(pem_exit);
diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c
index 2549de1de4e2..c1f8a8fbf694 100644
--- a/drivers/hwmon/lis3lv02d_spi.c
+++ b/drivers/hwmon/lis3lv02d_spi.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spi/spi.h>
+#include <linux/pm.h>
#include "lis3lv02d.h"
@@ -88,9 +89,10 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi)
return lis3lv02d_remove_fs(&lis3_dev);
}
-#ifdef CONFIG_PM
-static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+static int lis3lv02d_spi_suspend(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -99,8 +101,9 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg)
return 0;
}
-static int lis3lv02d_spi_resume(struct spi_device *spi)
+static int lis3lv02d_spi_resume(struct device *dev)
{
+ struct spi_device *spi = to_spi_device(dev);
struct lis3lv02d *lis3 = spi_get_drvdata(spi);
if (!lis3->pdata || !lis3->pdata->wakeup_flags)
@@ -108,21 +111,19 @@ static int lis3lv02d_spi_resume(struct spi_device *spi)
return 0;
}
-
-#else
-#define lis3lv02d_spi_suspend NULL
-#define lis3lv02d_spi_resume NULL
#endif
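+
+/*
+ * SIMPLE_DEV_PM_OPS only references the callbacks when CONFIG_PM_SLEEP is
+ * set, so the old NULL fallback defines are no longer needed.
+ */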
+static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend,
+ lis3lv02d_spi_resume);
+
static struct spi_driver lis302dl_spi_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .pm = &lis3lv02d_spi_pm,
},
.probe = lis302dl_spi_probe,
.remove = __devexit_p(lis302dl_spi_remove),
- .suspend = lis3lv02d_spi_suspend,
- .resume = lis3lv02d_spi_resume,
};
static int __init lis302dl_init(void)
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index d2cc28660816..cf47e6e476ed 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
enum chips {
any_chip, lm85b, lm85c,
adm1027, adt7463, adt7468,
- emc6d100, emc6d102, emc6d103
+ emc6d100, emc6d102, emc6d103, emc6d103s
};
/* The LM85 registers */
@@ -283,10 +283,6 @@ struct lm85_zone {
u8 hyst; /* Low limit hysteresis. (0-15) */
u8 range; /* Temp range, encoded */
s8 critical; /* "All fans ON" temp limit */
- u8 off_desired; /* Actual "off" temperature specified. Preserved
- * to prevent "drift" as other autofan control
- * values change.
- */
u8 max_desired; /* Actual "max" temperature specified. Preserved
* to prevent "drift" as other autofan control
* values change.
@@ -306,6 +302,8 @@ struct lm85_data {
const int *freq_map;
enum chips type;
+ bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */
+
struct mutex update_lock;
int valid; /* !=0 if following fields are valid */
unsigned long last_reading; /* In jiffies */
@@ -352,6 +350,7 @@ static const struct i2c_device_id lm85_id[] = {
{ "emc6d101", emc6d100 },
{ "emc6d102", emc6d102 },
{ "emc6d103", emc6d103 },
+ { "emc6d103s", emc6d103s },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -420,8 +419,7 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
struct lm85_data *data = lm85_update_device(dev);
int vid;
- if ((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)) {
+ if (data->has_vid5) {
/* 6-pin VID (VRM 10) */
vid = vid_from_reg(data->vid & 0x3f, data->vrm);
} else {
@@ -891,7 +889,6 @@ static ssize_t set_temp_auto_temp_off(struct device *dev,
mutex_lock(&data->update_lock);
min = TEMP_FROM_REG(data->zone[nr].limit);
- data->zone[nr].off_desired = TEMP_TO_REG(val);
data->zone[nr].hyst = HYST_TO_REG(min - val);
if (nr == 0 || nr == 1) {
lm85_write_value(client, LM85_REG_AFAN_HYST1,
@@ -934,18 +931,6 @@ static ssize_t set_temp_auto_temp_min(struct device *dev,
((data->zone[nr].range & 0x0f) << 4)
| (data->pwm_freq[nr] & 0x07));
-/* Update temp_auto_hyst and temp_auto_off */
- data->zone[nr].hyst = HYST_TO_REG(TEMP_FROM_REG(
- data->zone[nr].limit) - TEMP_FROM_REG(
- data->zone[nr].off_desired));
- if (nr == 0 || nr == 1) {
- lm85_write_value(client, LM85_REG_AFAN_HYST1,
- (data->zone[0].hyst << 4)
- | data->zone[1].hyst);
- } else {
- lm85_write_value(client, LM85_REG_AFAN_HYST2,
- (data->zone[2].hyst << 4));
- }
mutex_unlock(&data->update_lock);
return count;
}
@@ -1084,13 +1069,7 @@ static struct attribute *lm85_attributes[] = {
&sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr,
&sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
- &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
- &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
&sensor_dev_attr_temp1_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp2_auto_temp_min.dev_attr.attr,
&sensor_dev_attr_temp3_auto_temp_min.dev_attr.attr,
@@ -1111,6 +1090,26 @@ static const struct attribute_group lm85_group = {
.attrs = lm85_attributes,
};
+static struct attribute *lm85_attributes_minctl[] = {
+ &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group lm85_group_minctl = {
+ .attrs = lm85_attributes_minctl,
+};
+
+static struct attribute *lm85_attributes_temp_off[] = {
+ &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr,
+ &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group lm85_group_temp_off = {
+ .attrs = lm85_attributes_temp_off,
+};
+
static struct attribute *lm85_attributes_in4[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
@@ -1258,16 +1257,9 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
case LM85_VERSTEP_EMC6D103_A1:
type_name = "emc6d103";
break;
- /*
- * Registers apparently missing in EMC6D103S/EMC6D103:A2
- * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
- * (according to the data sheets), but used unconditionally
- * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
- * So skip EMC6D103S for now.
case LM85_VERSTEP_EMC6D103S:
type_name = "emc6d103s";
break;
- */
}
} else {
dev_dbg(&adapter->dev,
@@ -1280,6 +1272,19 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
+static void lm85_remove_files(struct i2c_client *client, struct lm85_data *data)
+{
+ sysfs_remove_group(&client->dev.kobj, &lm85_group);
+ if (data->type != emc6d103s) {
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_minctl);
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_temp_off);
+ }
+ if (!data->has_vid5)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
+ if (data->type == emc6d100)
+ sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+}
+
static int lm85_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1302,6 +1307,7 @@ static int lm85_probe(struct i2c_client *client,
case emc6d100:
case emc6d102:
case emc6d103:
+ case emc6d103s:
data->freq_map = adm1027_freq_map;
break;
default:
@@ -1319,11 +1325,26 @@ static int lm85_probe(struct i2c_client *client,
if (err)
goto err_kfree;
+ /* minctl and temp_off exist on all chips except emc6d103s */
+ if (data->type != emc6d103s) {
+ err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl);
+ if (err)
+ goto err_remove_files;
+ err = sysfs_create_group(&client->dev.kobj,
+ &lm85_group_temp_off);
+ if (err)
+ goto err_remove_files;
+ }
+
/* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used
as a sixth digital VID input rather than an analog input. */
- data->vid = lm85_read_value(client, LM85_REG_VID);
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80)))
+ if (data->type == adt7463 || data->type == adt7468) {
+ u8 vid = lm85_read_value(client, LM85_REG_VID);
+ if (vid & 0x80)
+ data->has_vid5 = true;
+ }
+
+ if (!data->has_vid5)
if ((err = sysfs_create_group(&client->dev.kobj,
&lm85_group_in4)))
goto err_remove_files;
@@ -1344,10 +1365,7 @@ static int lm85_probe(struct i2c_client *client,
/* Error out and cleanup code */
err_remove_files:
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
err_kfree:
kfree(data);
return err;
@@ -1357,10 +1375,7 @@ static int lm85_remove(struct i2c_client *client)
{
struct lm85_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &lm85_group);
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in4);
- if (data->type == emc6d100)
- sysfs_remove_group(&client->dev.kobj, &lm85_group_in567);
+ lm85_remove_files(client, data);
kfree(data);
return 0;
}
@@ -1457,11 +1472,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
- data->in[4] = lm85_read_value(client,
- LM85_REG_IN(4));
- }
+ if (!data->has_vid5)
+ data->in[4] = lm85_read_value(client, LM85_REG_IN(4));
if (data->type == adt7468)
data->cfg5 = lm85_read_value(client, ADT7468_REG_CFG5);
@@ -1487,7 +1499,8 @@ static struct lm85_data *lm85_update_device(struct device *dev)
/* More alarm bits */
data->alarms |= lm85_read_value(client,
EMC6D100_REG_ALARM3) << 16;
- } else if (data->type == emc6d102 || data->type == emc6d103) {
+ } else if (data->type == emc6d102 || data->type == emc6d103 ||
+ data->type == emc6d103s) {
/* Have to read LSB bits after the MSB ones because
the reading of the MSB bits has frozen the
LSBs (backward from the ADM1027).
@@ -1528,8 +1541,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
lm85_read_value(client, LM85_REG_FAN_MIN(i));
}
- if (!((data->type == adt7463 || data->type == adt7468) &&
- (data->vid & 0x80))) {
+ if (!data->has_vid5) {
data->in_min[4] = lm85_read_value(client,
LM85_REG_IN_MIN(4));
data->in_max[4] = lm85_read_value(client,
@@ -1573,17 +1585,19 @@ static struct lm85_data *lm85_update_device(struct device *dev)
}
}
- i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
- data->autofan[0].min_off = (i & 0x20) != 0;
- data->autofan[1].min_off = (i & 0x40) != 0;
- data->autofan[2].min_off = (i & 0x80) != 0;
+ if (data->type != emc6d103s) {
+ i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1);
+ data->autofan[0].min_off = (i & 0x20) != 0;
+ data->autofan[1].min_off = (i & 0x40) != 0;
+ data->autofan[2].min_off = (i & 0x80) != 0;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
- data->zone[0].hyst = i >> 4;
- data->zone[1].hyst = i & 0x0f;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST1);
+ data->zone[0].hyst = i >> 4;
+ data->zone[1].hyst = i & 0x0f;
- i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
- data->zone[2].hyst = i >> 4;
+ i = lm85_read_value(client, LM85_REG_AFAN_HYST2);
+ data->zone[2].hyst = i >> 4;
+ }
data->last_config = jiffies;
} /* last_config */
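
The split into lm85_group_minctl and lm85_group_temp_off is the usual way to express per-chip optional sysfs attributes: each attribute array must be NULL-terminated (sysfs_create_group() walks the list until it hits NULL), groups are created conditionally in probe, and a single helper mirrors exactly those conditions on teardown so the error path and remove() stay in sync. A condensed sketch of the pattern, with hypothetical names:

#include <linux/i2c.h>
#include <linux/sysfs.h>

/* attribute arrays elided; each must end with a NULL entry */
static const struct attribute_group foo_base_group;
static const struct attribute_group foo_extra_group;

struct foo_data {
	bool has_extra;		/* set during detection */
};

static void foo_remove_files(struct i2c_client *client, struct foo_data *data)
{
	/* mirror the create conditions exactly */
	if (data->has_extra)
		sysfs_remove_group(&client->dev.kobj, &foo_extra_group);
	sysfs_remove_group(&client->dev.kobj, &foo_base_group);
}

static int foo_create_files(struct i2c_client *client, struct foo_data *data)
{
	int err;

	err = sysfs_create_group(&client->dev.kobj, &foo_base_group);
	if (err)
		return err;

	if (data->has_extra) {
		err = sysfs_create_group(&client->dev.kobj, &foo_extra_group);
		if (err) {
			sysfs_remove_group(&client->dev.kobj,
					   &foo_base_group);
			return err;
		}
	}
	return 0;
}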
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
new file mode 100644
index 000000000000..4ac06b75aa60
--- /dev/null
+++ b/drivers/hwmon/ltc4151.c
@@ -0,0 +1,256 @@
+/*
+ * Driver for Linear Technology LTC4151 High Voltage I2C Current
+ * and Voltage Monitor
+ *
+ * Copyright (C) 2011 AppearTV AS
+ *
+ * Derived from:
+ *
+ * Driver for Linear Technology LTC4261 I2C Negative Voltage Hot
+ * Swap Controller
+ * Copyright (C) 2010 Ericsson AB.
+ *
+ * Datasheet: http://www.linear.com/docs/Datasheet/4151fc.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+/* chip registers */
+#define LTC4151_SENSE_H 0x00
+#define LTC4151_SENSE_L 0x01
+#define LTC4151_VIN_H 0x02
+#define LTC4151_VIN_L 0x03
+#define LTC4151_ADIN_H 0x04
+#define LTC4151_ADIN_L 0x05
+
+struct ltc4151_data {
+ struct device *hwmon_dev;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /* Registers */
+ u8 regs[6];
+};
+
+static struct ltc4151_data *ltc4151_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct ltc4151_data *data = i2c_get_clientdata(client);
+ struct ltc4151_data *ret = data;
+
+ mutex_lock(&data->update_lock);
+
+ /*
+ * The chip's A/D updates the readings at least six times per
+ * second (datasheet conversion rate: 6 to 9 Hz)
+ */
+ if (time_after(jiffies, data->last_updated + HZ / 6) || !data->valid) {
+ int i;
+
+ dev_dbg(&client->dev, "Starting ltc4151 update\n");
+
+ /* Read all registers */
+ for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, i);
+ if (unlikely(val < 0)) {
+ dev_dbg(dev,
+ "Failed to read ADC value: error %d\n",
+ val);
+ ret = ERR_PTR(val);
+ goto abort;
+ }
+ data->regs[i] = val;
+ }
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+/*
+ * Decode the 12-bit reading starting at the given MSB register.
+ * Returns mV for the voltage registers, mA for the sense register.
+ */
+static int ltc4151_get_value(struct ltc4151_data *data, u8 reg)
+{
+ u32 val;
+
+ val = (data->regs[reg] << 4) + (data->regs[reg + 1] >> 4);
+
+ switch (reg) {
+ case LTC4151_ADIN_H:
+ /* 500uV resolution. Convert to mV. */
+ val = val * 500 / 1000;
+ break;
+ case LTC4151_SENSE_H:
+ /*
+ * 20uV resolution. Convert to current as measured with
+ * a 1 mOhm sense resistor, in mA.
+ */
+ val = val * 20;
+ break;
+ case LTC4151_VIN_H:
+ /* 25 mV per increment */
+ val = val * 25;
+ break;
+ default:
+ /* If we get here, the developer messed up */
+ WARN_ON_ONCE(1);
+ val = 0;
+ break;
+ }
+
+ return val;
+}
+
+static ssize_t ltc4151_show_value(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct ltc4151_data *data = ltc4151_update_device(dev);
+ int value;
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ value = ltc4151_get_value(data, attr->index);
+ return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+/*
+ * Input voltages.
+ */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, \
+ ltc4151_show_value, NULL, LTC4151_VIN_H);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, \
+ ltc4151_show_value, NULL, LTC4151_ADIN_H);
+
+/* Currents (via sense resistor) */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, \
+ ltc4151_show_value, NULL, LTC4151_SENSE_H);
+
+/*
+ * Finally, construct an array of pointers to members of the above objects,
+ * as required for sysfs_create_group()
+ */
+static struct attribute *ltc4151_attributes[] = {
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+
+ NULL,
+};
+
+static const struct attribute_group ltc4151_group = {
+ .attrs = ltc4151_attributes,
+};
+
+static int ltc4151_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ struct ltc4151_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out_kzalloc;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Register sysfs hooks */
+ ret = sysfs_create_group(&client->dev.kobj, &ltc4151_group);
+ if (ret)
+ goto out_sysfs_create_group;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto out_hwmon_device_register;
+ }
+
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
+out_sysfs_create_group:
+ kfree(data);
+out_kzalloc:
+ return ret;
+}
+
+static int ltc4151_remove(struct i2c_client *client)
+{
+ struct ltc4151_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &ltc4151_group);
+
+ kfree(data);
+
+ return 0;
+}
+
+static const struct i2c_device_id ltc4151_id[] = {
+ { "ltc4151", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ltc4151_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver ltc4151_driver = {
+ .driver = {
+ .name = "ltc4151",
+ },
+ .probe = ltc4151_probe,
+ .remove = ltc4151_remove,
+ .id_table = ltc4151_id,
+};
+
+static int __init ltc4151_init(void)
+{
+ return i2c_add_driver(&ltc4151_driver);
+}
+
+static void __exit ltc4151_exit(void)
+{
+ i2c_del_driver(&ltc4151_driver);
+}
+
+MODULE_AUTHOR("Per Dalen <per.dalen@appeartv.com>");
+MODULE_DESCRIPTION("LTC4151 driver");
+MODULE_LICENSE("GPL");
+
+module_init(ltc4151_init);
+module_exit(ltc4151_exit);
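
ltc4151_get_value() reassembles the 12-bit conversion result that the chip splits across two registers: the eight MSBs sit in the _H register and the four LSBs in the top nibble of the _L register. A worked example with illustrative register contents:

/* Illustrative: regs[LTC4151_ADIN_H] = 0x7d, regs[LTC4151_ADIN_L] = 0xa0 */
u32 raw = (0x7d << 4) + (0xa0 >> 4);	/* 0x7da = 2010 counts */
u32 mv  = raw * 500 / 1000;		/* 500 uV/count -> 1005 mV */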
diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/max16064.c
new file mode 100644
index 000000000000..1d6d717060d3
--- /dev/null
+++ b/drivers/hwmon/max16064.c
@@ -0,0 +1,91 @@
+/*
+ * Hardware monitoring driver for Maxim MAX16064
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+static struct pmbus_driver_info max16064_info = {
+ .pages = 4,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_TEMP,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+};
+
+static int max16064_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max16064_info);
+}
+
+static int max16064_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max16064_id[] = {
+ {"max16064", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max16064_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max16064_driver = {
+ .driver = {
+ .name = "max16064",
+ },
+ .probe = max16064_probe,
+ .remove = max16064_remove,
+ .id_table = max16064_id,
+};
+
+static int __init max16064_init(void)
+{
+ return i2c_add_driver(&max16064_driver);
+}
+
+static void __exit max16064_exit(void)
+{
+ i2c_del_driver(&max16064_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX16064");
+MODULE_LICENSE("GPL");
+module_init(max16064_init);
+module_exit(max16064_exit);
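
The coefficients above describe the PMBus "direct" data format, Y = (m*X + b) * 10^R, where Y is the register word and X the real-world quantity; the pmbus core inverts this as X = (Y * 10^-R - b) / m. With the MAX16064 voltage coefficients (m = 19995, b = 0, R = -1), a raw reading Y = 2400 decodes to 2400 * 10 / 19995, roughly 1.2003 V. A floating-point sketch of the decode, for illustration only (the core uses scaled integer arithmetic):

#include <math.h>

/* Illustration of the PMBus direct-format decode, not the core's code */
static double pmbus_direct_decode(long y, long m, long b, int R)
{
	return ((double)y * pow(10.0, -R) - b) / m;
}

/* pmbus_direct_decode(2400, 19995, 0, -1) ~= 1.2003 (volts) */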
diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/max34440.c
new file mode 100644
index 000000000000..992b701b4c5e
--- /dev/null
+++ b/drivers/hwmon/max34440.c
@@ -0,0 +1,199 @@
+/*
+ * Hardware monitoring driver for Maxim MAX34440/MAX34441
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+enum chips { max34440, max34441 };
+
+#define MAX34440_STATUS_OC_WARN (1 << 0)
+#define MAX34440_STATUS_OC_FAULT (1 << 1)
+#define MAX34440_STATUS_OT_FAULT (1 << 5)
+#define MAX34440_STATUS_OT_WARN (1 << 6)
+
+static int max34440_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+ int mfg_status;
+
+ ret = pmbus_set_page(client, page);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OC_WARN)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX34440_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ PMBUS_STATUS_MFR_SPECIFIC);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX34440_STATUS_OT_WARN)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX34440_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max34440_info[] = {
+ [max34440] = {
+ .pages = 14,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3, /* R = 0 in datasheet reflects mV */
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3, /* R = 0 in datasheet reflects mA */
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+ [max34441] = {
+ .pages = 12,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .direct[PSC_FAN] = true,
+ .m[PSC_VOLTAGE_IN] = 1,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 3,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .m[PSC_CURRENT_OUT] = 1,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 3,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ .m[PSC_FAN] = 1,
+ .b[PSC_FAN] = 0,
+ .R[PSC_FAN] = 0,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12,
+ .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max34440_get_status,
+ },
+};
+
+static int max34440_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max34440_info[id->driver_data]);
+}
+
+static int max34440_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max34440_id[] = {
+ {"max34440", max34440},
+ {"max34441", max34441},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, max34440_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max34440_driver = {
+ .driver = {
+ .name = "max34440",
+ },
+ .probe = max34440_probe,
+ .remove = max34440_remove,
+ .id_table = max34440_id,
+};
+
+static int __init max34440_init(void)
+{
+ return i2c_add_driver(&max34440_driver);
+}
+
+static void __exit max34440_exit(void)
+{
+ i2c_del_driver(&max34440_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX34440/MAX34441");
+MODULE_LICENSE("GPL");
+module_init(max34440_init);
+module_exit(max34440_exit);
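
The m = 1, b = 0 entries with R = 3 deserve a second look: the datasheet quotes R = 0 because the chip natively reports millivolts (or milliamps), and declaring R = 3 to the core folds the mV-to-V conversion into the same direct-format decode. A worked example:

/*
 * MAX34440 voltage decode with m = 1, b = 0, R = 3:
 *   register Y = 3300 (the chip's native mV reading)
 *   X = (Y * 10^-3 - 0) / 1 = 3.3 V
 * which the core then rescales to the milli-units sysfs expects.
 */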
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
new file mode 100644
index 000000000000..f20d9978ee78
--- /dev/null
+++ b/drivers/hwmon/max6639.c
@@ -0,0 +1,653 @@
+/*
+ * max6639.c - Support for Maxim MAX6639
+ *
+ * 2-Channel Temperature Monitor with Dual PWM Fan-Speed Controller
+ *
+ * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
+ *
+ * based on the initial MAX6639 support from semptian.net
+ * by He Changqing <hechangqing@semptian.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/i2c/max6639.h>
+
+/* Addresses to scan */
+static const unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
+
+/* The MAX6639 registers, valid channel numbers: 0, 1 */
+#define MAX6639_REG_TEMP(ch) (0x00 + (ch))
+#define MAX6639_REG_STATUS 0x02
+#define MAX6639_REG_OUTPUT_MASK 0x03
+#define MAX6639_REG_GCONFIG 0x04
+#define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch))
+#define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch))
+#define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch))
+#define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch))
+#define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4)
+#define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4)
+#define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch))
+#define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch))
+#define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch))
+#define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch))
+#define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch))
+#define MAX6639_REG_DEVID 0x3D
+#define MAX6639_REG_MANUID 0x3E
+#define MAX6639_REG_DEVREV 0x3F
+
+/* Register bits */
+#define MAX6639_GCONFIG_STANDBY 0x80
+#define MAX6639_GCONFIG_POR 0x40
+#define MAX6639_GCONFIG_DISABLE_TIMEOUT 0x20
+#define MAX6639_GCONFIG_CH2_LOCAL 0x10
+#define MAX6639_GCONFIG_PWM_FREQ_HI 0x08
+
+#define MAX6639_FAN_CONFIG1_PWM 0x80
+
+#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40
+
+static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
+
+#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
+ (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
+#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
+
+/*
+ * Client data (each client gets its own)
+ */
+struct max6639_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* !=0 if following fields are valid */
+ unsigned long last_updated; /* In jiffies */
+
+ /* Register values sampled regularly */
+ u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */
+ bool temp_fault[2]; /* Detected temperature diode failure */
+ u8 fan[2]; /* Register value: TACH count for fans >=30 */
+ u8 status; /* Detected channel alarms and fan failures */
+
+ /* Register values only written to */
+ u8 pwm[2]; /* Register value: Duty cycle 0..120 */
+ u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */
+ u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */
+ u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */
+
+ /* Register values initialized only once */
+ u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */
+ u8 rpm_range; /* Index in above rpm_ranges table */
+};
+
+static struct max6639_data *max6639_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_data *ret = data;
+ int i;
+ int status_reg;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
+ int res;
+
+ dev_dbg(&client->dev, "Starting max6639 update\n");
+
+ status_reg = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_STATUS);
+ if (status_reg < 0) {
+ ret = ERR_PTR(status_reg);
+ goto abort;
+ }
+
+ data->status = status_reg;
+
+ for (i = 0; i < 2; i++) {
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_FAN_CNT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->fan[i] = res;
+
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP_EXT(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] = res >> 5;
+ data->temp_fault[i] = res & 0x01;
+
+ res = i2c_smbus_read_byte_data(client,
+ MAX6639_REG_TEMP(i));
+ if (res < 0) {
+ ret = ERR_PTR(res);
+ goto abort;
+ }
+ data->temp[i] |= res << 3;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+abort:
+ mutex_unlock(&data->update_lock);
+
+ return ret;
+}
+
+static ssize_t show_temp_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ long temp;
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ temp = data->temp[attr->index] * 125;
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_temp_fault(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", data->temp_fault[attr->index]);
+}
+
+static ssize_t show_temp_max(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000));
+}
+
+static ssize_t set_temp_max(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(attr->index),
+ data->temp_therm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000));
+}
+
+static ssize_t set_temp_crit(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(attr->index),
+ data->temp_alert[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000));
+}
+
+static ssize_t set_temp_emergency(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ mutex_lock(&data->update_lock);
+ data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(attr->index),
+ data->temp_ot[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120);
+}
+
+static ssize_t set_pwm(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+ unsigned long val;
+ int res;
+
+ res = strict_strtoul(buf, 10, &val);
+ if (res)
+ return res;
+
+ val = SENSORS_LIMIT(val, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->pwm[attr->index] = (u8)(val * 120 / 255);
+ i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(attr->index),
+ data->pwm[attr->index]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_fan_input(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
+ data->ppr, data->rpm_range));
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct max6639_data *data = max6639_update_device(dev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index)));
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 0);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
+ set_temp_max, 1);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 0);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit,
+ set_temp_crit, 1);
+static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 0);
+static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO,
+ show_temp_emergency, set_temp_emergency, 1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
+static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4);
+
+static struct attribute *max6639_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_fault.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan2_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_fan2_fault.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group max6639_group = {
+ .attrs = max6639_attributes,
+};
+
+/*
+ * Returns the index of the given range in the rpm_ranges table,
+ * defaulting to 1 (4000 RPM) for an invalid range.
+ */
+static int rpm_range_to_reg(int range)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rpm_ranges); i++) {
+ if (rpm_ranges[i] == range)
+ return i;
+ }
+
+ return 1; /* default: 4000 RPM */
+}
+
+static int max6639_init_client(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+ struct max6639_platform_data *max6639_info =
+ client->dev.platform_data;
+ int i = 0;
+ int rpm_range = 1; /* default: 4000 RPM */
+ int err = 0;
+
+ /* Reset chip to default values, see below for GCONFIG setup */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_POR);
+ if (err)
+ goto exit;
+
+ /* Fan pulses per revolution default to 2 */
+ if (max6639_info && max6639_info->ppr > 0 &&
+ max6639_info->ppr < 5)
+ data->ppr = max6639_info->ppr;
+ else
+ data->ppr = 2;
+ data->ppr -= 1;
+
+ if (max6639_info)
+ rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
+ data->rpm_range = rpm_range;
+
+ for (i = 0; i < 2; i++) {
+
+ /* Fan pulses per revolution, set for each channel */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_PPR(i),
+ data->ppr << 5);
+ if (err)
+ goto exit;
+
+ /* Fans config PWM, RPM */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG1(i),
+ MAX6639_FAN_CONFIG1_PWM | rpm_range);
+ if (err)
+ goto exit;
+
+ /* Fan PWM polarity is active-high by default */
+ if (max6639_info && max6639_info->pwm_polarity == 0)
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+ else
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (err)
+ goto exit;
+
+ /*
+ * /THERM full speed enable,
+ * PWM frequency 25kHz, see also GCONFIG below
+ */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_CONFIG3(i),
+ MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03);
+ if (err)
+ goto exit;
+
+ /* Max. temp. 80C/90C/100C */
+ data->temp_therm[i] = 80;
+ data->temp_alert[i] = 90;
+ data->temp_ot[i] = 100;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_THERM_LIMIT(i),
+ data->temp_therm[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_ALERT_LIMIT(i),
+ data->temp_alert[i]);
+ if (err)
+ goto exit;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]);
+ if (err)
+ goto exit;
+
+ /* PWM 120/120 (i.e. 100%) */
+ data->pwm[i] = 120;
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_TARGTDUTY(i), data->pwm[i]);
+ if (err)
+ goto exit;
+ }
+ /* Start monitoring */
+ err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
+ MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL |
+ MAX6639_GCONFIG_PWM_FREQ_HI);
+exit:
+ return err;
+}
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int max6639_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int dev_id, manu_id;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ /* Actual detection via device and manufacturer ID */
+ dev_id = i2c_smbus_read_byte_data(client, MAX6639_REG_DEVID);
+ manu_id = i2c_smbus_read_byte_data(client, MAX6639_REG_MANUID);
+ if (dev_id != 0x58 || manu_id != 0x4D)
+ return -ENODEV;
+
+ strlcpy(info->type, "max6639", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int max6639_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max6639_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL);
+ if (!data) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /* Initialize the max6639 chip */
+ err = max6639_init_client(client);
+ if (err < 0)
+ goto error_free;
+
+ /* Register sysfs hooks */
+ err = sysfs_create_group(&client->dev.kobj, &max6639_group);
+ if (err)
+ goto error_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ err = PTR_ERR(data->hwmon_dev);
+ goto error_remove;
+ }
+
+ dev_info(&client->dev, "temperature sensor and fan control found\n");
+
+ return 0;
+
+error_remove:
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+error_free:
+ kfree(data);
+exit:
+ return err;
+}
+
+static int max6639_remove(struct i2c_client *client)
+{
+ struct max6639_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &max6639_group);
+
+ kfree(data);
+ return 0;
+}
+
+static int max6639_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY);
+}
+
+static int max6639_resume(struct i2c_client *client)
+{
+ int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG);
+ if (data < 0)
+ return data;
+
+ return i2c_smbus_write_byte_data(client,
+ MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY);
+}
+
+static const struct i2c_device_id max6639_id[] = {
+ {"max6639", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max6639_id);
+
+static struct i2c_driver max6639_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6639",
+ },
+ .probe = max6639_probe,
+ .remove = max6639_remove,
+ .suspend = max6639_suspend,
+ .resume = max6639_resume,
+ .id_table = max6639_id,
+ .detect = max6639_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init max6639_init(void)
+{
+ return i2c_add_driver(&max6639_driver);
+}
+
+static void __exit max6639_exit(void)
+{
+ i2c_del_driver(&max6639_driver);
+}
+
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("max6639 driver");
+MODULE_LICENSE("GPL");
+
+module_init(max6639_init);
+module_exit(max6639_exit);
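
The FAN_FROM_REG() macro above converts a TACH count into RPM using the selected full-scale range and the pulses-per-revolution setting (the stored ppr is the hardware value minus one, so div + 1 is the pulse count). A worked example with illustrative numbers:

/*
 * rpm_range = 1 selects the 4000 RPM range, div = 1 means two
 * pulses per revolution, and a TACH count of 60 gives:
 *
 *   rpm = (rpm_ranges[1] * 30) / ((div + 1) * val)
 *       = (4000 * 30) / (2 * 60)
 *       = 1000 RPM
 *
 * val == 0 maps to -1 (invalid reading) and val == 255 to 0
 * (counter saturated).
 */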
diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/max8688.c
new file mode 100644
index 000000000000..8ebfef2ecf26
--- /dev/null
+++ b/drivers/hwmon/max8688.c
@@ -0,0 +1,158 @@
+/*
+ * Hardware monitoring driver for Maxim MAX8688
+ *
+ * Copyright (c) 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+#define MAX8688_MFG_STATUS 0xd8
+
+#define MAX8688_STATUS_OC_FAULT (1 << 4)
+#define MAX8688_STATUS_OV_FAULT (1 << 5)
+#define MAX8688_STATUS_OV_WARNING (1 << 8)
+#define MAX8688_STATUS_UV_FAULT (1 << 9)
+#define MAX8688_STATUS_UV_WARNING (1 << 10)
+#define MAX8688_STATUS_UC_FAULT (1 << 11)
+#define MAX8688_STATUS_OC_WARNING (1 << 12)
+#define MAX8688_STATUS_OT_FAULT (1 << 13)
+#define MAX8688_STATUS_OT_WARNING (1 << 14)
+
+static int max8688_get_status(struct i2c_client *client, int page, int reg)
+{
+ int ret = 0;
+ int mfg_status;
+
+ if (page)
+ return -EINVAL;
+
+ switch (reg) {
+ case PMBUS_STATUS_VOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UV_WARNING)
+ ret |= PB_VOLTAGE_UV_WARNING;
+ if (mfg_status & MAX8688_STATUS_UV_FAULT)
+ ret |= PB_VOLTAGE_UV_FAULT;
+ if (mfg_status & MAX8688_STATUS_OV_WARNING)
+ ret |= PB_VOLTAGE_OV_WARNING;
+ if (mfg_status & MAX8688_STATUS_OV_FAULT)
+ ret |= PB_VOLTAGE_OV_FAULT;
+ break;
+ case PMBUS_STATUS_IOUT:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_UC_FAULT)
+ ret |= PB_IOUT_UC_FAULT;
+ if (mfg_status & MAX8688_STATUS_OC_WARNING)
+ ret |= PB_IOUT_OC_WARNING;
+ if (mfg_status & MAX8688_STATUS_OC_FAULT)
+ ret |= PB_IOUT_OC_FAULT;
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ mfg_status = pmbus_read_word_data(client, 0,
+ MAX8688_MFG_STATUS);
+ if (mfg_status < 0)
+ return mfg_status;
+ if (mfg_status & MAX8688_STATUS_OT_WARNING)
+ ret |= PB_TEMP_OT_WARNING;
+ if (mfg_status & MAX8688_STATUS_OT_FAULT)
+ ret |= PB_TEMP_OT_FAULT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+ return ret;
+}
+
+static struct pmbus_driver_info max8688_info = {
+ .pages = 1,
+ .direct[PSC_VOLTAGE_IN] = true,
+ .direct[PSC_VOLTAGE_OUT] = true,
+ .direct[PSC_TEMPERATURE] = true,
+ .direct[PSC_CURRENT_OUT] = true,
+ .m[PSC_VOLTAGE_IN] = 19995,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = -1,
+ .m[PSC_VOLTAGE_OUT] = 19995,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = -1,
+ .m[PSC_CURRENT_OUT] = 23109,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -2,
+ .m[PSC_TEMPERATURE] = -7612,
+ .b[PSC_TEMPERATURE] = 335,
+ .R[PSC_TEMPERATURE] = -3,
+ .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP
+ | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_STATUS_TEMP,
+ .get_status = max8688_get_status,
+};
+
+static int max8688_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ return pmbus_do_probe(client, id, &max8688_info);
+}
+
+static int max8688_remove(struct i2c_client *client)
+{
+ return pmbus_do_remove(client);
+}
+
+static const struct i2c_device_id max8688_id[] = {
+ {"max8688", 0},
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, max8688_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver max8688_driver = {
+ .driver = {
+ .name = "max8688",
+ },
+ .probe = max8688_probe,
+ .remove = max8688_remove,
+ .id_table = max8688_id,
+};
+
+static int __init max8688_init(void)
+{
+ return i2c_add_driver(&max8688_driver);
+}
+
+static void __exit max8688_exit(void)
+{
+ i2c_del_driver(&max8688_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688");
+MODULE_LICENSE("GPL");
+module_init(max8688_init);
+module_exit(max8688_exit);
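
The MAX8688 temperature coefficients show the direct format with a negative slope, m = -7612, b = 335, R = -3, so hotter readings yield more negative register words. Checking against X = 25 degrees C (values rounded):

/*
 *   Y = (m*X + b) * 10^R
 *     = (-7612 * 25 + 335) * 10^-3 = -189.965 -> ~ -190
 *
 * and the core's inverse recovers the temperature:
 *
 *   X = (Y * 10^3 - b) / m = (-190000 - 335) / -7612 ~= 25.0
 */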
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c
new file mode 100644
index 000000000000..98e2e28899e2
--- /dev/null
+++ b/drivers/hwmon/pmbus.c
@@ -0,0 +1,203 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include "pmbus.h"
+
+/*
+ * Find sensor groups and status registers on each page.
+ */
+static void pmbus_find_sensor_groups(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ int page;
+
+ /* Sensors detected on page 0 only */
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VIN))
+ info->func[0] |= PMBUS_HAVE_VIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_VCAP))
+ info->func[0] |= PMBUS_HAVE_VCAP;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_IIN))
+ info->func[0] |= PMBUS_HAVE_IIN;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_PIN))
+ info->func[0] |= PMBUS_HAVE_PIN;
+ if (info->func[0]
+ && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
+ info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+ info->func[0] |= PMBUS_HAVE_FAN12;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+ info->func[0] |= PMBUS_HAVE_FAN34;
+ if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
+ info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
+ }
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+ info->func[0] |= PMBUS_HAVE_TEMP;
+ if (pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_TEMPERATURE))
+ info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
+ }
+
+ /* Sensors detected on all pages */
+ for (page = 0; page < info->pages; page++) {
+ if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) {
+ info->func[page] |= PMBUS_HAVE_VOUT;
+ if (pmbus_check_byte_register(client, page,
+ PMBUS_STATUS_VOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_VOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) {
+ info->func[page] |= PMBUS_HAVE_IOUT;
+ if (pmbus_check_byte_register(client, page,
+ PMBUS_STATUS_IOUT))
+ info->func[page] |= PMBUS_HAVE_STATUS_IOUT;
+ }
+ if (pmbus_check_word_register(client, page, PMBUS_READ_POUT))
+ info->func[page] |= PMBUS_HAVE_POUT;
+ }
+}
+
+/*
+ * Identify chip parameters.
+ */
+static int pmbus_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ if (!info->pages) {
+ /*
+ * Check if the PAGE command is supported. If it is,
+ * keep setting the page number until it fails or until the
+ * maximum number of pages has been reached. Assume that
+ * this is the number of pages supported by the chip.
+ */
+ if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
+ int page;
+
+ for (page = 1; page < PMBUS_PAGES; page++) {
+ if (pmbus_set_page(client, page) < 0)
+ break;
+ }
+ pmbus_set_page(client, 0);
+ info->pages = page;
+ } else {
+ info->pages = 1;
+ }
+ }
+
+ /*
+ * We should check if the COEFFICIENTS register is supported.
+ * If it is, and the chip is configured for direct mode, we can read
+ * the coefficients from the chip, one set per group of sensor
+ * registers.
+ *
+ * To do this, we will need access to a chip which actually supports the
+ * COEFFICIENTS command, since the command is too complex to implement
+ * without testing it.
+ */
+
+ /* Try to find sensor groups */
+ pmbus_find_sensor_groups(client, info);
+
+ return 0;
+}
+
+static int pmbus_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pmbus_driver_info *info;
+ int ret;
+
+ info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->pages = id->driver_data;
+ info->identify = pmbus_identify;
+
+ ret = pmbus_do_probe(client, id, info);
+ if (ret < 0)
+ goto out;
+ return 0;
+
+out:
+ kfree(info);
+ return ret;
+}
+
+static int pmbus_remove(struct i2c_client *client)
+{
+ int ret;
+ const struct pmbus_driver_info *info;
+
+ info = pmbus_get_driver_info(client);
+ ret = pmbus_do_remove(client);
+ kfree(info);
+ return ret;
+}
+
+/*
+ * Use driver_data to set the number of pages supported by the chip.
+ */
+static const struct i2c_device_id pmbus_id[] = {
+ {"bmr450", 1},
+ {"bmr451", 1},
+ {"bmr453", 1},
+ {"bmr454", 1},
+ {"ltc2978", 8},
+ {"pmbus", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, pmbus_id);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver pmbus_driver = {
+ .driver = {
+ .name = "pmbus",
+ },
+ .probe = pmbus_probe,
+ .remove = pmbus_remove,
+ .id_table = pmbus_id,
+};
+
+static int __init pmbus_init(void)
+{
+ return i2c_add_driver(&pmbus_driver);
+}
+
+static void __exit pmbus_exit(void)
+{
+ i2c_del_driver(&pmbus_driver);
+}
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("Generic PMBus driver");
+MODULE_LICENSE("GPL");
+module_init(pmbus_init);
+module_exit(pmbus_exit);
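
The generic driver deliberately has no detect() callback, so a device bound to it must be instantiated explicitly, for example from board code. A sketch with an illustrative bus number and slave address:

#include <linux/i2c.h>

/* Illustrative: a PMBus device at address 0x58 on bus 0 */
static struct i2c_board_info pmbus_devices[] __initdata = {
	{ I2C_BOARD_INFO("pmbus", 0x58) },
};

/*
 * Registered from early platform init, before the adapter appears:
 *	i2c_register_board_info(0, pmbus_devices,
 *				ARRAY_SIZE(pmbus_devices));
 */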
diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus.h
new file mode 100644
index 000000000000..a81f7f228762
--- /dev/null
+++ b/drivers/hwmon/pmbus.h
@@ -0,0 +1,313 @@
+/*
+ * pmbus.h - Common defines and structures for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef PMBUS_H
+#define PMBUS_H
+
+/*
+ * Registers
+ */
+#define PMBUS_PAGE 0x00
+#define PMBUS_OPERATION 0x01
+#define PMBUS_ON_OFF_CONFIG 0x02
+#define PMBUS_CLEAR_FAULTS 0x03
+#define PMBUS_PHASE 0x04
+
+#define PMBUS_CAPABILITY 0x19
+#define PMBUS_QUERY 0x1A
+
+#define PMBUS_VOUT_MODE 0x20
+#define PMBUS_VOUT_COMMAND 0x21
+#define PMBUS_VOUT_TRIM 0x22
+#define PMBUS_VOUT_CAL_OFFSET 0x23
+#define PMBUS_VOUT_MAX 0x24
+#define PMBUS_VOUT_MARGIN_HIGH 0x25
+#define PMBUS_VOUT_MARGIN_LOW 0x26
+#define PMBUS_VOUT_TRANSITION_RATE 0x27
+#define PMBUS_VOUT_DROOP 0x28
+#define PMBUS_VOUT_SCALE_LOOP 0x29
+#define PMBUS_VOUT_SCALE_MONITOR 0x2A
+
+#define PMBUS_COEFFICIENTS 0x30
+#define PMBUS_POUT_MAX 0x31
+
+#define PMBUS_FAN_CONFIG_12 0x3A
+#define PMBUS_FAN_COMMAND_1 0x3B
+#define PMBUS_FAN_COMMAND_2 0x3C
+#define PMBUS_FAN_CONFIG_34 0x3D
+#define PMBUS_FAN_COMMAND_3 0x3E
+#define PMBUS_FAN_COMMAND_4 0x3F
+
+#define PMBUS_VOUT_OV_FAULT_LIMIT 0x40
+#define PMBUS_VOUT_OV_FAULT_RESPONSE 0x41
+#define PMBUS_VOUT_OV_WARN_LIMIT 0x42
+#define PMBUS_VOUT_UV_WARN_LIMIT 0x43
+#define PMBUS_VOUT_UV_FAULT_LIMIT 0x44
+#define PMBUS_VOUT_UV_FAULT_RESPONSE 0x45
+#define PMBUS_IOUT_OC_FAULT_LIMIT 0x46
+#define PMBUS_IOUT_OC_FAULT_RESPONSE 0x47
+#define PMBUS_IOUT_OC_LV_FAULT_LIMIT 0x48
+#define PMBUS_IOUT_OC_LV_FAULT_RESPONSE 0x49
+#define PMBUS_IOUT_OC_WARN_LIMIT 0x4A
+#define PMBUS_IOUT_UC_FAULT_LIMIT 0x4B
+#define PMBUS_IOUT_UC_FAULT_RESPONSE 0x4C
+
+#define PMBUS_OT_FAULT_LIMIT 0x4F
+#define PMBUS_OT_FAULT_RESPONSE 0x50
+#define PMBUS_OT_WARN_LIMIT 0x51
+#define PMBUS_UT_WARN_LIMIT 0x52
+#define PMBUS_UT_FAULT_LIMIT 0x53
+#define PMBUS_UT_FAULT_RESPONSE 0x54
+#define PMBUS_VIN_OV_FAULT_LIMIT 0x55
+#define PMBUS_VIN_OV_FAULT_RESPONSE 0x56
+#define PMBUS_VIN_OV_WARN_LIMIT 0x57
+#define PMBUS_VIN_UV_WARN_LIMIT 0x58
+#define PMBUS_VIN_UV_FAULT_LIMIT 0x59
+
+#define PMBUS_IIN_OC_FAULT_LIMIT 0x5B
+#define PMBUS_IIN_OC_WARN_LIMIT 0x5D
+
+#define PMBUS_POUT_OP_FAULT_LIMIT 0x68
+#define PMBUS_POUT_OP_WARN_LIMIT 0x6A
+#define PMBUS_PIN_OP_WARN_LIMIT 0x6B
+
+#define PMBUS_STATUS_BYTE 0x78
+#define PMBUS_STATUS_WORD 0x79
+#define PMBUS_STATUS_VOUT 0x7A
+#define PMBUS_STATUS_IOUT 0x7B
+#define PMBUS_STATUS_INPUT 0x7C
+#define PMBUS_STATUS_TEMPERATURE 0x7D
+#define PMBUS_STATUS_CML 0x7E
+#define PMBUS_STATUS_OTHER 0x7F
+#define PMBUS_STATUS_MFR_SPECIFIC 0x80
+#define PMBUS_STATUS_FAN_12 0x81
+#define PMBUS_STATUS_FAN_34 0x82
+
+#define PMBUS_READ_VIN 0x88
+#define PMBUS_READ_IIN 0x89
+#define PMBUS_READ_VCAP 0x8A
+#define PMBUS_READ_VOUT 0x8B
+#define PMBUS_READ_IOUT 0x8C
+#define PMBUS_READ_TEMPERATURE_1 0x8D
+#define PMBUS_READ_TEMPERATURE_2 0x8E
+#define PMBUS_READ_TEMPERATURE_3 0x8F
+#define PMBUS_READ_FAN_SPEED_1 0x90
+#define PMBUS_READ_FAN_SPEED_2 0x91
+#define PMBUS_READ_FAN_SPEED_3 0x92
+#define PMBUS_READ_FAN_SPEED_4 0x93
+#define PMBUS_READ_DUTY_CYCLE 0x94
+#define PMBUS_READ_FREQUENCY 0x95
+#define PMBUS_READ_POUT 0x96
+#define PMBUS_READ_PIN 0x97
+
+#define PMBUS_REVISION 0x98
+#define PMBUS_MFR_ID 0x99
+#define PMBUS_MFR_MODEL 0x9A
+#define PMBUS_MFR_REVISION 0x9B
+#define PMBUS_MFR_LOCATION 0x9C
+#define PMBUS_MFR_DATE 0x9D
+#define PMBUS_MFR_SERIAL 0x9E
+
+/*
+ * CAPABILITY
+ */
+#define PB_CAPABILITY_SMBALERT (1<<4)
+#define PB_CAPABILITY_ERROR_CHECK (1<<7)
+
+/*
+ * VOUT_MODE
+ */
+#define PB_VOUT_MODE_MODE_MASK 0xe0
+#define PB_VOUT_MODE_PARAM_MASK 0x1f
+
+#define PB_VOUT_MODE_LINEAR 0x00
+#define PB_VOUT_MODE_VID 0x20
+#define PB_VOUT_MODE_DIRECT 0x40
+
+/*
+ * Fan configuration
+ */
+#define PB_FAN_2_PULSE_MASK ((1 << 0) | (1 << 1))
+#define PB_FAN_2_RPM (1 << 2)
+#define PB_FAN_2_INSTALLED (1 << 3)
+#define PB_FAN_1_PULSE_MASK ((1 << 4) | (1 << 5))
+#define PB_FAN_1_RPM (1 << 6)
+#define PB_FAN_1_INSTALLED (1 << 7)
+
+/*
+ * STATUS_BYTE, STATUS_WORD (lower)
+ */
+#define PB_STATUS_NONE_ABOVE (1<<0)
+#define PB_STATUS_CML (1<<1)
+#define PB_STATUS_TEMPERATURE (1<<2)
+#define PB_STATUS_VIN_UV (1<<3)
+#define PB_STATUS_IOUT_OC (1<<4)
+#define PB_STATUS_VOUT_OV (1<<5)
+#define PB_STATUS_OFF (1<<6)
+#define PB_STATUS_BUSY (1<<7)
+
+/*
+ * STATUS_WORD (upper)
+ */
+#define PB_STATUS_UNKNOWN (1<<8)
+#define PB_STATUS_OTHER (1<<9)
+#define PB_STATUS_FANS (1<<10)
+#define PB_STATUS_POWER_GOOD_N (1<<11)
+#define PB_STATUS_WORD_MFR (1<<12)
+#define PB_STATUS_INPUT (1<<13)
+#define PB_STATUS_IOUT_POUT (1<<14)
+#define PB_STATUS_VOUT (1<<15)
+
+/*
+ * STATUS_IOUT
+ */
+#define PB_POUT_OP_WARNING (1<<0)
+#define PB_POUT_OP_FAULT (1<<1)
+#define PB_POWER_LIMITING (1<<2)
+#define PB_CURRENT_SHARE_FAULT (1<<3)
+#define PB_IOUT_UC_FAULT (1<<4)
+#define PB_IOUT_OC_WARNING (1<<5)
+#define PB_IOUT_OC_LV_FAULT (1<<6)
+#define PB_IOUT_OC_FAULT (1<<7)
+
+/*
+ * STATUS_VOUT, STATUS_INPUT
+ */
+#define PB_VOLTAGE_UV_FAULT (1<<4)
+#define PB_VOLTAGE_UV_WARNING (1<<5)
+#define PB_VOLTAGE_OV_WARNING (1<<6)
+#define PB_VOLTAGE_OV_FAULT (1<<7)
+
+/*
+ * STATUS_INPUT
+ */
+#define PB_PIN_OP_WARNING (1<<0)
+#define PB_IIN_OC_WARNING (1<<1)
+#define PB_IIN_OC_FAULT (1<<2)
+
+/*
+ * STATUS_TEMPERATURE
+ */
+#define PB_TEMP_UT_FAULT (1<<4)
+#define PB_TEMP_UT_WARNING (1<<5)
+#define PB_TEMP_OT_WARNING (1<<6)
+#define PB_TEMP_OT_FAULT (1<<7)
+
+/*
+ * STATUS_FAN
+ */
+#define PB_FAN_AIRFLOW_WARNING (1<<0)
+#define PB_FAN_AIRFLOW_FAULT (1<<1)
+#define PB_FAN_FAN2_SPEED_OVERRIDE (1<<2)
+#define PB_FAN_FAN1_SPEED_OVERRIDE (1<<3)
+#define PB_FAN_FAN2_WARNING (1<<4)
+#define PB_FAN_FAN1_WARNING (1<<5)
+#define PB_FAN_FAN2_FAULT (1<<6)
+#define PB_FAN_FAN1_FAULT (1<<7)
+
+/*
+ * CML_FAULT_STATUS
+ */
+#define PB_CML_FAULT_OTHER_MEM_LOGIC (1<<0)
+#define PB_CML_FAULT_OTHER_COMM (1<<1)
+#define PB_CML_FAULT_PROCESSOR (1<<3)
+#define PB_CML_FAULT_MEMORY (1<<4)
+#define PB_CML_FAULT_PACKET_ERROR (1<<5)
+#define PB_CML_FAULT_INVALID_DATA (1<<6)
+#define PB_CML_FAULT_INVALID_COMMAND (1<<7)
+
+enum pmbus_sensor_classes {
+ PSC_VOLTAGE_IN = 0,
+ PSC_VOLTAGE_OUT,
+ PSC_CURRENT_IN,
+ PSC_CURRENT_OUT,
+ PSC_POWER,
+ PSC_TEMPERATURE,
+ PSC_FAN,
+ PSC_NUM_CLASSES /* Number of power sensor classes */
+};
+
+#define PMBUS_PAGES 32 /* Per PMBus specification */
+
+/* Functionality bit mask */
+#define PMBUS_HAVE_VIN (1 << 0)
+#define PMBUS_HAVE_VCAP (1 << 1)
+#define PMBUS_HAVE_VOUT (1 << 2)
+#define PMBUS_HAVE_IIN (1 << 3)
+#define PMBUS_HAVE_IOUT (1 << 4)
+#define PMBUS_HAVE_PIN (1 << 5)
+#define PMBUS_HAVE_POUT (1 << 6)
+#define PMBUS_HAVE_FAN12 (1 << 7)
+#define PMBUS_HAVE_FAN34 (1 << 8)
+#define PMBUS_HAVE_TEMP (1 << 9)
+#define PMBUS_HAVE_TEMP2 (1 << 10)
+#define PMBUS_HAVE_TEMP3 (1 << 11)
+#define PMBUS_HAVE_STATUS_VOUT (1 << 12)
+#define PMBUS_HAVE_STATUS_IOUT (1 << 13)
+#define PMBUS_HAVE_STATUS_INPUT (1 << 14)
+#define PMBUS_HAVE_STATUS_TEMP (1 << 15)
+#define PMBUS_HAVE_STATUS_FAN12 (1 << 16)
+#define PMBUS_HAVE_STATUS_FAN34 (1 << 17)
+
+struct pmbus_driver_info {
+ int pages; /* Total number of pages */
+ bool direct[PSC_NUM_CLASSES];
+ /* true if device uses direct data format
+ for the given sensor class */
+ /*
+ * Support one set of coefficients for each sensor type
+ * Used for chips providing data in direct mode.
+ */
+	int m[PSC_NUM_CLASSES];	/* slope coefficient for direct format */
+ int b[PSC_NUM_CLASSES]; /* offset */
+ int R[PSC_NUM_CLASSES]; /* exponent */
+
+ u32 func[PMBUS_PAGES]; /* Functionality, per page */
+ /*
+ * The get_status function maps manufacturing specific status values
+ * into PMBus standard status values.
+ * This function is optional and only necessary if chip specific status
+ * register values have to be mapped into standard PMBus status register
+ * values.
+ */
+ int (*get_status)(struct i2c_client *client, int page, int reg);
+ /*
+ * The identify function determines supported PMBus functionality.
+ * This function is only necessary if a chip driver supports multiple
+ * chips, and the chip functionality is not pre-determined.
+ */
+ int (*identify)(struct i2c_client *client,
+ struct pmbus_driver_info *info);
+};
+
+/* Function declarations */
+
+int pmbus_set_page(struct i2c_client *client, u8 page);
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg);
+void pmbus_clear_faults(struct i2c_client *client);
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg);
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg);
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info);
+int pmbus_do_remove(struct i2c_client *client);
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client
+ *client);
+
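+/*
+ * A minimal chip driver built on this core might look roughly like the
+ * sketch below (hypothetical example; the page count and functionality
+ * flags depend entirely on the chip being supported):
+ *
+ *	static struct pmbus_driver_info example_info = {
+ *		.pages = 1,
+ *		.func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ *	};
+ *
+ *	static int example_probe(struct i2c_client *client,
+ *				 const struct i2c_device_id *id)
+ *	{
+ *		return pmbus_do_probe(client, id, &example_info);
+ *	}
+ */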
+#endif /* PMBUS_H */
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
new file mode 100644
index 000000000000..6474512f49b0
--- /dev/null
+++ b/drivers/hwmon/pmbus_core.c
@@ -0,0 +1,1658 @@
+/*
+ * Hardware monitoring driver for PMBus devices
+ *
+ * Copyright (c) 2010, 2011 Ericsson AB.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/delay.h>
+#include <linux/i2c/pmbus.h>
+#include "pmbus.h"
+
+/*
+ * Constants needed to determine number of sensors, booleans, and labels.
+ */
+#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */
+#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit,
+ crit */
+#define PMBUS_IOUT_SENSORS_PER_PAGE	4	/* input, lcrit, max, crit */
+#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */
+#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */
+#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit,
+ crit */
+
+#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm;
+						   c: max_alarm, crit_alarm;
+						   p: alarm */
+#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+#define PMBUS_IOUT_BOOLEANS_PER_PAGE	3	/* max_alarm, lcrit_alarm,
+ crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
+#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
+#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
+ lcrit_alarm, crit_alarm */
+
+#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */
+
+/*
+ * status, status_vout, status_iout, status_fan12, status_fan34, and status_temp
+ * are paged. status_input is unpaged.
+ */
+#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1)
+
+/*
+ * Index into status register array, per status register group
+ */
+#define PB_STATUS_BASE 0
+#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES)
+#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES)
+#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES)
+#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES)
+#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1)
+
+struct pmbus_sensor {
+ char name[I2C_NAME_SIZE]; /* sysfs sensor name */
+ struct sensor_device_attribute attribute;
+ u8 page; /* page number */
+ u8 reg; /* register */
+ enum pmbus_sensor_classes class; /* sensor class */
+ bool update; /* runtime sensor update needed */
+ int data; /* Sensor data.
+ Negative if there was a read error */
+};
+
+struct pmbus_boolean {
+ char name[I2C_NAME_SIZE]; /* sysfs boolean name */
+ struct sensor_device_attribute attribute;
+};
+
+struct pmbus_label {
+ char name[I2C_NAME_SIZE]; /* sysfs label name */
+ struct sensor_device_attribute attribute;
+ char label[I2C_NAME_SIZE]; /* label */
+};
+
+struct pmbus_data {
+ struct device *hwmon_dev;
+
+ u32 flags; /* from platform data */
+
+ int exponent; /* linear mode: exponent for output voltages */
+
+ const struct pmbus_driver_info *info;
+
+ int max_attributes;
+ int num_attributes;
+ struct attribute **attributes;
+ struct attribute_group group;
+
+ /*
+ * Sensors cover both sensor and limit registers.
+ */
+ int max_sensors;
+ int num_sensors;
+ struct pmbus_sensor *sensors;
+ /*
+ * Booleans are used for alarms.
+ * Values are determined from status registers.
+ */
+ int max_booleans;
+ int num_booleans;
+ struct pmbus_boolean *booleans;
+ /*
+ * Labels are used to map generic names (e.g., "in1")
+ * to PMBus specific names (e.g., "vin" or "vout1").
+ */
+ int max_labels;
+ int num_labels;
+ struct pmbus_label *labels;
+
+ struct mutex update_lock;
+ bool valid;
+ unsigned long last_updated; /* in jiffies */
+
+ /*
+ * A single status register covers multiple attributes,
+ * so we keep them all together.
+ */
+ u8 status_bits;
+	int status[PB_NUM_STATUS_REG];	/* signed so read errors propagate */
+
+ u8 currpage;
+};
+
+int pmbus_set_page(struct i2c_client *client, u8 page)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int rv = 0;
+ int newpage;
+
+ if (page != data->currpage) {
+ rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE);
+ if (newpage != page)
+ rv = -EINVAL;
+ else
+ data->currpage = page;
+ }
+ return rv;
+}
+EXPORT_SYMBOL_GPL(pmbus_set_page);
+
+static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_byte(client, value);
+}
+
+static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg,
+ u16 word)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_write_word_data(client, reg, word);
+}
+
+int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_word_data(client, reg);
+}
+EXPORT_SYMBOL_GPL(pmbus_read_word_data);
+
+static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg)
+{
+ int rv;
+
+ rv = pmbus_set_page(client, page);
+ if (rv < 0)
+ return rv;
+
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static void pmbus_clear_fault_page(struct i2c_client *client, int page)
+{
+ pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS);
+}
+
+void pmbus_clear_faults(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ int i;
+
+ for (i = 0; i < data->info->pages; i++)
+ pmbus_clear_fault_page(client, i);
+}
+EXPORT_SYMBOL_GPL(pmbus_clear_faults);
+
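+/*
+ * Probing a register which is not supported may either fail outright
+ * or set the CML (communication fault) bit in the status register.
+ * Treat an "invalid command" CML fault as evidence that the probed
+ * register does not exist.
+ */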
+static int pmbus_check_status_cml(struct i2c_client *client, int page)
+{
+ int status, status2;
+
+ status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE);
+ if (status < 0 || (status & PB_STATUS_CML)) {
+ status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML);
+ if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_byte_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_byte_register);
+
+bool pmbus_check_word_register(struct i2c_client *client, int page, int reg)
+{
+ int rv;
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ rv = pmbus_read_word_data(client, page, reg);
+ if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK))
+ rv = pmbus_check_status_cml(client, page);
+ pmbus_clear_fault_page(client, page);
+ return rv >= 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_check_word_register);
+
+const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+
+ return data->info;
+}
+EXPORT_SYMBOL_GPL(pmbus_get_driver_info);
+
+static int pmbus_get_status(struct i2c_client *client, int page, int reg)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+ int status;
+
+ if (info->get_status) {
+ status = info->get_status(client, page, reg);
+ if (status != -ENODATA)
+ return status;
+ }
+ return pmbus_read_byte_data(client, page, reg);
+}
+
+static struct pmbus_data *pmbus_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ const struct pmbus_driver_info *info = data->info;
+
+ mutex_lock(&data->update_lock);
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ int i;
+
+ for (i = 0; i < info->pages; i++)
+ data->status[PB_STATUS_BASE + i]
+ = pmbus_read_byte_data(client, i,
+ PMBUS_STATUS_BYTE);
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT))
+ continue;
+ data->status[PB_STATUS_VOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_VOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT))
+ continue;
+ data->status[PB_STATUS_IOUT_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_IOUT);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP))
+ continue;
+ data->status[PB_STATUS_TEMP_BASE + i]
+ = pmbus_get_status(client, i,
+ PMBUS_STATUS_TEMPERATURE);
+ }
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12))
+ continue;
+ data->status[PB_STATUS_FAN_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_FAN_12);
+ }
+
+ for (i = 0; i < info->pages; i++) {
+ if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34))
+ continue;
+ data->status[PB_STATUS_FAN34_BASE + i]
+ = pmbus_get_status(client, i, PMBUS_STATUS_FAN_34);
+ }
+
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ data->status[PB_STATUS_INPUT_BASE]
+ = pmbus_get_status(client, 0, PMBUS_STATUS_INPUT);
+
+ for (i = 0; i < data->num_sensors; i++) {
+ struct pmbus_sensor *sensor = &data->sensors[i];
+
+ if (!data->valid || sensor->update)
+ sensor->data
+ = pmbus_read_word_data(client, sensor->page,
+ sensor->reg);
+ }
+ pmbus_clear_faults(client);
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+/*
+ * Convert linear sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
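+/*
+ * Example: for a current sensor, the LINEAR11 word 0xCA80 decodes to
+ * exponent -7 and mantissa 640, i.e. 640 * 2^-7 = 5.0, and this
+ * function returns 5000 (milli-amperes).
+ */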
+static int pmbus_reg2data_linear(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ s16 exponent;
+ s32 mantissa;
+ long val;
+
+ if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
+ exponent = data->exponent;
+ mantissa = (u16) sensor->data;
+ } else { /* LINEAR11 */
+ exponent = (sensor->data >> 11) & 0x001f;
+ mantissa = sensor->data & 0x07ff;
+
+ if (exponent > 0x0f)
+ exponent |= 0xffe0; /* sign extend exponent */
+ if (mantissa > 0x03ff)
+ mantissa |= 0xfffff800; /* sign extend mantissa */
+ }
+
+ val = mantissa;
+
+ /* scale result to milli-units for all sensors except fans */
+ if (sensor->class != PSC_FAN)
+ val = val * 1000L;
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER)
+ val = val * 1000L;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return (int)val;
+}
+
+/*
+ * Convert direct sensor values to milli- or micro-units
+ * depending on sensor type.
+ */
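+/*
+ * Example: with coefficients m = 1, b = 0, R = 2 for PSC_VOLTAGE_IN,
+ * a register value of 1250 represents 1250 * 10^-2 = 12.5 V, so this
+ * function returns 12500 (milli-volts).
+ */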
+static int pmbus_reg2data_direct(struct pmbus_data *data,
+ struct pmbus_sensor *sensor)
+{
+ long val = (s16) sensor->data;
+ long m, b, R;
+
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
+
+ if (m == 0)
+ return 0;
+
+ /* X = 1/m * (Y * 10^-R - b) */
+ R = -R;
+ /* scale result to milli-units for everything but fans */
+ if (sensor->class != PSC_FAN) {
+ R += 3;
+ b *= 1000;
+ }
+
+ /* scale result to micro-units for power sensors */
+ if (sensor->class == PSC_POWER) {
+ R += 3;
+ b *= 1000;
+ }
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return (int)((val - b) / m);
+}
+
+static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+{
+ int val;
+
+ if (data->info->direct[sensor->class])
+ val = pmbus_reg2data_direct(data, sensor);
+ else
+ val = pmbus_reg2data_linear(data, sensor);
+
+ return val;
+}
+
+#define MAX_MANTISSA (1023 * 1000)
+#define MIN_MANTISSA (511 * 1000)
+
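+/*
+ * Example: encoding 5000 (a 5 A limit in milli-units) for a current
+ * class yields mantissa 640 and exponent -7, i.e. the LINEAR11 word
+ * 0xCA80, the inverse of the pmbus_reg2data_linear() example above.
+ */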
+static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ s16 exponent = 0, mantissa;
+ bool negative = false;
+
+ /* simple case */
+ if (val == 0)
+ return 0;
+
+ if (class == PSC_VOLTAGE_OUT) {
+ /* LINEAR16 does not support negative voltages */
+ if (val < 0)
+ return 0;
+
+ /*
+		 * The exponent is static in LINEAR16 mode, so we have
+		 * no choice but to adjust the value to it.
+ */
+ if (data->exponent < 0)
+ val <<= -data->exponent;
+ else
+ val >>= data->exponent;
+ val = DIV_ROUND_CLOSEST(val, 1000);
+ return val & 0xffff;
+ }
+
+ if (val < 0) {
+ negative = true;
+ val = -val;
+ }
+
+ /* Power is in uW. Convert to mW before converting. */
+ if (class == PSC_POWER)
+ val = DIV_ROUND_CLOSEST(val, 1000L);
+
+ /*
+ * For simplicity, convert fan data to milli-units
+ * before calculating the exponent.
+ */
+ if (class == PSC_FAN)
+ val = val * 1000;
+
+ /* Reduce large mantissa until it fits into 10 bit */
+ while (val >= MAX_MANTISSA && exponent < 15) {
+ exponent++;
+ val >>= 1;
+ }
+ /* Increase small mantissa to improve precision */
+ while (val < MIN_MANTISSA && exponent > -15) {
+ exponent--;
+ val <<= 1;
+ }
+
+ /* Convert mantissa from milli-units to units */
+ mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+ /* Ensure that resulting number is within range */
+ if (mantissa > 0x3ff)
+ mantissa = 0x3ff;
+
+ /* restore sign */
+ if (negative)
+ mantissa = -mantissa;
+
+ /* Convert to 5 bit exponent, 11 bit mantissa */
+ return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
+static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ long m, b, R;
+
+ m = data->info->m[class];
+ b = data->info->b[class];
+ R = data->info->R[class];
+
+ /* Power is in uW. Adjust R and b. */
+ if (class == PSC_POWER) {
+ R -= 3;
+ b *= 1000;
+ }
+
+ /* Calculate Y = (m * X + b) * 10^R */
+ if (class != PSC_FAN) {
+ R -= 3; /* Adjust R and b for data in milli-units */
+ b *= 1000;
+ }
+ val = val * m + b;
+
+ while (R > 0) {
+ val *= 10;
+ R--;
+ }
+ while (R < 0) {
+ val = DIV_ROUND_CLOSEST(val, 10);
+ R++;
+ }
+
+ return val;
+}
+
+static u16 pmbus_data2reg(struct pmbus_data *data,
+ enum pmbus_sensor_classes class, long val)
+{
+ u16 regval;
+
+ if (data->info->direct[class])
+ regval = pmbus_data2reg_direct(data, class, val);
+ else
+ regval = pmbus_data2reg_linear(data, class, val);
+
+ return regval;
+}
+
+/*
+ * Return boolean calculated from converted data.
+ * <index> defines a status register index and mask, and optionally
+ * two sensor indices.
+ * The upper half-word references the two optional sensors,
+ * the lower half-word references the status register and mask.
+ *
+ * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
+ * index are set. s1 and s2 (the sensor index values) are zero in this case.
+ * The function returns true if (status[reg] & mask) is true.
+ *
+ * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
+ * a specified limit has to be performed to determine the boolean result.
+ * In this case, the function returns true if the status bit is set and
+ * v1 >= v2 (where v1 and v2 are sensor values referenced by sensor
+ * indices s1 and s2).
+ *
+ * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
+ * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
+ *
+ * If a negative value is stored in any of the referenced registers, this value
+ * reflects an error code which will be returned.
+ */
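+/*
+ * Example: pmbus_add_boolean_reg(data, "in", "alarm", 1, PB_STATUS_BASE,
+ * PB_STATUS_VIN_UV) encodes index = (PB_STATUS_BASE << 8) | PB_STATUS_VIN_UV
+ * with s1 = s2 = 0, so the resulting boolean simply reports the VIN
+ * undervoltage bit of the first status register.
+ */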
+static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+{
+ u8 s1 = (index >> 24) & 0xff;
+ u8 s2 = (index >> 16) & 0xff;
+ u8 reg = (index >> 8) & 0xff;
+ u8 mask = index & 0xff;
+ int status;
+ u8 regval;
+
+ status = data->status[reg];
+ if (status < 0)
+ return status;
+
+ regval = status & mask;
+ if (!s1 && !s2)
+ *val = !!regval;
+ else {
+ int v1, v2;
+ struct pmbus_sensor *sensor1, *sensor2;
+
+ sensor1 = &data->sensors[s1];
+ if (sensor1->data < 0)
+ return sensor1->data;
+ sensor2 = &data->sensors[s2];
+ if (sensor2->data < 0)
+ return sensor2->data;
+
+ v1 = pmbus_reg2data(data, sensor1);
+ v2 = pmbus_reg2data(data, sensor2);
+ *val = !!(regval && v1 >= v2);
+ }
+ return 0;
+}
+
+static ssize_t pmbus_show_boolean(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ int val;
+ int err;
+
+ err = pmbus_get_boolean(data, attr->index, &val);
+ if (err)
+ return err;
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_show_sensor(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+ struct pmbus_data *data = pmbus_update_device(dev);
+ struct pmbus_sensor *sensor;
+
+ sensor = &data->sensors[attr->index];
+ if (sensor->data < 0)
+ return sensor->data;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+}
+
+static ssize_t pmbus_set_sensor(struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct pmbus_sensor *sensor = &data->sensors[attr->index];
+ ssize_t rv = count;
+ long val = 0;
+ int ret;
+ u16 regval;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ regval = pmbus_data2reg(data, sensor->class, val);
+ ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
+ if (ret < 0)
+ rv = ret;
+ else
+ data->sensors[attr->index].data = regval;
+ mutex_unlock(&data->update_lock);
+ return rv;
+}
+
+static ssize_t pmbus_show_label(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ data->labels[attr->index].label);
+}
+
+#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
+do { \
+ struct sensor_device_attribute *a \
+ = &data->_type##s[data->num_##_type##s].attribute; \
+ BUG_ON(data->num_attributes >= data->max_attributes); \
+ a->dev_attr.attr.name = _name; \
+ a->dev_attr.attr.mode = _mode; \
+ a->dev_attr.show = _show; \
+ a->dev_attr.store = _set; \
+ a->index = _idx; \
+ data->attributes[data->num_attributes] = &a->dev_attr.attr; \
+ data->num_attributes++; \
+} while (0)
+
+#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \
+ pmbus_show_##_type, NULL)
+
+#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \
+ PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \
+ pmbus_show_##_type, pmbus_set_##_type)
+
+static void pmbus_add_boolean(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int idx)
+{
+ struct pmbus_boolean *boolean;
+
+ BUG_ON(data->num_booleans >= data->max_booleans);
+
+ boolean = &data->booleans[data->num_booleans];
+
+ snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s",
+ name, seq, type);
+ PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx);
+ data->num_booleans++;
+}
+
+static void pmbus_add_boolean_reg(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int reg, int bit)
+{
+ pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit);
+}
+
+static void pmbus_add_boolean_cmp(struct pmbus_data *data,
+ const char *name, const char *type,
+ int seq, int i1, int i2, int reg, int mask)
+{
+ pmbus_add_boolean(data, name, type, seq,
+ (i1 << 24) | (i2 << 16) | (reg << 8) | mask);
+}
+
+static void pmbus_add_sensor(struct pmbus_data *data,
+ const char *name, const char *type, int seq,
+ int page, int reg, enum pmbus_sensor_classes class,
+ bool update)
+{
+ struct pmbus_sensor *sensor;
+
+ BUG_ON(data->num_sensors >= data->max_sensors);
+
+ sensor = &data->sensors[data->num_sensors];
+ snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s",
+ name, seq, type);
+ sensor->page = page;
+ sensor->reg = reg;
+ sensor->class = class;
+ sensor->update = update;
+ if (update)
+ PMBUS_ADD_GET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ else
+ PMBUS_ADD_SET_ATTR(data, sensor->name, sensor,
+ data->num_sensors);
+ data->num_sensors++;
+}
+
+static void pmbus_add_label(struct pmbus_data *data,
+ const char *name, int seq,
+ const char *lstring, int index)
+{
+ struct pmbus_label *label;
+
+ BUG_ON(data->num_labels >= data->max_labels);
+
+ label = &data->labels[data->num_labels];
+ snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq);
+ if (!index)
+ strncpy(label->label, lstring, sizeof(label->label) - 1);
+ else
+ snprintf(label->label, sizeof(label->label), "%s%d", lstring,
+ index);
+
+ PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels);
+ data->num_labels++;
+}
+
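+/*
+ * Example: pmbus_add_label(data, "in", 2, "vout", 1) creates an
+ * attribute named "in2_label" which reads as "vout1".
+ */
+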
+static const int pmbus_temp_registers[] = {
+ PMBUS_READ_TEMPERATURE_1,
+ PMBUS_READ_TEMPERATURE_2,
+ PMBUS_READ_TEMPERATURE_3
+};
+
+static const int pmbus_temp_flags[] = {
+ PMBUS_HAVE_TEMP,
+ PMBUS_HAVE_TEMP2,
+ PMBUS_HAVE_TEMP3
+};
+
+static const int pmbus_fan_registers[] = {
+ PMBUS_READ_FAN_SPEED_1,
+ PMBUS_READ_FAN_SPEED_2,
+ PMBUS_READ_FAN_SPEED_3,
+ PMBUS_READ_FAN_SPEED_4
+};
+
+static const int pmbus_fan_config_registers[] = {
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_12,
+ PMBUS_FAN_CONFIG_34,
+ PMBUS_FAN_CONFIG_34
+};
+
+static const int pmbus_fan_status_registers[] = {
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_12,
+ PMBUS_STATUS_FAN_34,
+ PMBUS_STATUS_FAN_34
+};
+
+static const u32 pmbus_fan_flags[] = {
+ PMBUS_HAVE_FAN12,
+ PMBUS_HAVE_FAN12,
+ PMBUS_HAVE_FAN34,
+ PMBUS_HAVE_FAN34
+};
+
+static const u32 pmbus_fan_status_flags[] = {
+ PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN12,
+ PMBUS_HAVE_STATUS_FAN34,
+ PMBUS_HAVE_STATUS_FAN34
+};
+
+/*
+ * Determine maximum number of sensors, booleans, and labels.
+ * To keep things simple, only make a rough high estimate.
+ */
+static void pmbus_find_max_attr(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, max_sensors, max_booleans, max_labels;
+
+ max_sensors = PMBUS_MAX_INPUT_SENSORS;
+ max_booleans = PMBUS_MAX_INPUT_BOOLEANS;
+ max_labels = PMBUS_MAX_INPUT_LABELS;
+
+ for (page = 0; page < info->pages; page++) {
+ if (info->func[page] & PMBUS_HAVE_VOUT) {
+ max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_IOUT) {
+ max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_POUT) {
+ max_sensors += PMBUS_POUT_SENSORS_PER_PAGE;
+ max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE;
+ max_labels++;
+ }
+ if (info->func[page] & PMBUS_HAVE_FAN12) {
+ max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
+ }
+ if (info->func[page] & PMBUS_HAVE_FAN34) {
+ max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN;
+ max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP2) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ if (info->func[page] & PMBUS_HAVE_TEMP3) {
+ max_sensors += PMBUS_MAX_SENSORS_PER_TEMP;
+ max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP;
+ }
+ }
+ data->max_sensors = max_sensors;
+ data->max_booleans = max_booleans;
+ data->max_labels = max_labels;
+ data->max_attributes = max_sensors + max_booleans + max_labels;
+}
+
+/*
+ * Search for attributes. Allocate sensors, booleans, and labels as needed.
+ */
+static void pmbus_find_attributes(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ const struct pmbus_driver_info *info = data->info;
+ int page, i0, i1, in_index;
+
+ /*
+ * Input voltage sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_VIN) {
+ bool have_alarm = false;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vin", 0);
+ pmbus_add_sensor(data, "in", "input", in_index,
+ 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index,
+ 0, PMBUS_VIN_UV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index,
+ 0, PMBUS_VIN_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index,
+ 0, PMBUS_VIN_OV_WARN_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_VIN_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index,
+ 0, PMBUS_VIN_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE,
+ PB_STATUS_VIN_UV);
+ in_index++;
+ }
+ if (info->func[0] & PMBUS_HAVE_VCAP) {
+ pmbus_add_label(data, "in", in_index, "vcap", 0);
+ pmbus_add_sensor(data, "in", "input", in_index, 0,
+ PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true);
+ in_index++;
+ }
+
+ /*
+ * Output voltage sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_VOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "in", in_index, "vout", page + 1);
+ pmbus_add_sensor(data, "in", "input", in_index, page,
+ PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "min", in_index, page,
+ PMBUS_VOUT_UV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "min_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "lcrit", in_index, page,
+ PMBUS_VOUT_UV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "lcrit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_UV_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "max", in_index, page,
+ PMBUS_VOUT_OV_WARN_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "max_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "in", "crit", in_index, page,
+ PMBUS_VOUT_OV_FAULT_LIMIT,
+ PSC_VOLTAGE_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) {
+ pmbus_add_boolean_reg(data, "in", "crit_alarm",
+ in_index,
+ PB_STATUS_VOUT_BASE +
+ page,
+ PB_VOLTAGE_OV_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "in", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_VOUT_OV);
+ in_index++;
+ }
+
+ /*
+ * Current sensors
+ */
+
+ /*
+ * Input current sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_IIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iin", 0);
+ pmbus_add_sensor(data, "curr", "input", in_index,
+ 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index,
+ 0, PMBUS_IIN_OC_WARN_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_WARNING);
+ }
+ }
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_IIN_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index,
+ 0, PMBUS_IIN_OC_FAULT_LIMIT,
+ PSC_CURRENT_IN, false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_IIN_OC_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output current sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool have_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_IOUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "curr", in_index, "iout", page + 1);
+ pmbus_add_sensor(data, "curr", "input", in_index, page,
+ PMBUS_READ_IOUT, PSC_CURRENT_OUT, true);
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "max", in_index, page,
+ PMBUS_IOUT_OC_WARN_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr", "max_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "lcrit", in_index, page,
+ PMBUS_IOUT_UC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "lcrit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_UC_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "curr", "crit", in_index, page,
+ PMBUS_IOUT_OC_FAULT_LIMIT,
+ PSC_CURRENT_OUT, false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) {
+ pmbus_add_boolean_reg(data, "curr",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE +
+ page, PB_IOUT_OC_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Add generic alarm attribute only if there are no individual
+ * attributes.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "curr", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_IOUT_OC);
+ in_index++;
+ }
+
+ /*
+ * Power sensors
+ */
+ /*
+ * Input Power sensors
+ */
+ in_index = 1;
+ if (info->func[0] & PMBUS_HAVE_PIN) {
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pin", 0);
+ pmbus_add_sensor(data, "power", "input", in_index,
+ 0, PMBUS_READ_PIN, PSC_POWER, true);
+ if (pmbus_check_word_register(client, 0,
+ PMBUS_PIN_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index,
+ 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ if (info->func[0] & PMBUS_HAVE_STATUS_INPUT)
+ pmbus_add_boolean_reg(data, "power",
+ "alarm",
+ in_index,
+ PB_STATUS_INPUT_BASE,
+ PB_PIN_OP_WARNING);
+ }
+ in_index++;
+ }
+
+ /*
+ * Output Power sensors
+ */
+ for (page = 0; page < info->pages; page++) {
+ bool need_alarm = false;
+
+ if (!(info->func[page] & PMBUS_HAVE_POUT))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_label(data, "power", in_index, "pout", page + 1);
+ pmbus_add_sensor(data, "power", "input", in_index, page,
+ PMBUS_READ_POUT, PSC_POWER, true);
+ /*
+ * Per hwmon sysfs API, power_cap is to be used to limit output
+ * power.
+ * We have two registers related to maximum output power,
+ * PMBUS_POUT_MAX and PMBUS_POUT_OP_WARN_LIMIT.
+ * PMBUS_POUT_MAX matches the powerX_cap attribute definition.
+ * There is no attribute in the API to match
+ * PMBUS_POUT_OP_WARN_LIMIT. We use powerX_max for now.
+ */
+ if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "cap", in_index, page,
+ PMBUS_POUT_MAX, PSC_POWER, false);
+ need_alarm = true;
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "max", in_index, page,
+ PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER,
+ false);
+ need_alarm = true;
+ }
+ if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT))
+ pmbus_add_boolean_reg(data, "power", "alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE + page,
+ PB_POUT_OP_WARNING
+ | PB_POWER_LIMITING);
+
+ if (pmbus_check_word_register(client, page,
+ PMBUS_POUT_OP_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "power", "crit", in_index, page,
+ PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER,
+ false);
+ if (info->func[page] & PMBUS_HAVE_STATUS_IOUT)
+ pmbus_add_boolean_reg(data, "power",
+ "crit_alarm",
+ in_index,
+ PB_STATUS_IOUT_BASE
+ + page,
+ PB_POUT_OP_FAULT);
+ }
+ in_index++;
+ }
+
+ /*
+ * Temperature sensors
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int t;
+
+ for (t = 0; t < ARRAY_SIZE(pmbus_temp_registers); t++) {
+ bool have_alarm = false;
+
+ /*
+ * A PMBus chip may support any combination of
+ * temperature registers on any page. So we can not
+ * abort after a failure to detect a register, but have
+ * to continue checking for all registers on all pages.
+ */
+ if (!(info->func[page] & pmbus_temp_flags[t]))
+ continue;
+
+ if (!pmbus_check_word_register
+ (client, page, pmbus_temp_registers[t]))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "input", in_index, page,
+ pmbus_temp_registers[t],
+ PSC_TEMPERATURE, true);
+
+ /*
+ * PMBus provides only one status register for TEMP1-3.
+ * Thus, we can not use the status register to determine
+ * which of the three sensors actually caused an alarm.
+ * Always compare current temperature against the limit
+ * registers to determine alarm conditions for a
+ * specific sensor.
+ *
+ * Since there is only one set of limit registers for
+ * up to three temperature sensors, we need to update
+ * all limit registers after the limit was changed for
+ * one of the sensors. This ensures that correct limits
+ * are reported for all temperature sensors.
+ */
+ if (pmbus_check_word_register
+ (client, page, PMBUS_UT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "min", in_index,
+ page, PMBUS_UT_WARN_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "min_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_UT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "lcrit",
+ in_index, page,
+ PMBUS_UT_FAULT_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "lcrit_alarm", in_index, i1, i0,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_UT_FAULT);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register
+ (client, page, PMBUS_OT_WARN_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "max", in_index,
+ page, PMBUS_OT_WARN_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "max_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_WARNING);
+ have_alarm = true;
+ }
+ }
+ if (pmbus_check_word_register(client, page,
+ PMBUS_OT_FAULT_LIMIT)) {
+ i1 = data->num_sensors;
+ pmbus_add_sensor(data, "temp", "crit", in_index,
+ page, PMBUS_OT_FAULT_LIMIT,
+ PSC_TEMPERATURE, true);
+ if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) {
+ pmbus_add_boolean_cmp(data, "temp",
+ "crit_alarm", in_index, i0, i1,
+ PB_STATUS_TEMP_BASE + page,
+ PB_TEMP_OT_FAULT);
+ have_alarm = true;
+ }
+ }
+ /*
+ * Last resort - we were not able to create any alarm
+			 * attributes. Report alarm for all sensors using the
+ * status register temperature alarm bit.
+ */
+ if (!have_alarm)
+ pmbus_add_boolean_reg(data, "temp", "alarm",
+ in_index,
+ PB_STATUS_BASE + page,
+ PB_STATUS_TEMPERATURE);
+ in_index++;
+ }
+ }
+
+ /*
+ * Fans
+ */
+ in_index = 1;
+ for (page = 0; page < info->pages; page++) {
+ int f;
+
+ for (f = 0; f < ARRAY_SIZE(pmbus_fan_registers); f++) {
+ int regval;
+
+ if (!(info->func[page] & pmbus_fan_flags[f]))
+ break;
+
+ if (!pmbus_check_word_register(client, page,
+ pmbus_fan_registers[f])
+ || !pmbus_check_byte_register(client, page,
+ pmbus_fan_config_registers[f]))
+ break;
+
+ /*
+ * Skip fan if not installed.
+ * Each fan configuration register covers multiple fans,
+ * so we have to do some magic.
+ */
+ regval = pmbus_read_byte_data(client, page,
+ pmbus_fan_config_registers[f]);
+ if (regval < 0 ||
+ (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4)))))
+ continue;
+
+ i0 = data->num_sensors;
+ pmbus_add_sensor(data, "fan", "input", in_index, page,
+ pmbus_fan_registers[f], PSC_FAN, true);
+
+ /*
+ * Each fan status register covers multiple fans,
+ * so we have to do some magic.
+ */
+ if ((info->func[page] & pmbus_fan_status_flags[f]) &&
+ pmbus_check_byte_register(client,
+ page, pmbus_fan_status_registers[f])) {
+ int base;
+
+ if (f > 1) /* fan 3, 4 */
+ base = PB_STATUS_FAN34_BASE + page;
+ else
+ base = PB_STATUS_FAN_BASE + page;
+ pmbus_add_boolean_reg(data, "fan", "alarm",
+ in_index, base,
+ PB_FAN_FAN1_WARNING >> (f & 1));
+ pmbus_add_boolean_reg(data, "fan", "fault",
+ in_index, base,
+ PB_FAN_FAN1_FAULT >> (f & 1));
+ }
+ in_index++;
+ }
+ }
+}
+
+/*
+ * Identify chip parameters.
+ * This function is called for all chips.
+ */
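+/*
+ * Example: VOUT_MODE = 0x17 selects linear mode (upper three bits are
+ * zero); the five-bit parameter 0x17 sign-extends to -9, so VOUT
+ * readings are reported in units of 2^-9 V.
+ */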
+static int pmbus_identify_common(struct i2c_client *client,
+ struct pmbus_data *data)
+{
+ int vout_mode = -1, exponent;
+
+ if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
+ vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (vout_mode >= 0 && vout_mode != 0xff) {
+ /*
+ * Not all chips support the VOUT_MODE command,
+ * so a failure to read it is not an error.
+ */
+ switch (vout_mode >> 5) {
+ case 0: /* linear mode */
+ if (data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+
+ exponent = vout_mode & 0x1f;
+ /* and sign-extend it */
+ if (exponent & 0x10)
+ exponent |= ~0x1f;
+ data->exponent = exponent;
+ break;
+ case 2: /* direct mode */
+ if (!data->info->direct[PSC_VOLTAGE_OUT])
+ return -ENODEV;
+ break;
+ default:
+ return -ENODEV;
+ }
+ }
+
+ /* Determine maximum number of sensors, booleans, and labels */
+ pmbus_find_max_attr(client, data);
+ pmbus_clear_fault_page(client, 0);
+ return 0;
+}
+
+int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
+ struct pmbus_driver_info *info)
+{
+ const struct pmbus_platform_data *pdata = client->dev.platform_data;
+ struct pmbus_data *data;
+ int ret;
+
+ if (!info) {
+		dev_err(&client->dev, "Missing chip information\n");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE
+ | I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&client->dev, "No memory to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * Bail out if status register or PMBus revision register
+ * does not exist.
+ */
+ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
+ || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
+ dev_err(&client->dev,
+ "Status or revision register not found\n");
+ ret = -ENODEV;
+ goto out_data;
+ }
+
+ if (pdata)
+ data->flags = pdata->flags;
+ data->info = info;
+
+ pmbus_clear_faults(client);
+
+ if (info->identify) {
+ ret = (*info->identify)(client, info);
+ if (ret < 0) {
+ dev_err(&client->dev, "Chip identification failed\n");
+ goto out_data;
+ }
+ }
+
+ if (info->pages <= 0 || info->pages > PMBUS_PAGES) {
+ dev_err(&client->dev, "Bad number of PMBus pages: %d\n",
+ info->pages);
+ ret = -EINVAL;
+ goto out_data;
+ }
+ /*
+ * Bail out if more than one page was configured, but we can not
+ * select the highest page. This is an indication that the wrong
+ * chip type was selected. Better bail out now than keep
+ * returning errors later on.
+ */
+ if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) {
+ dev_err(&client->dev, "Failed to select page %d\n",
+ info->pages - 1);
+ ret = -EINVAL;
+ goto out_data;
+ }
+
+ ret = pmbus_identify_common(client, data);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to identify chip capabilities\n");
+ goto out_data;
+ }
+
+ ret = -ENOMEM;
+ data->sensors = kzalloc(sizeof(struct pmbus_sensor) * data->max_sensors,
+ GFP_KERNEL);
+ if (!data->sensors) {
+ dev_err(&client->dev, "No memory to allocate sensor data\n");
+ goto out_data;
+ }
+
+ data->booleans = kzalloc(sizeof(struct pmbus_boolean)
+ * data->max_booleans, GFP_KERNEL);
+ if (!data->booleans) {
+ dev_err(&client->dev, "No memory to allocate boolean data\n");
+ goto out_sensors;
+ }
+
+ data->labels = kzalloc(sizeof(struct pmbus_label) * data->max_labels,
+ GFP_KERNEL);
+ if (!data->labels) {
+ dev_err(&client->dev, "No memory to allocate label data\n");
+ goto out_booleans;
+ }
+
+ data->attributes = kzalloc(sizeof(struct attribute *)
+ * data->max_attributes, GFP_KERNEL);
+ if (!data->attributes) {
+ dev_err(&client->dev, "No memory to allocate attribute data\n");
+ goto out_labels;
+ }
+
+ pmbus_find_attributes(client, data);
+
+ /*
+ * If there are no attributes, something is wrong.
+ * Bail out instead of trying to register nothing.
+ */
+ if (!data->num_attributes) {
+ dev_err(&client->dev, "No attributes found\n");
+ ret = -ENODEV;
+ goto out_attributes;
+ }
+
+ /* Register sysfs hooks */
+ data->group.attrs = data->attributes;
+ ret = sysfs_create_group(&client->dev.kobj, &data->group);
+ if (ret) {
+ dev_err(&client->dev, "Failed to create sysfs entries\n");
+ goto out_attributes;
+ }
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ dev_err(&client->dev, "Failed to register hwmon device\n");
+ goto out_hwmon_device_register;
+ }
+ return 0;
+
+out_hwmon_device_register:
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+out_attributes:
+ kfree(data->attributes);
+out_labels:
+ kfree(data->labels);
+out_booleans:
+ kfree(data->booleans);
+out_sensors:
+ kfree(data->sensors);
+out_data:
+ kfree(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_probe);
+
+int pmbus_do_remove(struct i2c_client *client)
+{
+ struct pmbus_data *data = i2c_get_clientdata(client);
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &data->group);
+ kfree(data->attributes);
+ kfree(data->labels);
+ kfree(data->booleans);
+ kfree(data->sensors);
+ kfree(data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pmbus_do_remove);
+
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_DESCRIPTION("PMBus core driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 073eabedc432..f2b377c56a3a 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1,11 +1,12 @@
/*
w83627ehf - Driver for the hardware monitoring functionality of
- the Winbond W83627EHF Super-I/O chip
+ the Winbond W83627EHF Super-I/O chip
Copyright (C) 2005 Jean Delvare <khali@linux-fr.org>
Copyright (C) 2006 Yuan Mu (Winbond),
- Rudolf Marek <r.marek@assembler.cz>
- David Hubbard <david.c.hubbard@gmail.com>
+ Rudolf Marek <r.marek@assembler.cz>
+ David Hubbard <david.c.hubbard@gmail.com>
Daniel J Blueman <daniel.blueman@gmail.com>
+ Copyright (C) 2010 Sheng-Yuan Huang (Nuvoton) (PS00)
Shamelessly ripped from the w83627hf driver
Copyright (C) 2003 Mark Studebaker
@@ -35,11 +36,13 @@
Chip #vin #fan #pwm #temp chip IDs man ID
w83627ehf 10 5 4 3 0x8850 0x88 0x5ca3
- 0x8860 0xa1
+ 0x8860 0xa1
w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3
w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3
w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3
- w83667hg-b 9 5 3 3 0xb350 0xc1 0x5ca3
+ w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3
+ nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3
+ nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -58,21 +61,28 @@
#include <linux/io.h>
#include "lm75.h"
-enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b };
+enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b, nct6775,
+ nct6776 };
/* used to set data->name = w83627ehf_device_names[data->sio_kind] */
-static const char * w83627ehf_device_names[] = {
+static const char * const w83627ehf_device_names[] = {
"w83627ehf",
"w83627dhg",
"w83627dhg",
"w83667hg",
"w83667hg",
+ "nct6775",
+ "nct6776",
};
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
+static unsigned short fan_debounce;
+module_param(fan_debounce, ushort, 0);
+MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
+
#define DRVNAME "w83627ehf"
/*
@@ -80,7 +90,7 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
*/
#define W83627EHF_LD_HWM 0x0b
-#define W83667HG_LD_VID 0x0d
+#define W83667HG_LD_VID 0x0d
#define SIO_REG_LDSEL 0x07 /* Logical device select */
#define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -94,8 +104,10 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID");
#define SIO_W83627EHG_ID 0x8860
#define SIO_W83627DHG_ID 0xa020
#define SIO_W83627DHG_P_ID 0xb070
-#define SIO_W83667HG_ID 0xa510
+#define SIO_W83667HG_ID 0xa510
#define SIO_W83667HG_B_ID 0xb350
+#define SIO_NCT6775_ID 0xb470
+#define SIO_NCT6776_ID 0xc330
#define SIO_ID_MASK 0xFFF0
static inline void
@@ -138,7 +150,7 @@ superio_exit(int ioreg)
* ISA constants
*/
-#define IOREGION_ALIGNMENT ~7
+#define IOREGION_ALIGNMENT (~7)
#define IOREGION_OFFSET 5
#define IOREGION_LENGTH 2
#define ADDR_REG_OFFSET 0
@@ -164,13 +176,10 @@ static const u16 W83627EHF_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d, 0x3e, 0x55c };
#define W83627EHF_REG_IN(nr) ((nr < 7) ? (0x20 + (nr)) : \
(0x550 + (nr) - 7))
-#define W83627EHF_REG_TEMP1 0x27
-#define W83627EHF_REG_TEMP1_HYST 0x3a
-#define W83627EHF_REG_TEMP1_OVER 0x39
-static const u16 W83627EHF_REG_TEMP[] = { 0x150, 0x250 };
-static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x153, 0x253 };
-static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x155, 0x255 };
-static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
+static const u16 W83627EHF_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x7e };
+static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x3a, 0x153, 0x253, 0 };
+static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x39, 0x155, 0x255, 0 };
+static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 };
/* Fan clock dividers are spread over the following five registers */
#define W83627EHF_REG_FANDIV1 0x47
@@ -179,6 +188,11 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 };
#define W83627EHF_REG_DIODE 0x59
#define W83627EHF_REG_SMI_OVT 0x4C
+/* NCT6775F has its own fan divider registers */
+#define NCT6775_REG_FANDIV1 0x506
+#define NCT6775_REG_FANDIV2 0x507
+#define NCT6775_REG_FAN_DEBOUNCE 0xf0
+
#define W83627EHF_REG_ALARM1 0x459
#define W83627EHF_REG_ALARM2 0x45A
#define W83627EHF_REG_ALARM3 0x45B
@@ -199,22 +213,123 @@ static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 };
static const u8 W83627EHF_PWM_ENABLE_SHIFT[] = { 2, 4, 1, 4 };
/* FAN Duty Cycle, be used to control */
-static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
-static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
+static const u16 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 };
+static const u16 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 };
static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 };
/* Advanced Fan control, some values are common for all fans */
-static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
-static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
-static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
+static const u16 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 };
+static const u16 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 };
+static const u16 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 };
-static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[]
+static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[]
= { 0xff, 0x67, 0xff, 0x69 };
-static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[]
+static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[]
= { 0xff, 0x68, 0xff, 0x6a };
-static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b };
-static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] = { 0x68, 0x6a, 0x6c };
+static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b };
+static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[]
+ = { 0x68, 0x6a, 0x6c };
+
+static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 };
+static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 };
+static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 };
+static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 };
+static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 };
+static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 };
+static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
+static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
+static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
+static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642};
+
+static const u16 NCT6775_REG_TEMP[]
+ = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d };
+static const u16 NCT6775_REG_TEMP_CONFIG[]
+ = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A };
+static const u16 NCT6775_REG_TEMP_HYST[]
+ = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D };
+static const u16 NCT6775_REG_TEMP_OVER[]
+ = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C };
+static const u16 NCT6775_REG_TEMP_SOURCE[]
+ = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 };
+
+static const char *const w83667hg_b_temp_label[] = {
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "AMDTSI",
+ "PECI Agent 1",
+ "PECI Agent 2",
+ "PECI Agent 3",
+ "PECI Agent 4"
+};
+
+static const char *const nct6775_temp_label[] = {
+ "",
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "AMD SB-TSI",
+ "PECI Agent 0",
+ "PECI Agent 1",
+ "PECI Agent 2",
+ "PECI Agent 3",
+ "PECI Agent 4",
+ "PECI Agent 5",
+ "PECI Agent 6",
+ "PECI Agent 7",
+ "PCH_CHIP_CPU_MAX_TEMP",
+ "PCH_CHIP_TEMP",
+ "PCH_CPU_TEMP",
+ "PCH_MCH_TEMP",
+ "PCH_DIM0_TEMP",
+ "PCH_DIM1_TEMP",
+ "PCH_DIM2_TEMP",
+ "PCH_DIM3_TEMP"
+};
+
+static const char *const nct6776_temp_label[] = {
+ "",
+ "SYSTIN",
+ "CPUTIN",
+ "AUXTIN",
+ "SMBUSMASTER 0",
+ "SMBUSMASTER 1",
+ "SMBUSMASTER 2",
+ "SMBUSMASTER 3",
+ "SMBUSMASTER 4",
+ "SMBUSMASTER 5",
+ "SMBUSMASTER 6",
+ "SMBUSMASTER 7",
+ "PECI Agent 0",
+ "PECI Agent 1",
+ "PCH_CHIP_CPU_MAX_TEMP",
+ "PCH_CHIP_TEMP",
+ "PCH_CPU_TEMP",
+ "PCH_MCH_TEMP",
+ "PCH_DIM0_TEMP",
+ "PCH_DIM1_TEMP",
+ "PCH_DIM2_TEMP",
+ "PCH_DIM3_TEMP",
+ "BYTE_TEMP"
+};
+
+#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP)
+
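+/*
+ * Word-sized (16-bit) registers: the banked temperature value/hyst/over
+ * registers (0x?50, 0x?53, 0x?55 in banks 1 and 2), the fan count and
+ * fan minimum registers (0x630-0x63f, 0x640, 0x642, 0x656-0x65f), and
+ * the temperature registers at 0x73, 0x75 and 0x77. All others are
+ * 8 bit wide.
+ */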
+static inline int is_word_sized(u16 reg)
+{
+ return ((((reg & 0xff00) == 0x100
+ || (reg & 0xff00) == 0x200)
+ && ((reg & 0x00ff) == 0x50
+ || (reg & 0x00ff) == 0x53
+ || (reg & 0x00ff) == 0x55))
+ || (reg & 0xfff0) == 0x630
+ || reg == 0x640 || reg == 0x642
+ || ((reg & 0xfff0) == 0x650
+ && (reg & 0x000f) >= 0x06)
+ || reg == 0x73 || reg == 0x75 || reg == 0x77
+ );
+}
/*
* Conversions
@@ -232,12 +347,36 @@ static inline u8 step_time_to_reg(unsigned int msec, u8 mode)
(msec + 200) / 400), 1, 255);
}
-static inline unsigned int
-fan_from_reg(u8 reg, unsigned int div)
+static unsigned int fan_from_reg8(u16 reg, unsigned int divreg)
{
if (reg == 0 || reg == 255)
return 0;
- return 1350000U / (reg * div);
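+	/* RPM = 1350000 / (count << divider) */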
+ return 1350000U / (reg << divreg);
+}
+
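+/*
+ * NCT6776F fan minimum limits are 13-bit values split across two bytes:
+ * bits 4:0 of the count in the low byte, bits 12:5 in the high byte.
+ * A raw value of 0xff1f (all count bits set) means "no limit".
+ */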
+static unsigned int fan_from_reg13(u16 reg, unsigned int divreg)
+{
+ if ((reg & 0xff1f) == 0xff1f)
+ return 0;
+
+ reg = (reg & 0x1f) | ((reg & 0xff00) >> 3);
+
+ if (reg == 0)
+ return 0;
+
+ return 1350000U / reg;
+}
+
+static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
+{
+ if (reg == 0 || reg == 0xffff)
+ return 0;
+
+ /*
+ * Even though the registers are 16 bit wide, the fan divisor
+ * still applies.
+ */
+ return 1350000U / (reg << divreg);
}
static inline unsigned int
@@ -247,21 +386,19 @@ div_from_reg(u8 reg)
}
static inline int
-temp1_from_reg(s8 reg)
+temp_from_reg(u16 reg, s16 regval)
{
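+	/* Word-sized sensors use the LM75 register format (0.5 degC LSB) */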
- return reg * 1000;
+ if (is_word_sized(reg))
+ return LM75_TEMP_FROM_REG(regval);
+ return regval * 1000;
}
-static inline s8
-temp1_to_reg(long temp, int min, int max)
+static inline u16
+temp_to_reg(u16 reg, long temp)
{
- if (temp <= min)
- return min / 1000;
- if (temp >= max)
- return max / 1000;
- if (temp < 0)
- return (temp - 500) / 1000;
- return (temp + 500) / 1000;
+ if (is_word_sized(reg))
+ return LM75_TEMP_TO_REG(temp);
+ return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000);
}
/* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */
@@ -275,7 +412,8 @@ static inline long in_from_reg(u8 reg, u8 nr)
static inline u8 in_to_reg(u32 val, u8 nr)
{
- return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0, 255);
+ return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0,
+ 255);
}
/*
@@ -289,38 +427,57 @@ struct w83627ehf_data {
struct device *hwmon_dev;
struct mutex lock;
- const u8 *REG_FAN_START_OUTPUT;
- const u8 *REG_FAN_STOP_OUTPUT;
- const u8 *REG_FAN_MAX_OUTPUT;
- const u8 *REG_FAN_STEP_OUTPUT;
+ u16 reg_temp[NUM_REG_TEMP];
+ u16 reg_temp_over[NUM_REG_TEMP];
+ u16 reg_temp_hyst[NUM_REG_TEMP];
+ u16 reg_temp_config[NUM_REG_TEMP];
+ u8 temp_src[NUM_REG_TEMP];
+ const char * const *temp_label;
+
+ const u16 *REG_PWM;
+ const u16 *REG_TARGET;
+ const u16 *REG_FAN;
+ const u16 *REG_FAN_MIN;
+ const u16 *REG_FAN_START_OUTPUT;
+ const u16 *REG_FAN_STOP_OUTPUT;
+ const u16 *REG_FAN_STOP_TIME;
+ const u16 *REG_FAN_MAX_OUTPUT;
+ const u16 *REG_FAN_STEP_OUTPUT;
+
+ unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
+ unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values */
+ u8 bank; /* current register bank */
u8 in_num; /* number of in inputs we have */
u8 in[10]; /* Register value */
u8 in_max[10]; /* Register value */
u8 in_min[10]; /* Register value */
- u8 fan[5];
- u8 fan_min[5];
+ unsigned int rpm[5];
+ u16 fan_min[5];
u8 fan_div[5];
u8 has_fan; /* some fan inputs can be disabled */
+ u8 has_fan_min; /* some fans don't have min register */
+ bool has_fan_div;
u8 temp_type[3];
- s8 temp1;
- s8 temp1_max;
- s8 temp1_max_hyst;
- s16 temp[2];
- s16 temp_max[2];
- s16 temp_max_hyst[2];
+ s16 temp[9];
+ s16 temp_max[9];
+ s16 temp_max_hyst[9];
u32 alarms;
u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */
u8 pwm_enable[4]; /* 1->manual
2->thermal cruise mode (also called SmartFan I)
3->fan speed cruise mode
- 4->variable thermal cruise (also called SmartFan III) */
+ 4->variable thermal cruise (also called
+ SmartFan III)
+ 5->enhanced variable thermal cruise (also called
+ SmartFan IV) */
+ u8 pwm_enable_orig[4]; /* original value of pwm_enable */
u8 pwm_num; /* number of pwm */
u8 pwm[4];
u8 target_temp[4];
@@ -335,7 +492,7 @@ struct w83627ehf_data {
u8 vid;
u8 vrm;
- u8 temp3_disable;
+ u16 have_temp;
u8 in6_skip;
};
@@ -344,30 +501,19 @@ struct w83627ehf_sio_data {
enum kinds kind;
};
-static inline int is_word_sized(u16 reg)
-{
- return (((reg & 0xff00) == 0x100
- || (reg & 0xff00) == 0x200)
- && ((reg & 0x00ff) == 0x50
- || (reg & 0x00ff) == 0x53
- || (reg & 0x00ff) == 0x55));
-}
-
-/* Registers 0x50-0x5f are banked */
+/*
+ * On older chips, only registers 0x50-0x5f are banked.
+ * On more recent chips, all registers are banked.
+ * Assume that is the case and set the bank number for each access.
+ * Cache the bank number so it only needs to be set if it changes.
+ */
static inline void w83627ehf_set_bank(struct w83627ehf_data *data, u16 reg)
{
- if ((reg & 0x00f0) == 0x50) {
+ u8 bank = reg >> 8;
+ if (data->bank != bank) {
outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
- outb_p(reg >> 8, data->addr + DATA_REG_OFFSET);
- }
-}
-
-/* Not strictly necessary, but play it safe for now */
-static inline void w83627ehf_reset_bank(struct w83627ehf_data *data, u16 reg)
-{
- if (reg & 0xff00) {
- outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET);
- outb_p(0, data->addr + DATA_REG_OFFSET);
+ outb_p(bank, data->addr + DATA_REG_OFFSET);
+ data->bank = bank;
}
}
@@ -385,14 +531,13 @@ static u16 w83627ehf_read_value(struct w83627ehf_data *data, u16 reg)
data->addr + ADDR_REG_OFFSET);
res = (res << 8) + inb_p(data->addr + DATA_REG_OFFSET);
}
- w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
-
return res;
}
-static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value)
+static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg,
+ u16 value)
{
int word_sized = is_word_sized(reg);
@@ -406,13 +551,40 @@ static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value
data->addr + ADDR_REG_OFFSET);
}
outb_p(value & 0xff, data->addr + DATA_REG_OFFSET);
- w83627ehf_reset_bank(data, reg);
mutex_unlock(&data->lock);
return 0;
}
/* This function assumes that the caller holds data->update_lock */
+static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
+{
+ u8 reg;
+
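+	/*
+	 * Each divider is a 3-bit field: fan1 and fan3 occupy bits 2:0,
+	 * fan2 and fan4 bits 6:4 of FANDIV1 and FANDIV2 respectively.
+	 */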
+ switch (nr) {
+ case 0:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
+ | (data->fan_div[0] & 0x7);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+ break;
+ case 1:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
+ | ((data->fan_div[1] << 4) & 0x70);
+		w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+		break;
+ case 2:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
+ | (data->fan_div[2] & 0x7);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
+ break;
+ case 3:
+ reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
+ | ((data->fan_div[3] << 4) & 0x70);
+ w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg);
+ break;
+ }
+}
+
+/* This function assumes that the caller holds data->update_lock */
static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
{
u8 reg;
@@ -463,6 +635,32 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
}
}
+static void w83627ehf_write_fan_div_common(struct device *dev,
+ struct w83627ehf_data *data, int nr)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6776)
+ ; /* no dividers, do nothing */
+ else if (sio_data->kind == nct6775)
+ nct6775_write_fan_div(data, nr);
+ else
+ w83627ehf_write_fan_div(data, nr);
+}
+
+static void nct6775_update_fan_div(struct w83627ehf_data *data)
+{
+ u8 i;
+
+ i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1);
+ data->fan_div[0] = i & 0x7;
+ data->fan_div[1] = (i & 0x70) >> 4;
+ i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2);
+ data->fan_div[2] = i & 0x7;
+	if (data->has_fan & (1 << 3))
+ data->fan_div[3] = (i & 0x70) >> 4;
+}
+
static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
{
int i;
@@ -488,10 +686,79 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
}
}
+static void w83627ehf_update_fan_div_common(struct device *dev,
+ struct w83627ehf_data *data)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6776)
+ ; /* no dividers, do nothing */
+ else if (sio_data->kind == nct6775)
+ nct6775_update_fan_div(data);
+ else
+ w83627ehf_update_fan_div(data);
+}
+
+static void nct6775_update_pwm(struct w83627ehf_data *data)
+{
+ int i;
+ int pwmcfg, fanmodecfg;
+
+ for (i = 0; i < data->pwm_num; i++) {
+ pwmcfg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[i]);
+ fanmodecfg = w83627ehf_read_value(data,
+ NCT6775_REG_FAN_MODE[i]);
+ data->pwm_mode[i] =
+ ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
+ data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1;
+ data->tolerance[i] = fanmodecfg & 0x0f;
+ data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+ }
+}
+
+static void w83627ehf_update_pwm(struct w83627ehf_data *data)
+{
+ int i;
+ int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
+
+ for (i = 0; i < data->pwm_num; i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+
+ /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
+ if (i != 1) {
+ pwmcfg = w83627ehf_read_value(data,
+ W83627EHF_REG_PWM_ENABLE[i]);
+ tolerance = w83627ehf_read_value(data,
+ W83627EHF_REG_TOLERANCE[i]);
+ }
+ data->pwm_mode[i] =
+ ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1;
+ data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
+ & 3) + 1;
+ data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]);
+
+ data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f;
+ }
+}
+
+static void w83627ehf_update_pwm_common(struct device *dev,
+ struct w83627ehf_data *data)
+{
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
+
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
+ nct6775_update_pwm(data);
+ else
+ w83627ehf_update_pwm(data);
+}
+
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- int pwmcfg = 0, tolerance = 0; /* shut up the compiler */
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
int i;
mutex_lock(&data->update_lock);
@@ -499,7 +766,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ/2)
|| !data->valid) {
/* Fan clock dividers */
- w83627ehf_update_fan_div(data);
+ w83627ehf_update_fan_div_common(dev, data);
/* Measured voltages and limits */
for (i = 0; i < data->in_num; i++) {
@@ -513,92 +780,90 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
/* Measured fan speeds and limits */
for (i = 0; i < 5; i++) {
+ u16 reg;
+
if (!(data->has_fan & (1 << i)))
continue;
- data->fan[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN[i]);
- data->fan_min[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_MIN[i]);
+ reg = w83627ehf_read_value(data, data->REG_FAN[i]);
+ data->rpm[i] = data->fan_from_reg(reg,
+ data->fan_div[i]);
+
+ if (data->has_fan_min & (1 << i))
+ data->fan_min[i] = w83627ehf_read_value(data,
+ data->REG_FAN_MIN[i]);
/* If we failed to measure the fan speed and clock
divider can be increased, let's try that for next
time */
- if (data->fan[i] == 0xff
- && data->fan_div[i] < 0x07) {
- dev_dbg(dev, "Increasing fan%d "
+ if (data->has_fan_div
+ && (reg >= 0xff || (sio_data->kind == nct6775
+ && reg == 0x00))
+ && data->fan_div[i] < 0x07) {
+ dev_dbg(dev, "Increasing fan%d "
"clock divider from %u to %u\n",
i + 1, div_from_reg(data->fan_div[i]),
div_from_reg(data->fan_div[i] + 1));
data->fan_div[i]++;
- w83627ehf_write_fan_div(data, i);
+ w83627ehf_write_fan_div_common(dev, data, i);
/* Preserve min limit if possible */
- if (data->fan_min[i] >= 2
+ if ((data->has_fan_min & (1 << i))
+ && data->fan_min[i] >= 2
&& data->fan_min[i] != 255)
w83627ehf_write_value(data,
- W83627EHF_REG_FAN_MIN[i],
+ data->REG_FAN_MIN[i],
(data->fan_min[i] /= 2));
}
}
+ w83627ehf_update_pwm_common(dev, data);
+
for (i = 0; i < data->pwm_num; i++) {
if (!(data->has_fan & (1 << i)))
continue;
- /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */
- if (i != 1) {
- pwmcfg = w83627ehf_read_value(data,
- W83627EHF_REG_PWM_ENABLE[i]);
- tolerance = w83627ehf_read_value(data,
- W83627EHF_REG_TOLERANCE[i]);
- }
- data->pwm_mode[i] =
- ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1)
- ? 0 : 1;
- data->pwm_enable[i] =
- ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i])
- & 3) + 1;
- data->pwm[i] = w83627ehf_read_value(data,
- W83627EHF_REG_PWM[i]);
- data->fan_start_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_START_OUTPUT[i]);
- data->fan_stop_output[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_STOP_OUTPUT[i]);
- data->fan_stop_time[i] = w83627ehf_read_value(data,
- W83627EHF_REG_FAN_STOP_TIME[i]);
-
- if (data->REG_FAN_MAX_OUTPUT[i] != 0xff)
+ data->fan_start_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_START_OUTPUT[i]);
+ data->fan_stop_output[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_STOP_OUTPUT[i]);
+ data->fan_stop_time[i] =
+ w83627ehf_read_value(data,
+ data->REG_FAN_STOP_TIME[i]);
+
+ if (data->REG_FAN_MAX_OUTPUT &&
+ data->REG_FAN_MAX_OUTPUT[i] != 0xff)
data->fan_max_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_MAX_OUTPUT[i]);
+ data->REG_FAN_MAX_OUTPUT[i]);
- if (data->REG_FAN_STEP_OUTPUT[i] != 0xff)
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[i] != 0xff)
data->fan_step_output[i] =
w83627ehf_read_value(data,
- data->REG_FAN_STEP_OUTPUT[i]);
+ data->REG_FAN_STEP_OUTPUT[i]);
data->target_temp[i] =
w83627ehf_read_value(data,
- W83627EHF_REG_TARGET[i]) &
+ data->REG_TARGET[i]) &
(data->pwm_mode[i] == 1 ? 0x7f : 0xff);
- data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0))
- & 0x0f;
}
/* Measured temperatures and limits */
- data->temp1 = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1);
- data->temp1_max = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1_OVER);
- data->temp1_max_hyst = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP1_HYST);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
data->temp[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP[i]);
- data->temp_max[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_OVER[i]);
- data->temp_max_hyst[i] = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_HYST[i]);
+ data->reg_temp[i]);
+ if (data->reg_temp_over[i])
+ data->temp_max[i]
+ = w83627ehf_read_value(data,
+ data->reg_temp_over[i]);
+ if (data->reg_temp_hyst[i])
+ data->temp_max_hyst[i]
+ = w83627ehf_read_value(data,
+ data->reg_temp_hyst[i]);
}
data->alarms = w83627ehf_read_value(data,
@@ -625,7 +890,8 @@ show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr)); \
}
@@ -635,14 +901,18 @@ show_in_reg(in_max)
#define store_in_reg(REG, reg) \
static ssize_t \
-store_in_##reg (struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
+store_in_##reg(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u32 val = simple_strtoul(buf, NULL, 10); \
- \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
mutex_lock(&data->update_lock); \
data->in_##reg[nr] = in_to_reg(val, nr); \
w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \
@@ -654,7 +924,8 @@ store_in_##reg (struct device *dev, struct device_attribute *attr, \
store_in_reg(MIN, min)
store_in_reg(MAX, max)
-static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
@@ -689,45 +960,50 @@ static struct sensor_device_attribute sda_in_alarm[] = {
};
static struct sensor_device_attribute sda_in_min[] = {
- SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
- SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
- SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
- SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
- SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
- SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
- SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
- SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
- SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
- SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
+ SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0),
+ SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1),
+ SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2),
+ SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3),
+ SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4),
+ SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5),
+ SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6),
+ SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7),
+ SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8),
+ SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9),
};
static struct sensor_device_attribute sda_in_max[] = {
- SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
- SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
- SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
- SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
- SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
- SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
- SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
- SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
- SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
- SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
+ SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0),
+ SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1),
+ SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2),
+ SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3),
+ SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4),
+ SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5),
+ SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6),
+ SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7),
+ SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8),
+ SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9),
};
-#define show_fan_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
- int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", \
- fan_from_reg(data->reg[nr], \
- div_from_reg(data->fan_div[nr]))); \
+static ssize_t
+show_fan(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%d\n", data->rpm[nr]);
+}
+
+static ssize_t
+show_fan_min(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%d\n",
+ data->fan_from_reg_min(data->fan_min[nr],
+ data->fan_div[nr]));
}
-show_fan_reg(fan);
-show_fan_reg(fan_min);
static ssize_t
show_fan_div(struct device *dev, struct device_attribute *attr,
@@ -746,11 +1022,32 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- unsigned int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
unsigned int reg;
u8 new_div;
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
mutex_lock(&data->update_lock);
+ if (!data->has_fan_div) {
+ /*
+ * Only NCT6776F for now, so we know that this is a 13 bit
+ * register
+ */
+ if (!val) {
+ val = 0xff1f;
+ } else {
+ if (val > 1350000U)
+				val = 1350000U;
+ val = 1350000U / val;
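+			/* Repack the count: bits 4:0 low byte, 12:5 high */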
+ val = (val & 0x1f) | ((val << 3) & 0xff00);
+ }
+ data->fan_min[nr] = val;
+ goto done; /* Leave fan divider alone */
+ }
if (!val) {
/* No min limit, alarm disabled */
data->fan_min[nr] = 255;
@@ -761,15 +1058,17 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
even with the highest divider (128) */
data->fan_min[nr] = 254;
new_div = 7; /* 128 == (1 << 7) */
- dev_warn(dev, "fan%u low limit %u below minimum %u, set to "
- "minimum\n", nr + 1, val, fan_from_reg(254, 128));
+ dev_warn(dev, "fan%u low limit %lu below minimum %u, set to "
+ "minimum\n", nr + 1, val,
+ data->fan_from_reg_min(254, 7));
} else if (!reg) {
/* Speed above this value cannot possibly be represented,
even with the lowest divider (1) */
data->fan_min[nr] = 1;
new_div = 0; /* 1 == (1 << 0) */
- dev_warn(dev, "fan%u low limit %u above maximum %u, set to "
- "maximum\n", nr + 1, val, fan_from_reg(1, 1));
+ dev_warn(dev, "fan%u low limit %lu above maximum %u, set to "
+ "maximum\n", nr + 1, val,
+ data->fan_from_reg_min(1, 0));
} else {
/* Automatically pick the best divider, i.e. the one such
that the min limit will correspond to a register value
@@ -785,25 +1084,16 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
/* Write both the fan clock divider (if it changed) and the new
fan min (unconditionally) */
if (new_div != data->fan_div[nr]) {
- /* Preserve the fan speed reading */
- if (data->fan[nr] != 0xff) {
- if (new_div > data->fan_div[nr])
- data->fan[nr] >>= new_div - data->fan_div[nr];
- else if (data->fan[nr] & 0x80)
- data->fan[nr] = 0xff;
- else
- data->fan[nr] <<= data->fan_div[nr] - new_div;
- }
-
dev_dbg(dev, "fan%u clock divider changed from %u to %u\n",
nr + 1, div_from_reg(data->fan_div[nr]),
div_from_reg(new_div));
data->fan_div[nr] = new_div;
- w83627ehf_write_fan_div(data, nr);
+ w83627ehf_write_fan_div_common(dev, data, nr);
/* Give the chip time to sample a new speed value */
data->last_updated = jiffies;
}
- w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[nr],
+done:
+ w83627ehf_write_value(data, data->REG_FAN_MIN[nr],
data->fan_min[nr]);
mutex_unlock(&data->update_lock);
@@ -847,70 +1137,54 @@ static struct sensor_device_attribute sda_fan_div[] = {
SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
};
-#define show_temp1_reg(reg) \
-static ssize_t \
-show_##reg(struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- return sprintf(buf, "%d\n", temp1_from_reg(data->reg)); \
-}
-show_temp1_reg(temp1);
-show_temp1_reg(temp1_max);
-show_temp1_reg(temp1_max_hyst);
-
-#define store_temp1_reg(REG, reg) \
-static ssize_t \
-store_temp1_##reg(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct w83627ehf_data *data = dev_get_drvdata(dev); \
- long val = simple_strtol(buf, NULL, 10); \
- \
- mutex_lock(&data->update_lock); \
- data->temp1_##reg = temp1_to_reg(val, -128000, 127000); \
- w83627ehf_write_value(data, W83627EHF_REG_TEMP1_##REG, \
- data->temp1_##reg); \
- mutex_unlock(&data->update_lock); \
- return count; \
+static ssize_t
+show_temp_label(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct w83627ehf_data *data = w83627ehf_update_device(dev);
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+ return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]);
}
-store_temp1_reg(OVER, max);
-store_temp1_reg(HYST, max_hyst);
-#define show_temp_reg(reg) \
+#define show_temp_reg(addr, reg) \
static ssize_t \
show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
- LM75_TEMP_FROM_REG(data->reg[nr])); \
+ temp_from_reg(data->addr[nr], data->reg[nr])); \
}
-show_temp_reg(temp);
-show_temp_reg(temp_max);
-show_temp_reg(temp_max_hyst);
+show_temp_reg(reg_temp, temp);
+show_temp_reg(reg_temp_over, temp_max);
+show_temp_reg(reg_temp_hyst, temp_max_hyst);
-#define store_temp_reg(REG, reg) \
+#define store_temp_reg(addr, reg) \
static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- long val = simple_strtol(buf, NULL, 10); \
- \
+ int err; \
+ long val; \
+ err = strict_strtol(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
mutex_lock(&data->update_lock); \
- data->reg[nr] = LM75_TEMP_TO_REG(val); \
- w83627ehf_write_value(data, W83627EHF_REG_TEMP_##REG[nr], \
+ data->reg[nr] = temp_to_reg(data->addr[nr], val); \
+ w83627ehf_write_value(data, data->addr[nr], \
data->reg[nr]); \
mutex_unlock(&data->update_lock); \
return count; \
}
-store_temp_reg(OVER, temp_max);
-store_temp_reg(HYST, temp_max_hyst);
+store_temp_reg(reg_temp_over, temp_max);
+store_temp_reg(reg_temp_hyst, temp_max_hyst);
static ssize_t
show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
@@ -922,27 +1196,69 @@ show_temp_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static struct sensor_device_attribute sda_temp_input[] = {
- SENSOR_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0),
- SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0),
- SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 1),
+ SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0),
+ SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1),
+ SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2),
+ SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3),
+ SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4),
+ SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5),
+ SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6),
+ SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7),
+ SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8),
+};
+
+static struct sensor_device_attribute sda_temp_label[] = {
+ SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
+ SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
+ SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
+ SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
+ SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
+ SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
+ SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
+ SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
+ SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
};
static struct sensor_device_attribute sda_temp_max[] = {
- SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp1_max,
- store_temp1_max, 0),
- SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
+ SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
store_temp_max, 0),
- SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
+ SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
store_temp_max, 1),
+ SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 2),
+ SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 3),
+ SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 4),
+ SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 5),
+ SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 6),
+ SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 7),
+ SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max,
+ store_temp_max, 8),
};
static struct sensor_device_attribute sda_temp_max_hyst[] = {
- SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1_max_hyst,
- store_temp1_max_hyst, 0),
- SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
store_temp_max_hyst, 0),
- SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
store_temp_max_hyst, 1),
+ SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 2),
+ SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 3),
+ SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 4),
+ SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 5),
+ SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 6),
+ SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 7),
+ SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst,
+ store_temp_max_hyst, 8),
};
static struct sensor_device_attribute sda_temp_alarm[] = {
@@ -958,11 +1274,12 @@ static struct sensor_device_attribute sda_temp_type[] = {
};
#define show_pwm_reg(reg) \
-static ssize_t show_##reg (struct device *dev, struct device_attribute *attr, \
- char *buf) \
+static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", data->reg[nr]); \
}
@@ -978,9 +1295,14 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
u16 reg;
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1001,11 +1323,18 @@ store_pwm(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255);
+ unsigned long val;
+ int err;
+
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_PWM[nr], val);
+ w83627ehf_write_value(data, data->REG_PWM[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1015,19 +1344,38 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u32 val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
u16 reg;
- if (!val || (val > 4))
+ err = strict_strtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (!val || (val > 4 && val != data->pwm_enable_orig[nr]))
return -EINVAL;
+ /* SmartFan III mode is not supported on NCT6776F */
+ if (sio_data->kind == nct6776 && val == 4)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
data->pwm_enable[nr] = val;
- reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
- reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
- w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
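+		/* Enable mode is in the upper nibble of FAN_MODE,
+		   tolerance in the lower */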
+ reg = w83627ehf_read_value(data,
+ NCT6775_REG_FAN_MODE[nr]);
+ reg &= 0x0f;
+ reg |= (val - 1) << 4;
+ w83627ehf_write_value(data,
+ NCT6775_REG_FAN_MODE[nr], reg);
+ } else {
+ reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
+ reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]);
+ reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr];
+ w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg);
+ }
mutex_unlock(&data->update_lock);
return count;
}
@@ -1038,9 +1386,10 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- return sprintf(buf, "%d\n", temp1_from_reg(data->reg[nr])); \
+ return sprintf(buf, "%d\n", data->reg[nr] * 1000); \
}
show_tol_temp(tolerance)
@@ -1053,11 +1402,18 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
- u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 127000);
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
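+	/* Input is in millidegrees C, the register holds whole degrees */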
+ val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127);
mutex_lock(&data->update_lock);
data->target_temp[nr] = val;
- w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val);
+ w83627ehf_write_value(data, data->REG_TARGET[nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1067,20 +1423,37 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
+ long val;
+ int err;
+
+ err = strict_strtol(buf, 10, &val);
+ if (err < 0)
+ return err;
+
/* Limit the temp to 0C - 15C */
- u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 15000);
+ val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15);
mutex_lock(&data->update_lock);
- reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
- data->tolerance[nr] = val;
- if (nr == 1)
- reg = (reg & 0x0f) | (val << 4);
- else
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ /* Limit tolerance further for NCT6776F */
+ if (sio_data->kind == nct6776 && val > 7)
+ val = 7;
+ reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]);
reg = (reg & 0xf0) | val;
- w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
+ w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg);
+ } else {
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]);
+ if (nr == 1)
+ reg = (reg & 0x0f) | (val << 4);
+ else
+ reg = (reg & 0xf0) | val;
+ w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg);
+ }
+ data->tolerance[nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
@@ -1143,18 +1516,25 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", data->reg[nr]); \
-}\
+} \
static ssize_t \
store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
-{\
+{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
+ val = SENSORS_LIMIT(val, 1, 255); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, data->REG_##REG[nr], val); \
@@ -1172,10 +1552,12 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct w83627ehf_data *data = w83627ehf_update_device(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
- step_time_from_reg(data->reg[nr], data->pwm_mode[nr])); \
+ step_time_from_reg(data->reg[nr], \
+ data->pwm_mode[nr])); \
} \
\
static ssize_t \
@@ -1183,10 +1565,15 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct w83627ehf_data *data = dev_get_drvdata(dev); \
- struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \
+ struct sensor_device_attribute *sensor_attr = \
+ to_sensor_dev_attr(attr); \
int nr = sensor_attr->index; \
- u8 val = step_time_to_reg(simple_strtoul(buf, NULL, 10), \
- data->pwm_mode[nr]); \
+ unsigned long val; \
+ int err; \
+ err = strict_strtoul(buf, 10, &val); \
+ if (err < 0) \
+ return err; \
+ val = step_time_to_reg(val, data->pwm_mode[nr]); \
mutex_lock(&data->update_lock); \
data->reg[nr] = val; \
w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \
@@ -1283,7 +1670,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
struct sensor_device_attribute *attr =
&sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff)
device_remove_file(dev, &attr->dev_attr);
}
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++)
@@ -1309,12 +1697,15 @@ static void w83627ehf_device_remove_files(struct device *dev)
device_remove_file(dev, &sda_target_temp[i].dev_attr);
device_remove_file(dev, &sda_tolerance[i].dev_attr);
}
- for (i = 0; i < 3; i++) {
- if ((i == 2) && data->temp3_disable)
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
continue;
device_remove_file(dev, &sda_temp_input[i].dev_attr);
+ device_remove_file(dev, &sda_temp_label[i].dev_attr);
device_remove_file(dev, &sda_temp_max[i].dev_attr);
device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
+ if (i > 2)
+ continue;
device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
device_remove_file(dev, &sda_temp_type[i].dev_attr);
}
@@ -1335,15 +1726,17 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
w83627ehf_write_value(data, W83627EHF_REG_CONFIG,
tmp | 0x01);
- /* Enable temp2 and temp3 if needed */
- for (i = 0; i < 2; i++) {
- tmp = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_CONFIG[i]);
- if ((i == 1) && data->temp3_disable)
+ /* Enable temperature sensors if needed */
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+ if (!data->reg_temp_config[i])
continue;
+ tmp = w83627ehf_read_value(data,
+ data->reg_temp_config[i]);
if (tmp & 0x01)
w83627ehf_write_value(data,
- W83627EHF_REG_TEMP_CONFIG[i],
+ data->reg_temp_config[i],
tmp & 0xfe);
}
@@ -1362,13 +1755,39 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
}
}
+static void w82627ehf_swap_tempreg(struct w83627ehf_data *data,
+ int r1, int r2)
+{
+ u16 tmp;
+
+ tmp = data->temp_src[r1];
+ data->temp_src[r1] = data->temp_src[r2];
+ data->temp_src[r2] = tmp;
+
+ tmp = data->reg_temp[r1];
+ data->reg_temp[r1] = data->reg_temp[r2];
+ data->reg_temp[r2] = tmp;
+
+ tmp = data->reg_temp_over[r1];
+ data->reg_temp_over[r1] = data->reg_temp_over[r2];
+ data->reg_temp_over[r2] = tmp;
+
+ tmp = data->reg_temp_hyst[r1];
+ data->reg_temp_hyst[r1] = data->reg_temp_hyst[r2];
+ data->reg_temp_hyst[r2] = tmp;
+
+ tmp = data->reg_temp_config[r1];
+ data->reg_temp_config[r1] = data->reg_temp_config[r2];
+ data->reg_temp_config[r2] = tmp;
+}
+
static int __devinit w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct w83627ehf_sio_data *sio_data = dev->platform_data;
struct w83627ehf_data *data;
struct resource *res;
- u8 fan4pin, fan5pin, en_vrm10;
+ u8 fan3pin, fan4pin, fan4min, fan5pin, en_vrm10;
int i, err = 0;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1380,7 +1799,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
goto exit;
}
- if (!(data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL))) {
+ data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL);
+ if (!data) {
err = -ENOMEM;
goto exit_release;
}
@@ -1393,25 +1813,202 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */
data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9;
- /* 667HG has 3 pwms */
+ /* 667HG, NCT6775F, and NCT6776F have 3 pwms */
data->pwm_num = (sio_data->kind == w83667hg
- || sio_data->kind == w83667hg_b) ? 3 : 4;
+ || sio_data->kind == w83667hg_b
+ || sio_data->kind == nct6775
+ || sio_data->kind == nct6776) ? 3 : 4;
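+	/* Assume temp1..temp3 are present; adjusted below per chip */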
+ data->have_temp = 0x07;
/* Check temp3 configuration bit for 667HG */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
- data->temp3_disable = w83627ehf_read_value(data,
- W83627EHF_REG_TEMP_CONFIG[1]) & 0x01;
- data->in6_skip = !data->temp3_disable;
+ if (sio_data->kind == w83667hg) {
+ u8 reg;
+
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]);
+ if (reg & 0x01)
+ data->have_temp &= ~(1 << 2);
+ else
+ data->in6_skip = 1; /* either temp3 or in6 */
+ }
+
+ /* Deal with temperature register setup first. */
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ int mask = 0;
+
+ /*
+ * Display temperature sensor output only if it monitors
+ * a source other than one already reported. Always display
+ * first three temperature registers, though.
+ */
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ u8 src;
+
+ data->reg_temp[i] = NCT6775_REG_TEMP[i];
+ data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i];
+
+ src = w83627ehf_read_value(data,
+ NCT6775_REG_TEMP_SOURCE[i]);
+ src &= 0x1f;
+ if (src && !(mask & (1 << src))) {
+ data->have_temp |= 1 << i;
+ mask |= 1 << src;
+ }
+
+ data->temp_src[i] = src;
+
+ /*
+			 * Now do some register swapping if indices 0..2
+			 * don't point to SYSTIN(1), CPUTIN(2), and AUXTIN(3).
+			 * The idea is to have the first three attributes
+			 * report SYSTIN, CPUTIN, and AUXTIN if possible
+ * without overriding the basic system configuration.
+ */
+ if (i > 0 && data->temp_src[0] != 1
+ && data->temp_src[i] == 1)
+ w82627ehf_swap_tempreg(data, 0, i);
+ if (i > 1 && data->temp_src[1] != 2
+ && data->temp_src[i] == 2)
+ w82627ehf_swap_tempreg(data, 1, i);
+ if (i > 2 && data->temp_src[2] != 3
+ && data->temp_src[i] == 3)
+ w82627ehf_swap_tempreg(data, 2, i);
+ }
+ if (sio_data->kind == nct6776) {
+ /*
+ * On NCT6776, AUXTIN and VIN3 pins are shared.
+ * Only way to detect it is to check if AUXTIN is used
+ * as a temperature source, and if that source is
+ * enabled.
+ *
+ * If that is the case, disable in6, which reports VIN3.
+ * Otherwise disable temp3.
+ */
+ if (data->temp_src[2] == 3) {
+ u8 reg;
+
+ if (data->reg_temp_config[2])
+ reg = w83627ehf_read_value(data,
+ data->reg_temp_config[2]);
+ else
+ reg = 0; /* Assume AUXTIN is used */
+
+ if (reg & 0x01)
+ data->have_temp &= ~(1 << 2);
+ else
+ data->in6_skip = 1;
+ }
+ data->temp_label = nct6776_temp_label;
+ } else {
+ data->temp_label = nct6775_temp_label;
+ }
+ } else if (sio_data->kind == w83667hg_b) {
+ u8 reg;
+
+ /*
+ * Temperature sources are selected with bank 0, registers 0x49
+ * and 0x4a.
+ */
+ for (i = 0; i < ARRAY_SIZE(W83627EHF_REG_TEMP); i++) {
+ data->reg_temp[i] = W83627EHF_REG_TEMP[i];
+ data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+ }
+ reg = w83627ehf_read_value(data, 0x4a);
+ data->temp_src[0] = reg >> 5;
+ reg = w83627ehf_read_value(data, 0x49);
+ data->temp_src[1] = reg & 0x07;
+ data->temp_src[2] = (reg >> 4) & 0x07;
+
+ /*
+ * W83667HG-B has another temperature register at 0x7e.
+ * The temperature source is selected with register 0x7d.
+ * Support it if the source differs from already reported
+ * sources.
+ */
+ reg = w83627ehf_read_value(data, 0x7d);
+ reg &= 0x07;
+ if (reg != data->temp_src[0] && reg != data->temp_src[1]
+ && reg != data->temp_src[2]) {
+ data->temp_src[3] = reg;
+ data->have_temp |= 1 << 3;
+ }
+
+ /*
+ * Chip supports either AUXTIN or VIN3. Try to find out which
+ * one.
+ */
+ reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]);
+ if (data->temp_src[2] == 2 && (reg & 0x01))
+ data->have_temp &= ~(1 << 2);
+
+ if ((data->temp_src[2] == 2 && (data->have_temp & (1 << 2)))
+ || (data->temp_src[3] == 2 && (data->have_temp & (1 << 3))))
+ data->in6_skip = 1;
+
+ data->temp_label = w83667hg_b_temp_label;
+ } else {
+ /* Temperature sources are fixed */
+ for (i = 0; i < 3; i++) {
+ data->reg_temp[i] = W83627EHF_REG_TEMP[i];
+ data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i];
+ data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i];
+ data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i];
+ }
}
- data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
- data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
- if (sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == nct6775) {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg16;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = NCT6775_REG_PWM;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6775_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
+ data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT;
+ data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT;
+ } else if (sio_data->kind == nct6776) {
+ data->has_fan_div = false;
+ data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg_min = fan_from_reg13;
+ data->REG_PWM = NCT6775_REG_PWM;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6775_REG_FAN;
+ data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME;
+ } else if (sio_data->kind == w83667hg_b) {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg8;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = W83627EHF_REG_PWM;
+ data->REG_TARGET = W83627EHF_REG_TARGET;
+ data->REG_FAN = W83627EHF_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B;
data->REG_FAN_STEP_OUTPUT =
W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B;
} else {
+ data->has_fan_div = true;
+ data->fan_from_reg = fan_from_reg8;
+ data->fan_from_reg_min = fan_from_reg8;
+ data->REG_PWM = W83627EHF_REG_PWM;
+ data->REG_TARGET = W83627EHF_REG_TARGET;
+ data->REG_FAN = W83627EHF_REG_FAN;
+ data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN;
+ data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT;
+ data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT;
+ data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME;
data->REG_FAN_MAX_OUTPUT =
W83627EHF_REG_FAN_MAX_OUTPUT_COMMON;
data->REG_FAN_STEP_OUTPUT =
@@ -1424,7 +2021,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
data->vrm = vid_which_vrm();
superio_enter(sio_data->sioreg);
/* Read VID value */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b ||
+ sio_data->kind == nct6775 || sio_data->kind == nct6776) {
/* W83667HG has different pins for VID input and output, so
we can get the VID input values directly at logical device D
0xe3. */
@@ -1475,13 +2073,44 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
/* fan4 and fan5 share some pins with the GPIO and serial flash */
- if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
- fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
+ if (sio_data->kind == nct6775) {
+ /* On NCT6775, fan4 shares pins with the fdc interface */
+ fan3pin = 1;
+ fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
+ fan4min = 0;
+ fan5pin = 0;
+ } else if (sio_data->kind == nct6776) {
+ fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+ fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
+ fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+ fan4min = fan4pin;
+ } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
+ fan3pin = 1;
fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40;
+ fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20;
+ fan4min = fan4pin;
} else {
- fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
+ fan3pin = 1;
fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06);
+ fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02);
+ fan4min = fan4pin;
}
+
+ if (fan_debounce &&
+ (sio_data->kind == nct6775 || sio_data->kind == nct6776)) {
+ u8 tmp;
+
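+		/* Enable fan input debouncing; the NCT6776F register
+		   has one additional enable bit (0x3e vs. 0x1e) */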
+ superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+ tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE);
+ if (sio_data->kind == nct6776)
+ superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
+ 0x3e | tmp);
+ else
+ superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE,
+ 0x1e | tmp);
+ pr_info("Enabled fan debounce for chip %s\n", data->name);
+ }
+
superio_exit(sio_data->sioreg);
/* It looks like fan4 and fan5 pins can be alternatively used
@@ -1490,26 +2119,54 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
connected fan5 as input unless they are emitting log 1, which
is not the default. */
- data->has_fan = 0x07; /* fan1, fan2 and fan3 */
- i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
- if ((i & (1 << 2)) && fan4pin)
- data->has_fan |= (1 << 3);
- if (!(i & (1 << 1)) && fan5pin)
- data->has_fan |= (1 << 4);
+ data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
+
+ data->has_fan |= (fan3pin << 2);
+ data->has_fan_min |= (fan3pin << 2);
+
+ /*
+ * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 register
+ */
+ if (sio_data->kind == nct6775 || sio_data->kind == nct6776) {
+ data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
+ data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
+ } else {
+ i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1);
+ if ((i & (1 << 2)) && fan4pin) {
+ data->has_fan |= (1 << 3);
+ data->has_fan_min |= (1 << 3);
+ }
+ if (!(i & (1 << 1)) && fan5pin) {
+ data->has_fan |= (1 << 4);
+ data->has_fan_min |= (1 << 4);
+ }
+ }
/* Read fan clock dividers immediately */
- w83627ehf_update_fan_div(data);
+ w83627ehf_update_fan_div_common(dev, data);
+
+ /* Read pwm data to save original values */
+ w83627ehf_update_pwm_common(dev, data);
+ for (i = 0; i < data->pwm_num; i++)
+ data->pwm_enable_orig[i] = data->pwm_enable[i];
+
/* Register sysfs hooks */
- for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++)
- if ((err = device_create_file(dev,
- &sda_sf3_arrays[i].dev_attr)))
+ for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
+ err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
+ if (err)
goto exit_remove;
+ }
for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) {
struct sensor_device_attribute *attr =
&sda_sf3_max_step_arrays[i];
- if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
+ if (data->REG_FAN_STEP_OUTPUT &&
+ data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) {
err = device_create_file(dev, &attr->dev_attr);
if (err)
goto exit_remove;
@@ -1518,8 +2175,9 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
/* if fan4 is enabled create the sf3 files for it */
if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4)
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) {
- if ((err = device_create_file(dev,
- &sda_sf3_arrays_fan4[i].dev_attr)))
+ err = device_create_file(dev,
+ &sda_sf3_arrays_fan4[i].dev_attr);
+ if (err)
goto exit_remove;
}
@@ -1541,12 +2199,20 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
if ((err = device_create_file(dev,
&sda_fan_input[i].dev_attr))
|| (err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_div[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_fan_min[i].dev_attr)))
+ &sda_fan_alarm[i].dev_attr)))
goto exit_remove;
+ if (sio_data->kind != nct6776) {
+ err = device_create_file(dev,
+ &sda_fan_div[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->has_fan_min & (1 << i)) {
+ err = device_create_file(dev,
+ &sda_fan_min[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
if (i < data->pwm_num &&
((err = device_create_file(dev,
&sda_pwm[i].dev_attr))
@@ -1562,16 +2228,33 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
}
- for (i = 0; i < 3; i++) {
- if ((i == 2) && data->temp3_disable)
+ for (i = 0; i < NUM_REG_TEMP; i++) {
+ if (!(data->have_temp & (1 << i)))
+ continue;
+ err = device_create_file(dev, &sda_temp_input[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ if (data->temp_label) {
+ err = device_create_file(dev,
+ &sda_temp_label[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->reg_temp_over[i]) {
+ err = device_create_file(dev,
+ &sda_temp_max[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (data->reg_temp_hyst[i]) {
+ err = device_create_file(dev,
+ &sda_temp_max_hyst[i].dev_attr);
+ if (err)
+ goto exit_remove;
+ }
+ if (i > 2)
continue;
if ((err = device_create_file(dev,
- &sda_temp_input[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_max[i].dev_attr))
- || (err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr))
- || (err = device_create_file(dev,
&sda_temp_alarm[i].dev_attr))
|| (err = device_create_file(dev,
&sda_temp_type[i].dev_attr)))
@@ -1632,6 +2315,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P";
static const char __initdata sio_name_W83667HG[] = "W83667HG";
static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B";
+ static const char __initdata sio_name_NCT6775[] = "NCT6775F";
+ static const char __initdata sio_name_NCT6776[] = "NCT6776F";
u16 val;
const char *sio_name;
@@ -1668,6 +2353,14 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
sio_data->kind = w83667hg_b;
sio_name = sio_name_W83667HG_B;
break;
+ case SIO_NCT6775_ID:
+ sio_data->kind = nct6775;
+ sio_name = sio_name_NCT6775;
+ break;
+ case SIO_NCT6776_ID:
+ sio_data->kind = nct6776;
+ sio_name = sio_name_NCT6776;
+ break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -1689,7 +2382,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr,
/* Activate logical device if needed */
val = superio_inb(sioaddr, SIO_REG_ENABLE);
if (!(val & 0x01)) {
- pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
+ pr_warn("Forcibly enabling Super-I/O. "
+ "Sensor is probably unusable.\n");
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
@@ -1726,7 +2420,8 @@ static int __init sensors_w83627ehf_init(void)
if (err)
goto exit;
- if (!(pdev = platform_device_alloc(DRVNAME, address))) {
+ pdev = platform_device_alloc(DRVNAME, address);
+ if (!pdev) {
err = -ENOMEM;
pr_err("Device allocation failed\n");
goto exit_unregister;
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 113505a6434e..230601e8853f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -433,7 +433,7 @@ config I2C_IXP2000
config I2C_MPC
tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
- depends on PPC32
+ depends on PPC
help
If you say yes to this option, support will be included for the
built-in I2C interface on the MPC107, Tsi107, MPC512x, MPC52xx,
@@ -452,6 +452,16 @@ config I2C_MV64XXX
This driver can also be built as a module. If so, the module
will be called i2c-mv64xxx.
+config I2C_MXS
+ tristate "Freescale i.MX28 I2C interface"
+ depends on SOC_IMX28
+ help
+ Say Y here if you want to use the I2C bus controller on
+ the Freescale i.MX28 processors.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-mxs.
+
config I2C_NOMADIK
tristate "ST-Ericsson Nomadik/Ux500 I2C Controller"
depends on PLAT_NOMADIK
@@ -523,6 +533,17 @@ config I2C_PNX
This driver can also be built as a module. If so, the module
will be called i2c-pnx.
+config I2C_PUV3
+ tristate "PKUnity v3 I2C bus support"
+ depends on UNICORE32 && ARCH_PUV3
+ select I2C_ALGOBIT
+ help
+ This driver supports the I2C IP inside the PKUnity-v3 SoC.
+ This I2C bus controller sits on the AMBA/AXI bus.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-puv3.
+
config I2C_PXA
tristate "Intel PXA2XX I2C adapter"
depends on ARCH_PXA || ARCH_MMP
@@ -607,6 +628,13 @@ config I2C_STU300
This driver can also be built as a module. If so, the module
will be called i2c-stu300.
+config I2C_TEGRA
+ tristate "NVIDIA Tegra internal I2C controller"
+ depends on ARCH_TEGRA
+ help
+ If you say yes to this option, support will be included for the
+ I2C controller embedded in NVIDIA Tegra SoCs.
+
config I2C_VERSATILE
tristate "ARM Versatile/Realview I2C bus support"
depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 9d2d0ec7fb23..3878c959d4fa 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
+obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o
obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o
obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o
obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o
obj-$(CONFIG_I2C_PMCMSP) += i2c-pmcmsp.o
obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
+obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o
obj-$(CONFIG_I2C_S6000) += i2c-s6000.o
@@ -58,6 +60,7 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o
obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o
obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
+obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o
obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
new file mode 100644
index 000000000000..8022e2390a5a
--- /dev/null
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -0,0 +1,412 @@
+/*
+ * Freescale MXS I2C bus driver
+ *
+ * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
+ *
+ * based on a (non-working) driver which was:
+ *
+ * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * TODO: add dma-support if platform-support for it is available
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+
+#include <mach/common.h>
+
+#define DRIVER_NAME "mxs-i2c"
+
+#define MXS_I2C_CTRL0 (0x00)
+#define MXS_I2C_CTRL0_SET (0x04)
+
+#define MXS_I2C_CTRL0_SFTRST 0x80000000
+#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000
+#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000
+#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000
+#define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000
+#define MXS_I2C_CTRL0_MASTER_MODE 0x00020000
+#define MXS_I2C_CTRL0_DIRECTION 0x00010000
+#define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF)
+
+#define MXS_I2C_CTRL1 (0x40)
+#define MXS_I2C_CTRL1_SET (0x44)
+#define MXS_I2C_CTRL1_CLR (0x48)
+
+#define MXS_I2C_CTRL1_BUS_FREE_IRQ 0x80
+#define MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ 0x40
+#define MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ 0x20
+#define MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ 0x10
+#define MXS_I2C_CTRL1_EARLY_TERM_IRQ 0x08
+#define MXS_I2C_CTRL1_MASTER_LOSS_IRQ 0x04
+#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02
+#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01
+
+#define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \
+ MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \
+ MXS_I2C_CTRL1_EARLY_TERM_IRQ | \
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \
+ MXS_I2C_CTRL1_SLAVE_IRQ)
+
+#define MXS_I2C_QUEUECTRL (0x60)
+#define MXS_I2C_QUEUECTRL_SET (0x64)
+#define MXS_I2C_QUEUECTRL_CLR (0x68)
+
+#define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20
+#define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04
+
+#define MXS_I2C_QUEUESTAT (0x70)
+#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
+
+#define MXS_I2C_QUEUECMD (0x80)
+
+#define MXS_I2C_QUEUEDATA (0x90)
+
+#define MXS_I2C_DATA (0xa0)
+
+
+#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \
+ MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION | \
+ MXS_I2C_CTRL0_XFER_COUNT(1))
+
+#define MXS_CMD_I2C_WRITE (MXS_I2C_CTRL0_PRE_SEND_START | \
+ MXS_I2C_CTRL0_MASTER_MODE | \
+ MXS_I2C_CTRL0_DIRECTION)
+
+#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
+ MXS_I2C_CTRL0_MASTER_MODE)
+
+/**
+ * struct mxs_i2c_dev - per device, private MXS-I2C data
+ *
+ * @dev: driver model device node
+ * @regs: IO registers pointer
+ * @cmd_complete: completion object for transaction wait
+ * @cmd_err: error code for last transaction
+ * @adapter: i2c subsystem adapter node
+ */
+struct mxs_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct completion cmd_complete;
+ u32 cmd_err;
+ struct i2c_adapter adapter;
+};
+
+/*
+ * TODO: check if calls to here are really needed. If not, we could get rid of
+ * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
+ */
+static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
+{
+ mxs_reset_block(i2c->regs);
+ writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+}
+
+static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len,
+ int flags)
+{
+ u32 data;
+
+ writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD);
+
+ data = (addr << 1) | I2C_SMBUS_READ;
+ writel(data, i2c->regs + MXS_I2C_DATA);
+
+ data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+}
+
+static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c,
+ u8 addr, u8 *buf, int len, int flags)
+{
+ u32 data;
+ int i, shifts_left;
+
+ data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags;
+ writel(data, i2c->regs + MXS_I2C_QUEUECMD);
+
+ /*
+ * We have to copy the slave address (u8) and buffer (arbitrary number
+ * of u8) into the data register (u32). To achieve that, the u8 are put
+ * into the MSBs of 'data' which is then shifted for the next u8. When
+ * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32
+ * looks like this:
+ *
+ * 3 2 1 0
+ * 10987654|32109876|54321098|76543210
+ * --------+--------+--------+--------
+ * buffer+2|buffer+1|buffer+0|slave_addr
+ */
+
+ data = ((addr << 1) | I2C_SMBUS_WRITE) << 24;
+
+ for (i = 0; i < len; i++) {
+ data >>= 8;
+ data |= buf[i] << 24;
+ if ((i & 3) == 2)
+ writel(data, i2c->regs + MXS_I2C_DATA);
+ }
+
+ /* Write out the remaining bytes if any */
+ shifts_left = 24 - (i & 3) * 8;
+ if (shifts_left)
+ writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA);
+}
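
The packing described in the comment above can be checked in isolation; a small host-side sketch (assumed standalone test code, mirroring the loop above with a three-byte payload):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t addr = 0x50, buf[3] = { 0xaa, 0xbb, 0xcc };
		/* I2C_SMBUS_WRITE is 0, so the low bit of the address byte stays clear */
		uint32_t data = (uint32_t)((addr << 1) | 0) << 24;
		int i;

		for (i = 0; i < 3; i++) {
			data >>= 8;
			data |= (uint32_t)buf[i] << 24;
		}
		/* prints 0xccbbaaa0: buffer+2|buffer+1|buffer+0|slave_addr */
		printf("0x%08x\n", data);
		return 0;
	}
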
+
+/*
+ * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the
+ * rd_threshold to 1). Couldn't get this to work, though.
+ */
+static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+
+ while (readl(i2c->regs + MXS_I2C_QUEUESTAT)
+ & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len)
+{
+ u32 data;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if ((i & 3) == 0) {
+ if (mxs_i2c_wait_for_data(i2c))
+ return -ETIMEDOUT;
+ data = readl(i2c->regs + MXS_I2C_QUEUEDATA);
+ }
+ buf[i] = data & 0xff;
+ data >>= 8;
+ }
+
+ return 0;
+}
+
+/*
+ * Low level master read/write transaction.
+ */
+static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
+ int stop)
+{
+ struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
+ int ret;
+ int flags;
+
+ init_completion(&i2c->cmd_complete);
+
+ dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
+ msg->addr, msg->len, msg->flags, stop);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
+
+ if (msg->flags & I2C_M_RD)
+ mxs_i2c_pioq_setup_read(i2c, msg->addr, msg->len, flags);
+ else
+ mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf, msg->len,
+ flags);
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+
+ ret = wait_for_completion_timeout(&i2c->cmd_complete,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ goto timeout;
+
+ if ((!i2c->cmd_err) && (msg->flags & I2C_M_RD)) {
+ ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len);
+ if (ret)
+ goto timeout;
+ }
+
+ if (i2c->cmd_err == -ENXIO)
+ mxs_i2c_reset(i2c);
+
+ dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err);
+
+ return i2c->cmd_err;
+
+timeout:
+ dev_dbg(i2c->dev, "Timeout!\n");
+ mxs_i2c_reset(i2c);
+ return -ETIMEDOUT;
+}
+
+static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ int i;
+ int err;
+
+ for (i = 0; i < num; i++) {
+ err = mxs_i2c_xfer_msg(adap, &msgs[i], i == (num - 1));
+ if (err)
+ return err;
+ }
+
+ return num;
+}
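
master_xfer returns the number of messages completed, and clients reach it through the core's i2c_transfer(). A hedged sketch of a typical consumer transaction against this adapter (hypothetical device and register, not part of the patch):

	#include <linux/i2c.h>

	/* write one register address, then read two bytes back, in one transaction */
	static int example_read_word(struct i2c_client *client, u8 reg, u8 *val)
	{
		struct i2c_msg msgs[2] = {
			{ .addr = client->addr, .flags = 0, .len = 1, .buf = &reg },
			{ .addr = client->addr, .flags = I2C_M_RD, .len = 2, .buf = val },
		};

		/* i2c_transfer() returns the number of messages on success */
		return i2c_transfer(client->adapter, msgs, 2) == 2 ? 0 : -EIO;
	}
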
+
+static u32 mxs_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
+{
+ struct mxs_i2c_dev *i2c = dev_id;
+ u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
+
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ)
+ i2c->cmd_err = -ENXIO;
+ else if (stat & (MXS_I2C_CTRL1_EARLY_TERM_IRQ |
+ MXS_I2C_CTRL1_MASTER_LOSS_IRQ |
+ MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ))
+ /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */
+ i2c->cmd_err = -EIO;
+ else
+ i2c->cmd_err = 0;
+
+ complete(&i2c->cmd_complete);
+
+ writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
+ return IRQ_HANDLED;
+}
+
+static const struct i2c_algorithm mxs_i2c_algo = {
+ .master_xfer = mxs_i2c_xfer,
+ .functionality = mxs_i2c_func,
+};
+
+static int __devinit mxs_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxs_i2c_dev *i2c;
+ struct i2c_adapter *adap;
+ struct resource *res;
+ resource_size_t res_size;
+ int err, irq;
+
+ i2c = devm_kzalloc(dev, sizeof(struct mxs_i2c_dev), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ res_size = resource_size(res);
+ if (!devm_request_mem_region(dev, res->start, res_size, res->name))
+ return -EBUSY;
+
+ i2c->regs = devm_ioremap_nocache(dev, res->start, res_size);
+ if (!i2c->regs)
+ return -EBUSY;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c);
+ if (err)
+ return err;
+
+ i2c->dev = dev;
+ platform_set_drvdata(pdev, i2c);
+
+ /* Do reset to enforce correct startup after pinmuxing */
+ mxs_i2c_reset(i2c);
+ writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
+ i2c->regs + MXS_I2C_QUEUECTRL_SET);
+
+ adap = &i2c->adapter;
+ strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
+ adap->owner = THIS_MODULE;
+ adap->algo = &mxs_i2c_algo;
+ adap->dev.parent = dev;
+ adap->nr = pdev->id;
+ i2c_set_adapdata(adap, i2c);
+ err = i2c_add_numbered_adapter(adap);
+ if (err) {
+ dev_err(dev, "Failed to add adapter (%d)\n", err);
+ writel(MXS_I2C_CTRL0_SFTRST,
+ i2c->regs + MXS_I2C_CTRL0_SET);
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devexit mxs_i2c_remove(struct platform_device *pdev)
+{
+ struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i2c_del_adapter(&i2c->adapter);
+ if (ret)
+ return -EBUSY;
+
+ writel(MXS_I2C_QUEUECTRL_QUEUE_RUN,
+ i2c->regs + MXS_I2C_QUEUECTRL_CLR);
+ writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver mxs_i2c_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .remove = __devexit_p(mxs_i2c_remove),
+};
+
+static int __init mxs_i2c_init(void)
+{
+ return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe);
+}
+subsys_initcall(mxs_i2c_init);
+
+static void __exit mxs_i2c_exit(void)
+{
+ platform_driver_unregister(&mxs_i2c_driver);
+}
+module_exit(mxs_i2c_exit);
+
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("MXS I2C Bus Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
new file mode 100644
index 000000000000..fac673940849
--- /dev/null
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -0,0 +1,306 @@
+/*
+ * I2C driver for PKUnity-v3 SoC
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+/*
+ * Poll the i2c status register until the specified bit is set.
+ * Returns 0 if timed out (about 10 ms of polling; EEPROM reads may wait longer).
+ */
+static short poll_status(unsigned long bit)
+{
+ int loop_cntr = 1000;
+
+ if (bit & I2C_STATUS_TFNF) {
+ do {
+ udelay(10);
+ } while (!(readl(I2C_STATUS) & bit) && (--loop_cntr > 0));
+ } else {
+ /* RXRDY handler */
+ do {
+ if (readl(I2C_TAR) == I2C_TAR_EEPROM)
+ msleep(20);
+ else
+ udelay(10);
+ } while (!(readl(I2C_RXFLR) & 0xf) && (--loop_cntr > 0));
+ }
+
+ return (loop_cntr > 0);
+}
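
The loop above budgets roughly 10 ms for the FIFO case (1000 iterations of udelay(10)), stretching much longer for EEPROM reads. An equivalent wall-clock formulation using the jiffies helpers (an alternative sketch reusing this file's I2C_STATUS accessor, not the patch's code):

	#include <linux/jiffies.h>
	#include <linux/delay.h>

	static int poll_bit_timeout(unsigned long bit)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(10);

		while (!(readl(I2C_STATUS) & bit)) {
			if (time_after(jiffies, timeout))
				return 0;	/* timed out, like poll_status() */
			udelay(10);
		}
		return 1;
	}
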
+
+static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length)
+{
+ int i2c_reg = *buf;
+
+ /* Read data */
+ while (length--) {
+ if (!poll_status(I2C_STATUS_TFNF)) {
+ dev_dbg(&adap->dev, "Tx FIFO Not Full timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* send addr */
+ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+ /* advance to the next register */
+ i2c_reg++;
+
+ /* send read CMD */
+ writel(I2C_DATACMD_READ, I2C_DATACMD);
+
+ /* wait until the Rx FIFO has data available */
+ if (!poll_status(I2C_STATUS_RFNE)) {
+ dev_dbg(&adap->dev, "RXRDY timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* read the data to buf */
+ *buf = (readl(I2C_DATACMD) & I2C_DATACMD_DAT_MASK);
+ buf++;
+ }
+
+ return 0;
+}
+
+static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length)
+{
+ int i2c_reg = *buf;
+
+ /* Reject an invalid register number */
+ if (i2c_reg == -1) {
+ printk(KERN_WARNING "Error i2c reg\n");
+ return -EINVAL;
+ }
+
+ if (length == 1)
+ return 0;
+
+ buf++;
+ length--;
+ while (length--) {
+ /* send addr */
+ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+ /* send write CMD */
+ writel(*buf | I2C_DATACMD_WRITE, I2C_DATACMD);
+
+ /* give the write time to drain */
+ msleep(20);
+
+ /* advance to the next register and byte */
+ i2c_reg++;
+ buf++;
+ }
+
+ return 0;
+}
+
+/*
+ * Generic i2c master transfer entrypoint.
+ */
+static int puv3_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg,
+ int num)
+{
+ int i, ret;
+ unsigned char swap;
+
+ /* Disable i2c */
+ writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
+
+ /* Set the work mode and speed*/
+ writel(I2C_CON_MASTER | I2C_CON_SPEED_STD | I2C_CON_SLAVEDISABLE, I2C_CON);
+
+ writel(pmsg->addr, I2C_TAR);
+
+ /* Enable i2c */
+ writel(I2C_ENABLE_ENABLE, I2C_ENABLE);
+
+ dev_dbg(&adap->dev, "puv3_i2c_xfer: processing %d messages:\n", num);
+
+ for (i = 0; i < num; i++) {
+ dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i,
+ pmsg->flags & I2C_M_RD ? "read" : "writ",
+ pmsg->len, pmsg->len > 1 ? "s" : "",
+ pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr);
+
+ if (pmsg->len && pmsg->buf) { /* sanity check */
+ if (pmsg->flags & I2C_M_RD)
+ ret = xfer_read(adap, pmsg->buf, pmsg->len);
+ else
+ ret = xfer_write(adap, pmsg->buf, pmsg->len);
+
+ if (ret)
+ return ret;
+
+ }
+ dev_dbg(&adap->dev, "transfer complete\n");
+ pmsg++; /* next message */
+ }
+
+ /* XXX: fixup be16_to_cpu in bq27x00_battery.c */
+ pmsg--; /* the loop above leaves pmsg one past the last message */
+ if (pmsg->addr == I2C_TAR_PWIC) {
+ swap = pmsg->buf[0];
+ pmsg->buf[0] = pmsg->buf[1];
+ pmsg->buf[1] = swap;
+ }
+
+ return i;
+}
+
+/*
+ * Return list of supported functionality.
+ */
+static u32 puv3_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm puv3_i2c_algorithm = {
+ .master_xfer = puv3_i2c_xfer,
+ .functionality = puv3_i2c_func,
+};
+
+/*
+ * Main initialization routine.
+ */
+static int __devinit puv3_i2c_probe(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter;
+ struct resource *mem;
+ int rc;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -ENODEV;
+
+ if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c"))
+ return -EBUSY;
+
+ adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
+ if (adapter == NULL) {
+ dev_err(&pdev->dev, "can't allocate inteface!\n");
+ rc = -ENOMEM;
+ goto fail_nomem;
+ }
+ snprintf(adapter->name, sizeof(adapter->name), "PUV3-I2C at 0x%08x",
+ mem->start);
+ adapter->algo = &puv3_i2c_algorithm;
+ adapter->class = I2C_CLASS_HWMON;
+ adapter->dev.parent = &pdev->dev;
+
+ platform_set_drvdata(pdev, adapter);
+
+ adapter->nr = pdev->id;
+ rc = i2c_add_numbered_adapter(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Adapter '%s' registration failed\n",
+ adapter->name);
+ goto fail_add_adapter;
+ }
+
+ dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n");
+ return 0;
+
+fail_add_adapter:
+ platform_set_drvdata(pdev, NULL);
+ kfree(adapter);
+fail_nomem:
+ release_mem_region(mem->start, resource_size(mem));
+
+ return rc;
+}
+
+static int __devexit puv3_i2c_remove(struct platform_device *pdev)
+{
+ struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+ struct resource *mem;
+ int rc;
+
+ rc = i2c_del_adapter(adapter);
+ if (rc) {
+ dev_err(&pdev->dev, "Adapter '%s' delete fail\n",
+ adapter->name);
+ return rc;
+ }
+
+ put_device(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return rc;
+}
+
+#ifdef CONFIG_PM
+static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state)
+{
+ int poll_count;
+ /* Disable the I2C controller and wait for it to go idle */
+ writel(I2C_ENABLE_DISABLE, I2C_ENABLE);
+ for (poll_count = 0; poll_count < 50; poll_count++) {
+ if (!(readl(I2C_ENSTATUS) & I2C_ENSTATUS_ENABLE))
+ break;
+ udelay(25);
+ }
+
+ return 0;
+}
+
+static int puv3_i2c_resume(struct platform_device *dev)
+{
+ return 0;
+}
+#else
+#define puv3_i2c_suspend NULL
+#define puv3_i2c_resume NULL
+#endif
+
+MODULE_ALIAS("platform:puv3_i2c");
+
+static struct platform_driver puv3_i2c_driver = {
+ .probe = puv3_i2c_probe,
+ .remove = __devexit_p(puv3_i2c_remove),
+ .suspend = puv3_i2c_suspend,
+ .resume = puv3_i2c_resume,
+ .driver = {
+ .name = "PKUnity-v3-I2C",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init puv3_i2c_init(void)
+{
+ return platform_driver_register(&puv3_i2c_driver);
+}
+
+static void __exit puv3_i2c_exit(void)
+{
+ platform_driver_unregister(&puv3_i2c_driver);
+}
+
+module_init(puv3_i2c_init);
+module_exit(puv3_i2c_exit);
+
+MODULE_DESCRIPTION("PKUnity v3 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
new file mode 100644
index 000000000000..3921f664c9c3
--- /dev/null
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -0,0 +1,700 @@
+/*
+ * drivers/i2c/busses/i2c-tegra.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c-tegra.h>
+
+#include <asm/unaligned.h>
+
+#include <mach/clk.h>
+
+#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000))
+#define BYTES_PER_FIFO_WORD 4
+
+#define I2C_CNFG 0x000
+#define I2C_CNFG_PACKET_MODE_EN (1<<10)
+#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
+#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NEWSL (1<<2)
+#define I2C_SL_ADDR1 0x02c
+#define I2C_TX_FIFO 0x050
+#define I2C_RX_FIFO 0x054
+#define I2C_PACKET_TRANSFER_STATUS 0x058
+#define I2C_FIFO_CONTROL 0x05c
+#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1)
+#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0)
+#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5
+#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2
+#define I2C_FIFO_STATUS 0x060
+#define I2C_FIFO_STATUS_TX_MASK 0xF0
+#define I2C_FIFO_STATUS_TX_SHIFT 4
+#define I2C_FIFO_STATUS_RX_MASK 0x0F
+#define I2C_FIFO_STATUS_RX_SHIFT 0
+#define I2C_INT_MASK 0x064
+#define I2C_INT_STATUS 0x068
+#define I2C_INT_PACKET_XFER_COMPLETE (1<<7)
+#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6)
+#define I2C_INT_TX_FIFO_OVERFLOW (1<<5)
+#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4)
+#define I2C_INT_NO_ACK (1<<3)
+#define I2C_INT_ARBITRATION_LOST (1<<2)
+#define I2C_INT_TX_FIFO_DATA_REQ (1<<1)
+#define I2C_INT_RX_FIFO_DATA_REQ (1<<0)
+#define I2C_CLK_DIVISOR 0x06c
+
+#define DVC_CTRL_REG1 0x000
+#define DVC_CTRL_REG1_INTR_EN (1<<10)
+#define DVC_CTRL_REG2 0x004
+#define DVC_CTRL_REG3 0x008
+#define DVC_CTRL_REG3_SW_PROG (1<<26)
+#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30)
+#define DVC_STATUS 0x00c
+#define DVC_STATUS_I2C_DONE_INTR (1<<30)
+
+#define I2C_ERR_NONE 0x00
+#define I2C_ERR_NO_ACK 0x01
+#define I2C_ERR_ARBITRATION_LOST 0x02
+
+#define PACKET_HEADER0_HEADER_SIZE_SHIFT 28
+#define PACKET_HEADER0_PACKET_ID_SHIFT 16
+#define PACKET_HEADER0_CONT_ID_SHIFT 12
+#define PACKET_HEADER0_PROTOCOL_I2C (1<<4)
+
+#define I2C_HEADER_HIGHSPEED_MODE (1<<22)
+#define I2C_HEADER_CONT_ON_NAK (1<<21)
+#define I2C_HEADER_SEND_START_BYTE (1<<20)
+#define I2C_HEADER_READ (1<<19)
+#define I2C_HEADER_10BIT_ADDR (1<<18)
+#define I2C_HEADER_IE_ENABLE (1<<17)
+#define I2C_HEADER_REPEAT_START (1<<16)
+#define I2C_HEADER_MASTER_ADDR_SHIFT 12
+#define I2C_HEADER_SLAVE_ADDR_SHIFT 1
+
+/**
+ * struct tegra_i2c_dev - per device i2c context
+ * @dev: device reference for power management
+ * @adapter: core i2c layer adapter information
+ * @clk: clock reference for i2c controller
+ * @i2c_clk: clock reference for i2c bus
+ * @iomem: memory resource for registers
+ * @base: ioremapped registers cookie
+ * @cont_id: i2c controller id, used for the packet header
+ * @irq: irq number of transfer complete interrupt
+ * @is_dvc: identifies the DVC i2c controller, has a different register layout
+ * @msg_complete: transfer completion notifier
+ * @msg_err: error code for completed message
+ * @msg_buf: pointer to current message data
+ * @msg_buf_remaining: size of unsent data in the message buffer
+ * @msg_read: identifies read transfers
+ * @bus_clk_rate: current i2c bus clock rate
+ * @is_suspended: prevents i2c controller accesses after suspend is called
+ */
+struct tegra_i2c_dev {
+ struct device *dev;
+ struct i2c_adapter adapter;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ struct resource *iomem;
+ void __iomem *base;
+ int cont_id;
+ int irq;
+ int is_dvc;
+ struct completion msg_complete;
+ int msg_err;
+ u8 *msg_buf;
+ size_t msg_buf_remaining;
+ int msg_read;
+ unsigned long bus_clk_rate;
+ bool is_suspended;
+};
+
+static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg)
+{
+ writel(val, i2c_dev->base + reg);
+}
+
+static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + reg);
+}
+
+/*
+ * i2c_writel and i2c_readl will offset the register if necessary to talk
+ * to the I2C block inside the DVC block
+ */
+static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev,
+ unsigned long reg)
+{
+ if (i2c_dev->is_dvc)
+ reg += (reg >= I2C_TX_FIFO) ? 0x10 : 0x40;
+ return reg;
+}
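
Concretely, with is_dvc set the rule above maps I2C_CNFG (0x000, below I2C_TX_FIFO) to 0x000 + 0x40 = 0x040, and I2C_TX_FIFO (0x050) to 0x050 + 0x10 = 0x060 inside the DVC register space.
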
+
+static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val,
+ unsigned long reg)
+{
+ writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg)
+{
+ return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+}
+
+static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
+ unsigned long reg, int len)
+{
+ readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
+}
+
+static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask &= ~mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask)
+{
+ u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK);
+ int_mask |= mask;
+ i2c_writel(i2c_dev, int_mask, I2C_INT_MASK);
+}
+
+static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev)
+{
+ unsigned long timeout = jiffies + HZ;
+ u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL);
+ val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) &
+ (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n");
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int rx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >>
+ I2C_FIFO_STATUS_RX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > rx_fifo_avail)
+ words_to_transfer = rx_fifo_avail;
+
+ i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ rx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent overwriting past the end of buf
+ */
+ if (rx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+ memcpy(buf, &val, buf_remaining);
+ buf_remaining = 0;
+ rx_fifo_avail--;
+ }
+
+ BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
+
+static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int tx_fifo_avail;
+ u8 *buf = i2c_dev->msg_buf;
+ size_t buf_remaining = i2c_dev->msg_buf_remaining;
+ int words_to_transfer;
+
+ val = i2c_readl(i2c_dev, I2C_FIFO_STATUS);
+ tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >>
+ I2C_FIFO_STATUS_TX_SHIFT;
+
+ /* Rounds down to not include partial word at the end of buf */
+ words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
+ if (words_to_transfer > tx_fifo_avail)
+ words_to_transfer = tx_fifo_avail;
+
+ i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+
+ buf += words_to_transfer * BYTES_PER_FIFO_WORD;
+ buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
+ tx_fifo_avail -= words_to_transfer;
+
+ /*
+ * If there is a partial word at the end of buf, handle it manually to
+ * prevent reading past the end of buf, which could cross a page
+ * boundary and fault.
+ */
+ if (tx_fifo_avail > 0 && buf_remaining > 0) {
+ BUG_ON(buf_remaining > 3);
+ memcpy(&val, buf, buf_remaining);
+ i2c_writel(i2c_dev, val, I2C_TX_FIFO);
+ buf_remaining = 0;
+ tx_fifo_avail--;
+ }
+
+ BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
+ i2c_dev->msg_buf_remaining = buf_remaining;
+ i2c_dev->msg_buf = buf;
+ return 0;
+}
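
Both FIFO helpers finish with the same tail trick: at most three leftover bytes are staged through a stack u32 so the hardware access never runs past buf. A standalone sketch of the packing step (assumed test code):

	#include <stdint.h>
	#include <string.h>

	/* pad the final partial word exactly like the memcpy above */
	static uint32_t pack_tail(const uint8_t *buf, size_t remaining)
	{
		uint32_t val = 0;

		/* remaining is 1..3 here; full words were already transferred */
		memcpy(&val, buf, remaining);
		return val;	/* unused upper bytes stay zero */
	}
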
+
+/*
+ * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller)
+ * block. This block is identical to the rest of the I2C blocks, except that
+ * it only supports master mode, it has registers moved around, and it needs
+ * some extra init to get it into I2C mode. The register moves are handled
+ * by i2c_readl and i2c_writel
+ */
+static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val = 0;
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG3);
+ val |= DVC_CTRL_REG3_SW_PROG;
+ val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG3);
+
+ val = dvc_readl(i2c_dev, DVC_CTRL_REG1);
+ val |= DVC_CTRL_REG1_INTR_EN;
+ dvc_writel(i2c_dev, val, DVC_CTRL_REG1);
+}
+
+static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
+{
+ u32 val;
+ int err = 0;
+
+ clk_enable(i2c_dev->clk);
+
+ tegra_periph_reset_assert(i2c_dev->clk);
+ udelay(2);
+ tegra_periph_reset_deassert(i2c_dev->clk);
+
+ if (i2c_dev->is_dvc)
+ tegra_dvc_init(i2c_dev);
+
+ val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN;
+ i2c_writel(i2c_dev, val, I2C_CNFG);
+ i2c_writel(i2c_dev, 0, I2C_INT_MASK);
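+ /*
+ * The *8 below assumes the controller divides its source clock by a
+ * fixed factor of eight before it reaches the bus; that factor is
+ * inferred from this multiplier, not from a datasheet.
+ */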
+ clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8);
+
+ val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
+ 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT;
+ i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL);
+
+ if (tegra_i2c_flush_fifos(i2c_dev))
+ err = -ETIMEDOUT;
+
+ clk_disable(i2c_dev->clk);
+ return err;
+}
+
+static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
+{
+ u32 status;
+ const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ struct tegra_i2c_dev *i2c_dev = dev_id;
+
+ status = i2c_readl(i2c_dev, I2C_INT_STATUS);
+
+ if (status == 0) {
+ dev_warn(i2c_dev->dev, "interrupt with no status\n");
+ return IRQ_NONE;
+ }
+
+ if (unlikely(status & status_err)) {
+ if (status & I2C_INT_NO_ACK)
+ i2c_dev->msg_err |= I2C_ERR_NO_ACK;
+ if (status & I2C_INT_ARBITRATION_LOST)
+ i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
+ complete(&i2c_dev->msg_complete);
+ goto err;
+ }
+
+ if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_empty_rx_fifo(i2c_dev);
+ else
+ BUG();
+ }
+
+ if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) {
+ if (i2c_dev->msg_buf_remaining)
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+ else
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
+ }
+
+ if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
+ !i2c_dev->msg_buf_remaining)
+ complete(&i2c_dev->msg_complete);
+
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ if (i2c_dev->is_dvc)
+ dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
+ return IRQ_HANDLED;
+err:
+ /* An error occurred, mask all interrupts */
+ tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST |
+ I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ |
+ I2C_INT_RX_FIFO_DATA_REQ);
+ i2c_writel(i2c_dev, status, I2C_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
+ struct i2c_msg *msg, int stop)
+{
+ u32 packet_header;
+ u32 int_mask;
+ int ret;
+
+ tegra_i2c_flush_fifos(i2c_dev);
+ i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS);
+
+ if (msg->len == 0)
+ return -EINVAL;
+
+ i2c_dev->msg_buf = msg->buf;
+ i2c_dev->msg_buf_remaining = msg->len;
+ i2c_dev->msg_err = I2C_ERR_NONE;
+ i2c_dev->msg_read = (msg->flags & I2C_M_RD);
+ INIT_COMPLETION(i2c_dev->msg_complete);
+
+ packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
+ PACKET_HEADER0_PROTOCOL_I2C |
+ (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) |
+ (1 << PACKET_HEADER0_PACKET_ID_SHIFT);
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->len - 1;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
+ packet_header |= I2C_HEADER_IE_ENABLE;
+ if (msg->flags & I2C_M_TEN)
+ packet_header |= I2C_HEADER_10BIT_ADDR;
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ packet_header |= I2C_HEADER_CONT_ON_NAK;
+ if (msg->flags & I2C_M_NOSTART)
+ packet_header |= I2C_HEADER_REPEAT_START;
+ if (msg->flags & I2C_M_RD)
+ packet_header |= I2C_HEADER_READ;
+ i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
+
+ if (!(msg->flags & I2C_M_RD))
+ tegra_i2c_fill_tx_fifo(i2c_dev);
+
+ int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
+ if (msg->flags & I2C_M_RD)
+ int_mask |= I2C_INT_RX_FIFO_DATA_REQ;
+ else if (i2c_dev->msg_buf_remaining)
+ int_mask |= I2C_INT_TX_FIFO_DATA_REQ;
+ tegra_i2c_unmask_irq(i2c_dev, int_mask);
+ dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n",
+ i2c_readl(i2c_dev, I2C_INT_MASK));
+
+ ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT);
+ tegra_i2c_mask_irq(i2c_dev, int_mask);
+
+ if (WARN_ON(ret == 0)) {
+ dev_err(i2c_dev->dev, "i2c transfer timed out\n");
+
+ tegra_i2c_init(i2c_dev);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(i2c_dev->dev, "transfer complete: %d %d %d\n",
+ ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err);
+
+ if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
+ return 0;
+
+ tegra_i2c_init(i2c_dev);
+ if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
+ if (msg->flags & I2C_M_IGNORE_NAK)
+ return 0;
+ return -EREMOTEIO;
+ }
+
+ return -EIO;
+}
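
As a worked example of the three header words written above (values follow from the shift and flag #defines near the top of this file), a 4-byte write to 7-bit address 0x50 on controller 0 would emit:

	u32 hdr0 = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) |
		   PACKET_HEADER0_PROTOCOL_I2C |
		   (0 << PACKET_HEADER0_CONT_ID_SHIFT) |
		   (1 << PACKET_HEADER0_PACKET_ID_SHIFT);	/* = 0x00010010 */
	u32 hdr1 = 4 - 1;					/* = 0x00000003 */
	u32 hdr2 = (0x50 << I2C_HEADER_SLAVE_ADDR_SHIFT) |
		   I2C_HEADER_IE_ENABLE;			/* = 0x000200a0 */
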
+
+static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap);
+ int i;
+ int ret = 0;
+
+ if (i2c_dev->is_suspended)
+ return -EBUSY;
+
+ clk_enable(i2c_dev->clk);
+ for (i = 0; i < num; i++) {
+ int stop = (i == (num - 1)) ? 1 : 0;
+ ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
+ if (ret)
+ break;
+ }
+ clk_disable(i2c_dev->clk);
+ return ret ?: i;
+}
+
+static u32 tegra_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C;
+}
+
+static const struct i2c_algorithm tegra_i2c_algo = {
+ .master_xfer = tegra_i2c_xfer,
+ .functionality = tegra_i2c_func,
+};
+
+static int tegra_i2c_probe(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev;
+ struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
+ struct resource *res;
+ struct resource *iomem;
+ struct clk *clk;
+ struct clk *i2c_clk;
+ void *base;
+ int irq;
+ int ret = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no mem resource\n");
+ return -EINVAL;
+ }
+ iomem = request_mem_region(res->start, resource_size(res), pdev->name);
+ if (!iomem) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ return -EBUSY;
+ }
+
+ base = ioremap(iomem->start, resource_size(iomem));
+ if (!base) {
+ dev_err(&pdev->dev, "Cannot ioremap I2C region\n");
+ release_mem_region(iomem->start, resource_size(iomem));
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no irq resource\n");
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+ irq = res->start;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "missing controller clock");
+ ret = PTR_ERR(clk);
+ goto err_release_region;
+ }
+
+ i2c_clk = clk_get(&pdev->dev, "i2c");
+ if (IS_ERR(i2c_clk)) {
+ dev_err(&pdev->dev, "missing bus clock");
+ ret = PTR_ERR(i2c_clk);
+ goto err_clk_put;
+ }
+
+ i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL);
+ if (!i2c_dev) {
+ ret = -ENOMEM;
+ goto err_i2c_clk_put;
+ }
+
+ i2c_dev->base = base;
+ i2c_dev->clk = clk;
+ i2c_dev->i2c_clk = i2c_clk;
+ i2c_dev->iomem = iomem;
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
+ i2c_dev->irq = irq;
+ i2c_dev->cont_id = pdev->id;
+ i2c_dev->dev = &pdev->dev;
+ i2c_dev->bus_clk_rate = pdata ? pdata->bus_clk_rate : 100000;
+
+ if (pdev->id == 3)
+ i2c_dev->is_dvc = 1;
+ init_completion(&i2c_dev->msg_complete);
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ ret = tegra_i2c_init(i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize i2c controller");
+ goto err_free;
+ }
+
+ ret = request_irq(i2c_dev->irq, tegra_i2c_isr, 0, pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq);
+ goto err_free;
+ }
+
+ clk_enable(i2c_dev->i2c_clk);
+
+ i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
+ i2c_dev->adapter.owner = THIS_MODULE;
+ i2c_dev->adapter.class = I2C_CLASS_HWMON;
+ strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter",
+ sizeof(i2c_dev->adapter.name));
+ i2c_dev->adapter.algo = &tegra_i2c_algo;
+ i2c_dev->adapter.dev.parent = &pdev->dev;
+ i2c_dev->adapter.nr = pdev->id;
+
+ ret = i2c_add_numbered_adapter(&i2c_dev->adapter);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add I2C adapter\n");
+ goto err_free_irq;
+ }
+
+ return 0;
+err_free_irq:
+ free_irq(i2c_dev->irq, i2c_dev);
+err_free:
+ kfree(i2c_dev);
+err_i2c_clk_put:
+ clk_put(i2c_clk);
+err_clk_put:
+ clk_put(clk);
+err_release_region:
+ release_mem_region(iomem->start, resource_size(iomem));
+err_iounmap:
+ iounmap(base);
+ return ret;
+}
+
+static int tegra_i2c_remove(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ i2c_del_adapter(&i2c_dev->adapter);
+ free_irq(i2c_dev->irq, i2c_dev);
+ clk_put(i2c_dev->i2c_clk);
+ clk_put(i2c_dev->clk);
+ release_mem_region(i2c_dev->iomem->start,
+ resource_size(i2c_dev->iomem));
+ iounmap(i2c_dev->base);
+ kfree(i2c_dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+ i2c_dev->is_suspended = true;
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+
+static int tegra_i2c_resume(struct platform_device *pdev)
+{
+ struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+ int ret;
+
+ i2c_lock_adapter(&i2c_dev->adapter);
+
+ ret = tegra_i2c_init(i2c_dev);
+
+ if (ret) {
+ i2c_unlock_adapter(&i2c_dev->adapter);
+ return ret;
+ }
+
+ i2c_dev->is_suspended = false;
+
+ i2c_unlock_adapter(&i2c_dev->adapter);
+
+ return 0;
+}
+#endif
+
+static struct platform_driver tegra_i2c_driver = {
+ .probe = tegra_i2c_probe,
+ .remove = tegra_i2c_remove,
+#ifdef CONFIG_PM
+ .suspend = tegra_i2c_suspend,
+ .resume = tegra_i2c_resume,
+#endif
+ .driver = {
+ .name = "tegra-i2c",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init tegra_i2c_init_driver(void)
+{
+ return platform_driver_register(&tegra_i2c_driver);
+}
+
+static void __exit tegra_i2c_exit_driver(void)
+{
+ platform_driver_unregister(&tegra_i2c_driver);
+}
+
+subsys_initcall(tegra_i2c_init_driver);
+module_exit(tegra_i2c_exit_driver);
+
+MODULE_DESCRIPTION("nVidia Tegra2 I2C Bus Controller driver");
+MODULE_AUTHOR("Colin Cross");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7b2fc98e2f2b..8db008de5392 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -532,6 +532,29 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
stats->custom[3].value = conn->fmr_unalign_cnt;
}
+static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
+{
+ struct iser_conn *ib_conn = ep->dd_data;
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!ib_conn || !ib_conn->cma_id)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &ib_conn->cma_id->route.addr.dst_addr,
+ param, buf);
+ default:
+ return -ENOSYS;
+ }
+}
+
static struct iscsi_endpoint *
iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
int non_blocking)
@@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = {
ISCSI_MAX_BURST |
ISCSI_PDU_INORDER_EN |
ISCSI_DATASEQ_INORDER_EN |
+ ISCSI_CONN_PORT |
+ ISCSI_CONN_ADDRESS |
ISCSI_EXP_STATSN |
ISCSI_PERSISTENT_PORT |
ISCSI_PERSISTENT_ADDRESS |
@@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = {
.destroy_conn = iscsi_iser_conn_destroy,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
+ .get_ep_param = iscsi_iser_get_ep_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_iser_conn_stop,
diff --git a/drivers/input/serio/i8042-unicore32io.h b/drivers/input/serio/i8042-unicore32io.h
new file mode 100644
index 000000000000..73f5cc124a36
--- /dev/null
+++ b/drivers/input/serio/i8042-unicore32io.h
@@ -0,0 +1,73 @@
+/*
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2011 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _I8042_UNICORE32_H
+#define _I8042_UNICORE32_H
+
+#include <mach/hardware.h>
+
+/*
+ * Names.
+ */
+#define I8042_KBD_PHYS_DESC "isa0060/serio0"
+#define I8042_AUX_PHYS_DESC "isa0060/serio1"
+#define I8042_MUX_PHYS_DESC "isa0060/serio%d"
+
+/*
+ * IRQs.
+ */
+#define I8042_KBD_IRQ IRQ_PS2_KBD
+#define I8042_AUX_IRQ IRQ_PS2_AUX
+
+/*
+ * Register numbers.
+ */
+#define I8042_COMMAND_REG PS2_COMMAND
+#define I8042_STATUS_REG PS2_STATUS
+#define I8042_DATA_REG PS2_DATA
+
+#define I8042_REGION_START (resource_size_t)(PS2_DATA)
+#define I8042_REGION_SIZE (resource_size_t)(16)
+
+static inline int i8042_read_data(void)
+{
+ return readb(I8042_DATA_REG);
+}
+
+static inline int i8042_read_status(void)
+{
+ return readb(I8042_STATUS_REG);
+}
+
+static inline void i8042_write_data(int val)
+{
+ writeb(val, I8042_DATA_REG);
+}
+
+static inline void i8042_write_command(int val)
+{
+ writeb(val, I8042_COMMAND_REG);
+}
+
+static inline int i8042_platform_init(void)
+{
+ if (!request_mem_region(I8042_REGION_START, I8042_REGION_SIZE, "i8042"))
+ return -EBUSY;
+
+ i8042_reset = 1;
+ return 0;
+}
+
+static inline void i8042_platform_exit(void)
+{
+ release_mem_region(I8042_REGION_START, I8042_REGION_SIZE);
+}
+
+#endif /* _I8042_UNICORE32_H */
diff --git a/drivers/input/serio/i8042.h b/drivers/input/serio/i8042.h
index ac1d759d0f55..3452708fbe3b 100644
--- a/drivers/input/serio/i8042.h
+++ b/drivers/input/serio/i8042.h
@@ -26,6 +26,8 @@
#include "i8042-sparcio.h"
#elif defined(CONFIG_X86) || defined(CONFIG_IA64)
#include "i8042-x86ia64io.h"
+#elif defined(CONFIG_UNICORE32)
+#include "i8042-unicore32io.h"
#else
#include "i8042-io.h"
#endif
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index b82d28819e2a..4b0b63c290a6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1283,24 +1283,22 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP)
- return error;
-
- if (clone->cmd_flags & REQ_DISCARD)
- /*
- * Pass all discard request failures up.
- * FIXME: only fail_path if the discard failed due to a
- * transport problem. This requires precise understanding
- * of the underlying failure (e.g. the SCSI sense).
- */
+ if (error == -EOPNOTSUPP || error == -EREMOTEIO)
return error;
if (mpio->pgpath)
fail_path(mpio->pgpath);
spin_lock_irqsave(&m->lock, flags);
- if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m))
- r = -EIO;
+ if (!m->nr_valid_paths) {
+ if (!m->queue_if_no_path) {
+ if (!__must_push_back(m))
+ r = -EIO;
+ } else {
+ if (error == -EBADE)
+ r = error;
+ }
+ }
spin_unlock_irqrestore(&m->lock, flags);
return r;
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 013c7d881948..22027e7946f7 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_SAS_IOUNIT0_RATE_1_5 (0x08)
#define MPI_SAS_IOUNIT0_RATE_3_0 (0x09)
+#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A)
/* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index 8faa4fab7b89..fd6222882a0e 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
#define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03)
#define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08)
#define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09)
+#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A)
/* SAS Discovery Event data */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3358c0af3466..ec8080c98081 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS PHY Link Status: Phy=%d:"
- " Rate 3.0 Gpbs",PhyNumber);
+ " Rate 3.0 Gbps", PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate 6.0 Gbps", PhyNumber);
break;
default:
snprintf(evStr, EVENT_DESCR_STR_SZ,
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index e8deb8ed0499..878bda0cce70 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1314,8 +1314,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
else
karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
- if (karg->hdr.port > 1)
+ if (karg->hdr.port > 1) {
+ kfree(karg);
return -EINVAL;
+ }
port = karg->hdr.port;
karg->port = port;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 8aefb1829fcd..f5a14afad2cd 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = {
.change_queue_depth = mptscsih_change_queue_depth,
.eh_abort_handler = mptscsih_abort,
.eh_device_reset_handler = mptscsih_dev_reset,
- .eh_bus_reset_handler = mptscsih_bus_reset,
.eh_host_reset_handler = mptscsih_host_reset,
.bios_param = mptscsih_bios_param,
.can_queue = MPT_SAS_CAN_QUEUE,
@@ -3063,6 +3062,9 @@ static int mptsas_probe_one_phy(struct device *dev,
case MPI_SAS_IOUNIT0_RATE_3_0:
phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
break;
+ case MPI_SAS_IOUNIT0_RATE_6_0:
+ phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ break;
case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
default:
@@ -3691,7 +3693,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event)
}
if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
- link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
+ link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
+ link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
if (!port_info) {
if (ioc->old_sas_discovery_protocal) {
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 9d48659e3b28..32e64cc85d2c 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -145,13 +145,6 @@ static struct {
{ "Broadcom NetXtreme II BCM57712E XGb" }
};
-#ifndef PCI_DEVICE_ID_NX2_57712
-#define PCI_DEVICE_ID_NX2_57712 0x1662
-#endif
-#ifndef PCI_DEVICE_ID_NX2_57712E
-#define PCI_DEVICE_ID_NX2_57712E 0x1663
-#endif
-
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 98e6fdf34d30..77cf813ba264 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o
obj-$(CONFIG_X86) += setup-bus.o
obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
+obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o
obj-$(CONFIG_PARISC) += setup-bus.o
obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o
obj-$(CONFIG_PPC) += setup-bus.o
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index a9fe23d5bd0f..379d8592bc6e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2648,6 +2648,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
+ len_to_track_end = 0;
/*
* A tidaw can address 4k of memory, but must not cross page boundaries
* We can let the block layer handle this by setting
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3c3f3ffe2179..e950f1ad4dd1 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -50,7 +50,7 @@ MODULE_LICENSE("GPL");
static void chsc_subchannel_irq(struct subchannel *sch)
{
- struct chsc_private *private = sch->private;
+ struct chsc_private *private = dev_get_drvdata(&sch->dev);
struct chsc_request *request = private->request;
struct irb *irb = (struct irb *)&S390_lowcore.irb;
@@ -80,13 +80,14 @@ static int chsc_subchannel_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
return -ENOMEM;
+ dev_set_drvdata(&sch->dev, private);
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret) {
CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
+ dev_set_drvdata(&sch->dev, NULL);
kfree(private);
} else {
- sch->private = private;
if (dev_get_uevent_suppress(&sch->dev)) {
dev_set_uevent_suppress(&sch->dev, 0);
kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
@@ -100,8 +101,8 @@ static int chsc_subchannel_remove(struct subchannel *sch)
struct chsc_private *private;
cio_disable_subchannel(sch);
- private = sch->private;
- sch->private = NULL;
+ private = dev_get_drvdata(&sch->dev);
+ dev_set_drvdata(&sch->dev, NULL);
if (private->request) {
complete(&private->request->completion);
put_device(&sch->dev);
@@ -147,7 +148,10 @@ static struct css_device_id chsc_subchannel_ids[] = {
MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
static struct css_driver chsc_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "chsc_subchannel",
+ },
.subchannel_type = chsc_subchannel_ids,
.irq = chsc_subchannel_irq,
.probe = chsc_subchannel_probe,
@@ -157,7 +161,6 @@ static struct css_driver chsc_subchannel_driver = {
.freeze = chsc_subchannel_freeze,
.thaw = chsc_subchannel_restore,
.restore = chsc_subchannel_restore,
- .name = "chsc_subchannel",
};
static int __init chsc_init_dbfs(void)
@@ -241,7 +244,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
while ((sch = chsc_get_next_subchannel(sch))) {
spin_lock(sch->lock);
- private = sch->private;
+ private = dev_get_drvdata(&sch->dev);
if (private->request) {
spin_unlock(sch->lock);
ret = -EBUSY;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 430f875006f2..cbde448f9947 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -84,29 +84,14 @@ out_unregister:
arch_initcall (cio_debug_init);
-int
-cio_set_options (struct subchannel *sch, int flags)
+int cio_set_options(struct subchannel *sch, int flags)
{
- sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
- sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
- sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
- return 0;
-}
+ struct io_subchannel_private *priv = to_io_private(sch);
-/* FIXME: who wants to use this? */
-int
-cio_get_options (struct subchannel *sch)
-{
- int flags;
-
- flags = 0;
- if (sch->options.suspend)
- flags |= DOIO_ALLOW_SUSPEND;
- if (sch->options.prefetch)
- flags |= DOIO_DENY_PREFETCH;
- if (sch->options.inter)
- flags |= DOIO_SUPPRESS_INTER;
- return flags;
+ priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+ priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+ priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+ return 0;
}
static int
@@ -139,21 +124,21 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ union orb *orb = &priv->orb;
int ccode;
- union orb *orb;
CIO_TRACE_EVENT(5, "stIO");
CIO_TRACE_EVENT(5, dev_name(&sch->dev));
- orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
/* sch is always under 2G. */
orb->cmd.intparm = (u32)(addr_t)sch;
orb->cmd.fmt = 1;
- orb->cmd.pfch = sch->options.prefetch == 0;
- orb->cmd.spnd = sch->options.suspend;
- orb->cmd.ssic = sch->options.suspend && sch->options.inter;
+ orb->cmd.pfch = priv->options.prefetch == 0;
+ orb->cmd.spnd = priv->options.suspend;
+ orb->cmd.ssic = priv->options.suspend && priv->options.inter;
orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
/*
@@ -630,11 +615,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
irb = (struct irb *)&S390_lowcore.irb;
do {
kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
- /*
- * Non I/O-subchannel thin interrupts are processed differently
- */
- if (tpi_info->adapter_IO == 1 &&
- tpi_info->int_type == IO_INTERRUPT_TYPE) {
+ if (tpi_info->adapter_IO) {
do_adapter_IO(tpi_info->isc);
continue;
}
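
Two things move together here: cio_set_options() now records the DOIO_* flags in the per-subchannel io_subchannel_private, and cio_start_key() reads them back when building the ORB. The one subtlety is the prefetch inversion: the option bit stores "deny prefetch" while the ORB's pfch bit enables prefetch. A condensed restatement of the flow, assuming the local "cio.h"/"io_sch.h" headers for the DOIO_* flags and the structures:

#include "cio.h"        /* DOIO_* flags, struct subchannel */
#include "io_sch.h"     /* struct io_subchannel_private, union orb */

static void demo_options_to_orb(struct io_subchannel_private *priv,
                                union orb *orb, int flags)
{
        priv->options.suspend  = (flags & DOIO_ALLOW_SUSPEND) != 0;
        priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
        priv->options.inter    = (flags & DOIO_SUPPRESS_INTER) != 0;

        /* pfch enables prefetch, options.prefetch records a denial */
        orb->cmd.pfch = priv->options.prefetch == 0;
        orb->cmd.spnd = priv->options.suspend;
        orb->cmd.ssic = priv->options.suspend && priv->options.inter;
}

The unused cio_get_options() reader goes away entirely, which is what lets struct subchannel shed its options field in the cio.h hunk below.
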
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index bf7f80f5a330..155a82bcb9e5 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -84,13 +84,6 @@ struct subchannel {
SUBCHANNEL_TYPE_MSG = 2,
SUBCHANNEL_TYPE_ADM = 3,
} st; /* subchannel type */
-
- struct {
- unsigned int suspend:1; /* allow suspend */
- unsigned int prefetch:1;/* deny prefetch */
- unsigned int inter:1; /* suppress intermediate interrupts */
- } __attribute__ ((packed)) options;
-
__u8 vpm; /* verified path mask */
__u8 lpm; /* logical path mask */
__u8 opm; /* operational path mask */
@@ -99,14 +92,11 @@ struct subchannel {
struct chsc_ssd_info ssd_info; /* subchannel description */
struct device dev; /* entry in device tree */
struct css_driver *driver;
- void *private; /* private per subchannel type data */
enum sch_todo todo;
struct work_struct todo_work;
struct schib_config config;
} __attribute__ ((aligned(8)));
-#define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */
-
#define to_subchannel(n) container_of(n, struct subchannel, dev)
extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
@@ -120,7 +110,6 @@ extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
extern int cio_cancel (struct subchannel *);
extern int cio_set_options (struct subchannel *, int);
-extern int cio_get_options (struct subchannel *);
extern int cio_update_schib(struct subchannel *sch);
extern int cio_commit_config(struct subchannel *sch);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 24d8e97355b9..c47b25fd3f43 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -35,6 +35,7 @@ int css_init_done = 0;
int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -1214,7 +1215,7 @@ static const struct dev_pm_ops css_pm_ops = {
.restore = css_pm_restore,
};
-struct bus_type css_bus_type = {
+static struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
@@ -1233,9 +1234,7 @@ struct bus_type css_bus_type = {
*/
int css_driver_register(struct css_driver *cdrv)
{
- cdrv->drv.name = cdrv->name;
cdrv->drv.bus = &css_bus_type;
- cdrv->drv.owner = cdrv->owner;
return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
@@ -1253,4 +1252,3 @@ void css_driver_unregister(struct css_driver *cdrv)
EXPORT_SYMBOL_GPL(css_driver_unregister);
MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(css_bus_type);
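
With name and owner folded into the embedded struct device_driver, css_driver_register() shrinks to wiring up the bus; each driver initializes drv directly, as chsc_subchannel_driver does above and io_subchannel_driver does further down. A sketch of what a subchannel driver supplies after this change (the "demo" identifiers are illustrative):

#include <linux/module.h>
#include "css.h"

static struct css_device_id demo_subchannel_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};

static struct css_driver demo_subchannel_driver = {
        .drv = {
                .owner = THIS_MODULE,           /* was css_driver.owner */
                .name  = "demo_subchannel",     /* was css_driver.name */
        },
        .subchannel_type = demo_subchannel_ids,
};

static int __init demo_init(void)
{
        return css_driver_register(&demo_subchannel_driver);
}

Making css_bus_type static at the same time is safe because, with drv.bus set inside css_driver_register(), no out-of-module code needs the symbol anymore.
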
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 7e37886de231..80ebdddf7747 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -63,7 +63,6 @@ struct subchannel;
struct chp_link;
/**
* struct css_driver - device driver for subchannels
- * @owner: owning module
* @subchannel_type: subchannel type supported by this driver
* @drv: embedded device driver structure
* @irq: called on interrupts
@@ -78,10 +77,8 @@ struct chp_link;
* @thaw: undo work done in @freeze
* @restore: callback for restoring after hibernation
* @settle: wait for asynchronous work to finish
- * @name: name of the device driver
*/
struct css_driver {
- struct module *owner;
struct css_device_id *subchannel_type;
struct device_driver drv;
void (*irq)(struct subchannel *);
@@ -96,16 +93,10 @@ struct css_driver {
int (*thaw) (struct subchannel *);
int (*restore)(struct subchannel *);
int (*settle)(void);
- const char *name;
};
#define to_cssdriver(n) container_of(n, struct css_driver, drv)
-/*
- * all css_drivers have the css_bus_type
- */
-extern struct bus_type css_bus_type;
-
extern int css_driver_register(struct css_driver *);
extern void css_driver_unregister(struct css_driver *);
@@ -140,7 +131,6 @@ struct channel_subsystem {
};
#define to_css(dev) container_of(dev, struct channel_subsystem, device)
-extern struct bus_type css_bus_type;
extern struct channel_subsystem *channel_subsystems[];
/* Helper functions to build lists for the slow path. */
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index b7eaff9ca19e..e50b12163afe 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -172,9 +172,11 @@ static int io_subchannel_settle(void)
}
static struct css_driver io_subchannel_driver = {
- .owner = THIS_MODULE,
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "io_subchannel",
+ },
.subchannel_type = io_subchannel_ids,
- .name = "io_subchannel",
.irq = io_subchannel_irq,
.sch_event = io_subchannel_sch_event,
.chp_event = io_subchannel_chp_event,
@@ -1030,6 +1032,7 @@ static void io_subchannel_init_fields(struct subchannel *sch)
*/
static int io_subchannel_probe(struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
int rc;
@@ -1073,10 +1076,11 @@ static int io_subchannel_probe(struct subchannel *sch)
if (rc)
goto out_schedule;
/* Allocate I/O subchannel private data. */
- sch->private = kzalloc(sizeof(struct io_subchannel_private),
- GFP_KERNEL | GFP_DMA);
- if (!sch->private)
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+ if (!io_priv)
goto out_schedule;
+
+ set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
@@ -1090,6 +1094,7 @@ out_schedule:
static int
io_subchannel_remove (struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv = to_io_private(sch);
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
@@ -1099,11 +1104,12 @@ io_subchannel_remove (struct subchannel *sch)
/* Set ccw device to not operational and drop reference. */
spin_lock_irq(cdev->ccwlock);
sch_set_cdev(sch, NULL);
+ set_io_private(sch, NULL);
cdev->private->state = DEV_STATE_NOT_OPER;
spin_unlock_irq(cdev->ccwlock);
ccw_device_unregister(cdev);
out_free:
- kfree(sch->private);
+ kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
return 0;
}
@@ -1553,11 +1559,12 @@ spinlock_t * cio_get_console_lock(void)
static int ccw_device_console_enable(struct ccw_device *cdev,
struct subchannel *sch)
{
+ struct io_subchannel_private *io_priv = cio_get_console_priv();
int rc;
/* Attach subchannel private data. */
- sch->private = cio_get_console_priv();
- memset(sch->private, 0, sizeof(struct io_subchannel_private));
+ memset(io_priv, 0, sizeof(*io_priv));
+ set_io_private(sch, io_priv);
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
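
The remove path above is careful about ordering: fetch the private pointer first, detach it (and the child cdev) while the lock is held, and free only once no interrupt or lookup path can reach it. Reduced to its skeleton, with the locking simplified (the real code takes the cdev's ccwlock and also unregisters the ccw device):

#include <linux/slab.h>
#include "io_sch.h"

static int demo_remove(struct subchannel *sch)
{
        struct io_subchannel_private *io_priv = to_io_private(sch);

        spin_lock_irq(sch->lock);
        set_io_private(sch, NULL);      /* concurrent IRQs now see NULL */
        spin_unlock_irq(sch->lock);

        kfree(io_priv);                 /* safe: nothing can reach it */
        return 0;
}
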
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index d024d2c21897..ba31ad88f4f7 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -5,68 +5,36 @@
#include <asm/schid.h>
#include <asm/ccwdev.h>
#include "css.h"
-
-/*
- * command-mode operation request block
- */
-struct cmd_orb {
- u32 intparm; /* interruption parameter */
- u32 key : 4; /* flags, like key, suspend control, etc. */
- u32 spnd : 1; /* suspend control */
- u32 res1 : 1; /* reserved */
- u32 mod : 1; /* modification control */
- u32 sync : 1; /* synchronize control */
- u32 fmt : 1; /* format control */
- u32 pfch : 1; /* prefetch control */
- u32 isic : 1; /* initial-status-interruption control */
- u32 alcc : 1; /* address-limit-checking control */
- u32 ssic : 1; /* suppress-suspended-interr. control */
- u32 res2 : 1; /* reserved */
- u32 c64 : 1; /* IDAW/QDIO 64 bit control */
- u32 i2k : 1; /* IDAW 2/4kB block size control */
- u32 lpm : 8; /* logical path mask */
- u32 ils : 1; /* incorrect length */
- u32 zero : 6; /* reserved zeros */
- u32 orbx : 1; /* ORB extension control */
- u32 cpa; /* channel program address */
-} __attribute__ ((packed, aligned(4)));
-
-/*
- * transport-mode operation request block
- */
-struct tm_orb {
- u32 intparm;
- u32 key:4;
- u32 :9;
- u32 b:1;
- u32 :2;
- u32 lpm:8;
- u32 :7;
- u32 x:1;
- u32 tcw;
- u32 prio:8;
- u32 :8;
- u32 rsvpgm:8;
- u32 :8;
- u32 :32;
- u32 :32;
- u32 :32;
- u32 :32;
-} __attribute__ ((packed, aligned(4)));
-
-union orb {
- struct cmd_orb cmd;
- struct tm_orb tm;
-} __attribute__ ((packed, aligned(4)));
+#include "orb.h"
struct io_subchannel_private {
union orb orb; /* operation request block */
struct ccw1 sense_ccw; /* static ccw for sense command */
-} __attribute__ ((aligned(8)));
+ struct ccw_device *cdev;/* pointer to the child ccw device */
+ struct {
+ unsigned int suspend:1; /* allow suspend */
+ unsigned int prefetch:1;/* deny prefetch */
+ unsigned int inter:1; /* suppress intermediate interrupts */
+ } __packed options;
+} __aligned(8);
-#define to_io_private(n) ((struct io_subchannel_private *)n->private)
-#define sch_get_cdev(n) (dev_get_drvdata(&n->dev))
-#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c))
+#define to_io_private(n) ((struct io_subchannel_private *) \
+ dev_get_drvdata(&(n)->dev))
+#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
+
+static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ return priv ? priv->cdev : NULL;
+}
+
+static inline void sch_set_cdev(struct subchannel *sch,
+ struct ccw_device *cdev)
+{
+ struct io_subchannel_private *priv = to_io_private(sch);
+ if (priv)
+ priv->cdev = cdev;
+}
#define MAX_CIWS 8
@@ -191,23 +159,6 @@ struct ccw_device_private {
void *cmb_wait; /* deferred cmb enable/disable */
};
-static inline int ssch(struct subchannel_id schid, union orb *addr)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode = -EIO;
-
- asm volatile(
- " ssch 0(%2)\n"
- "0: ipm %0\n"
- " srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+d" (ccode)
- : "d" (reg1), "a" (addr), "m" (*addr)
- : "cc", "memory");
- return ccode;
-}
-
static inline int rsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
@@ -223,21 +174,6 @@ static inline int rsch(struct subchannel_id schid)
return ccode;
}
-static inline int csch(struct subchannel_id schid)
-{
- register struct subchannel_id reg1 asm("1") = schid;
- int ccode;
-
- asm volatile(
- " csch\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (ccode)
- : "d" (reg1)
- : "cc");
- return ccode;
-}
-
static inline int hsch(struct subchannel_id schid)
{
register struct subchannel_id reg1 asm("1") = schid;
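
Turning sch_get_cdev()/sch_set_cdev() from drvdata macros into inline helpers buys a NULL check: the drvdata slot now holds the io_subchannel_private rather than the cdev directly, and it is legitimately NULL before probe attaches it or after remove clears it. Callers such as interrupt handlers can therefore stay simple; a sketch with an illustrative name, assuming "io_sch.h":

#include "io_sch.h"

static void demo_irq(struct subchannel *sch)
{
        struct ccw_device *cdev = sch_get_cdev(sch);

        if (!cdev)      /* early or late interrupt: no device attached */
                return;
        /* ... dispatch to the ccw device ... */
}
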
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index fac06155773f..4d80fc67a06b 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -3,6 +3,8 @@
#include <asm/chpid.h>
#include <asm/schid.h>
+#include "orb.h"
+#include "cio.h"
/*
* TPI info structure
@@ -87,6 +89,38 @@ static inline int tsch(struct subchannel_id schid, struct irb *addr)
return ccode;
}
+static inline int ssch(struct subchannel_id schid, union orb *addr)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode = -EIO;
+
+ asm volatile(
+ " ssch 0(%2)\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : "+d" (ccode)
+ : "d" (reg1), "a" (addr), "m" (*addr)
+ : "cc", "memory");
+ return ccode;
+}
+
+static inline int csch(struct subchannel_id schid)
+{
+ register struct subchannel_id reg1 asm("1") = schid;
+ int ccode;
+
+ asm volatile(
+ " csch\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=d" (ccode)
+ : "d" (reg1)
+ : "cc");
+ return ccode;
+}
+
static inline int tpi(struct tpi_info *addr)
{
int ccode;
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 000000000000..45a9865c2b36
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,67 @@
+/*
+ * Orb related data structures.
+ *
+ * Copyright IBM Corp. 2007, 2011
+ *
+ * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
+ * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ * Sebastian Ott <sebott@linux.vnet.ibm.com>
+ */
+
+#ifndef S390_ORB_H
+#define S390_ORB_H
+
+/*
+ * Command-mode operation request block
+ */
+struct cmd_orb {
+ u32 intparm; /* interruption parameter */
+ u32 key:4; /* flags, like key, suspend control, etc. */
+ u32 spnd:1; /* suspend control */
+ u32 res1:1; /* reserved */
+ u32 mod:1; /* modification control */
+ u32 sync:1; /* synchronize control */
+ u32 fmt:1; /* format control */
+ u32 pfch:1; /* prefetch control */
+ u32 isic:1; /* initial-status-interruption control */
+ u32 alcc:1; /* address-limit-checking control */
+ u32 ssic:1; /* suppress-suspended-interr. control */
+ u32 res2:1; /* reserved */
+ u32 c64:1; /* IDAW/QDIO 64 bit control */
+ u32 i2k:1; /* IDAW 2/4kB block size control */
+ u32 lpm:8; /* logical path mask */
+ u32 ils:1; /* incorrect length */
+ u32 zero:6; /* reserved zeros */
+ u32 orbx:1; /* ORB extension control */
+ u32 cpa; /* channel program address */
+} __packed __aligned(4);
+
+/*
+ * Transport-mode operation request block
+ */
+struct tm_orb {
+ u32 intparm;
+ u32 key:4;
+ u32:9;
+ u32 b:1;
+ u32:2;
+ u32 lpm:8;
+ u32:7;
+ u32 x:1;
+ u32 tcw;
+ u32 prio:8;
+ u32:8;
+ u32 rsvpgm:8;
+ u32:8;
+ u32:32;
+ u32:32;
+ u32:32;
+ u32:32;
+} __packed __aligned(4);
+
+union orb {
+ struct cmd_orb cmd;
+ struct tm_orb tm;
+} __packed __aligned(4);
+
+#endif /* S390_ORB_H */
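
The ORB is a hardware-defined control block, so these bitfields must pack to exact sizes: one word of flags between intparm and cpa in the command ORB, 32 bytes overall for the transport ORB (which also sizes the union). Compile-time checks one could drop in to document that; these are not part of the patch:

#include <linux/bug.h>
#include "orb.h"

static inline void orb_layout_checks(void)
{
        BUILD_BUG_ON(sizeof(struct cmd_orb) != 12); /* intparm+flags+cpa */
        BUILD_BUG_ON(sizeof(struct tm_orb) != 32);
        BUILD_BUG_ON(sizeof(union orb) != 32);
}

Pulling the definitions into their own header is also what lets ioasm.h, above, take union orb by pointer for the relocated ssch() without including all of io_sch.h.
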
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 51c666fb67a4..645b0fcbb370 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -122,36 +122,21 @@ static int __init zfcp_module_init(void)
{
int retval = -ENOMEM;
- zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
- sizeof(struct zfcp_fc_gpn_ft_req));
- if (!zfcp_data.gpn_ft_cache)
- goto out;
-
- zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
- sizeof(struct fsf_qtcb));
- if (!zfcp_data.qtcb_cache)
+ zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb",
+ sizeof(struct fsf_qtcb));
+ if (!zfcp_fsf_qtcb_cache)
goto out_qtcb_cache;
- zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
- sizeof(struct fsf_status_read_buffer));
- if (!zfcp_data.sr_buffer_cache)
- goto out_sr_cache;
+ zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req",
+ sizeof(struct zfcp_fc_req));
+ if (!zfcp_fc_req_cache)
+ goto out_fc_cache;
- zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
- sizeof(struct zfcp_fc_gid_pn));
- if (!zfcp_data.gid_pn_cache)
- goto out_gid_cache;
-
- zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc",
- sizeof(struct zfcp_fc_els_adisc));
- if (!zfcp_data.adisc_cache)
- goto out_adisc_cache;
-
- zfcp_data.scsi_transport_template =
+ zfcp_scsi_transport_template =
fc_attach_transport(&zfcp_transport_functions);
- if (!zfcp_data.scsi_transport_template)
+ if (!zfcp_scsi_transport_template)
goto out_transport;
- scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
+ scsi_transport_reserve_device(zfcp_scsi_transport_template,
sizeof(struct zfcp_scsi_dev));
@@ -175,18 +160,12 @@ static int __init zfcp_module_init(void)
out_ccw_register:
misc_deregister(&zfcp_cfdc_misc);
out_misc:
- fc_release_transport(zfcp_data.scsi_transport_template);
+ fc_release_transport(zfcp_scsi_transport_template);
out_transport:
- kmem_cache_destroy(zfcp_data.adisc_cache);
-out_adisc_cache:
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
-out_gid_cache:
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
-out_sr_cache:
- kmem_cache_destroy(zfcp_data.qtcb_cache);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+out_fc_cache:
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
out_qtcb_cache:
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
-out:
return retval;
}
@@ -196,12 +175,9 @@ static void __exit zfcp_module_exit(void)
{
ccw_driver_unregister(&zfcp_ccw_driver);
misc_deregister(&zfcp_cfdc_misc);
- fc_release_transport(zfcp_data.scsi_transport_template);
- kmem_cache_destroy(zfcp_data.adisc_cache);
- kmem_cache_destroy(zfcp_data.gid_pn_cache);
- kmem_cache_destroy(zfcp_data.sr_buffer_cache);
- kmem_cache_destroy(zfcp_data.qtcb_cache);
- kmem_cache_destroy(zfcp_data.gpn_ft_cache);
+ fc_release_transport(zfcp_scsi_transport_template);
+ kmem_cache_destroy(zfcp_fc_req_cache);
+ kmem_cache_destroy(zfcp_fsf_qtcb_cache);
}
module_exit(zfcp_module_exit);
@@ -260,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
return -ENOMEM;
adapter->pool.qtcb_pool =
- mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
+ mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache);
if (!adapter->pool.qtcb_pool)
return -ENOMEM;
- adapter->pool.status_read_data =
- mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
- zfcp_data.sr_buffer_cache);
- if (!adapter->pool.status_read_data)
+ BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE);
+ adapter->pool.sr_data =
+ mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0);
+ if (!adapter->pool.sr_data)
return -ENOMEM;
adapter->pool.gid_pn =
- mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
+ mempool_create_slab_pool(1, zfcp_fc_req_cache);
if (!adapter->pool.gid_pn)
return -ENOMEM;
@@ -290,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
mempool_destroy(adapter->pool.qtcb_pool);
if (adapter->pool.status_read_req)
mempool_destroy(adapter->pool.status_read_req);
- if (adapter->pool.status_read_data)
- mempool_destroy(adapter->pool.status_read_data);
+ if (adapter->pool.sr_data)
+ mempool_destroy(adapter->pool.sr_data);
if (adapter->pool.gid_pn)
mempool_destroy(adapter->pool.gid_pn);
}
@@ -386,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports);
+ INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update);
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -437,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN;
adapter->ccw_device->dev.dma_parms = &adapter->dma_parms;
- if (!zfcp_adapter_scsi_register(adapter))
+ if (!zfcp_scsi_adapter_register(adapter))
return adapter;
failed:
@@ -451,10 +428,11 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter)
cancel_work_sync(&adapter->scan_work);
cancel_work_sync(&adapter->stat_work);
+ cancel_work_sync(&adapter->ns_up_work);
zfcp_destroy_adapter_work_queue(adapter);
zfcp_fc_wka_ports_force_offline(adapter->gs);
- zfcp_adapter_scsi_unregister(adapter);
+ zfcp_scsi_adapter_unregister(adapter);
sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs);
zfcp_erp_thread_kill(adapter);
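
Besides the cache consolidation (five zfcp slab caches down to the two globals zfcp_fsf_qtcb_cache and zfcp_fc_req_cache), the status-read buffers switch from a slab-backed mempool to a page-backed one: the BUILD_BUG_ON guarantees a buffer fits in one page, and the zfcp_fsf.c hunks below convert between the page the pool hands out and the buffer address inside it. The conversion pair, in sketch form:

#include <linux/mempool.h>
#include <linux/mm.h>

static void *demo_sr_alloc(mempool_t *pool)
{
        struct page *page = mempool_alloc(pool, GFP_ATOMIC);

        return page ? page_address(page) : NULL;  /* buffer lives in page */
}

static void demo_sr_free(mempool_t *pool, void *sr_buf)
{
        mempool_free(virt_to_page(sr_buf), pool); /* return the page */
}
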
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 9ae1d0a6f627..527ba48eea57 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -89,7 +89,6 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_LUN_READONLY 0x00000008
/* FSF request status (this does not have a common part) */
-#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002
#define ZFCP_STATUS_FSFREQ_ERROR 0x00000008
#define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010
#define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040
@@ -108,7 +107,7 @@ struct zfcp_adapter_mempool {
mempool_t *scsi_req;
mempool_t *scsi_abort;
mempool_t *status_read_req;
- mempool_t *status_read_data;
+ mempool_t *sr_data;
mempool_t *gid_pn;
mempool_t *qtcb_pool;
};
@@ -190,6 +189,7 @@ struct zfcp_adapter {
struct fsf_qtcb_bottom_port *stats_reset_data;
unsigned long stats_reset;
struct work_struct scan_work;
+ struct work_struct ns_up_work;
struct service_level service_level;
struct workqueue_struct *work_queue;
struct device_dma_parameters dma_parms;
@@ -314,15 +314,4 @@ struct zfcp_fsf_req {
void (*handler)(struct zfcp_fsf_req *);
};
-/* driver data */
-struct zfcp_data {
- struct scsi_host_template scsi_host_template;
- struct scsi_transport_template *scsi_transport_template;
- struct kmem_cache *gpn_ft_cache;
- struct kmem_cache *qtcb_cache;
- struct kmem_cache *sr_buffer_cache;
- struct kmem_cache *gid_pn_cache;
- struct kmem_cache *adisc_cache;
-};
-
#endif /* ZFCP_DEF_H */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e003e306f870..e1b4f800e226 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act)
if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED)
return ZFCP_ERP_FAILED;
- if (mempool_resize(act->adapter->pool.status_read_data,
+ if (mempool_resize(act->adapter->pool.sr_data,
act->adapter->stat_read_buf_num, GFP_KERNEL))
return ZFCP_ERP_FAILED;
@@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
if (result == ZFCP_ERP_SUCCEEDED) {
register_service_level(&adapter->service_level);
queue_work(adapter->work_queue, &adapter->scan_work);
+ queue_work(adapter->work_queue, &adapter->ns_up_work);
} else
unregister_service_level(&adapter->service_level);
+
kref_put(&adapter->ref, zfcp_adapter_release);
break;
}
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 6e325284fbe7..03627cfd81cd 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
extern void zfcp_erp_timeout_handler(unsigned long);
/* zfcp_fc.c */
+extern struct kmem_cache *zfcp_fc_req_cache;
extern void zfcp_fc_enqueue_event(struct zfcp_adapter *,
enum fc_host_event_code event_code, u32);
extern void zfcp_fc_post_event(struct work_struct *);
@@ -95,8 +96,10 @@ extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *);
extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *);
+extern void zfcp_fc_sym_name_update(struct work_struct *);
/* zfcp_fsf.c */
+extern struct kmem_cache *zfcp_fsf_qtcb_cache;
extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
@@ -139,9 +142,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
struct qdio_buffer *);
/* zfcp_scsi.c */
-extern struct zfcp_data zfcp_data;
-extern int zfcp_adapter_scsi_register(struct zfcp_adapter *);
-extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *);
+extern struct scsi_transport_template *zfcp_scsi_transport_template;
+extern int zfcp_scsi_adapter_register(struct zfcp_adapter *);
+extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *);
extern struct fc_function_template zfcp_transport_functions;
extern void zfcp_scsi_rport_work(struct work_struct *);
extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 30cf91a787a3..297e6b71ce9c 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -11,11 +11,14 @@
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/utsname.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
+struct kmem_cache *zfcp_fc_req_cache;
+
static u32 zfcp_fc_rscn_range_mask[] = {
[ELS_ADDR_FMT_PORT] = 0xFFFFFF,
[ELS_ADDR_FMT_AREA] = 0xFFFF00,
@@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
zfcp_fc_incoming_rscn(fsf_req);
}
-static void zfcp_fc_ns_gid_pn_eval(void *data)
+static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
- struct zfcp_fc_gid_pn *gid_pn = data;
- struct zfcp_fsf_ct_els *ct = &gid_pn->ct;
- struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req);
- struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp);
- struct zfcp_port *port = gid_pn->port;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
- if (ct->status)
+ if (ct_els->status)
return;
- if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC)
+ if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
return;
- /* paranoia */
- if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn)
- return;
/* looks like a valid d_id */
- port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid);
+ ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
static void zfcp_fc_complete(void *data)
@@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data)
complete(data);
}
+static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
+{
+ ct_hdr->ct_rev = FC_CT_REV;
+ ct_hdr->ct_fs_type = FC_FST_DIR;
+ ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
+ ct_hdr->ct_cmd = cmd;
+ ct_hdr->ct_mr_size = mr_size / 4;
+}
+
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
- struct zfcp_fc_gid_pn *gid_pn)
+ struct zfcp_fc_req *fc_req)
{
struct zfcp_adapter *adapter = port->adapter;
DECLARE_COMPLETION_ONSTACK(completion);
+ struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
+ struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
int ret;
/* setup parameters for send generic command */
- gid_pn->port = port;
- gid_pn->ct.handler = zfcp_fc_complete;
- gid_pn->ct.handler_data = &completion;
- gid_pn->ct.req = &gid_pn->sg_req;
- gid_pn->ct.resp = &gid_pn->sg_resp;
- sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req,
- sizeof(struct zfcp_fc_gid_pn_req));
- sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp,
- sizeof(struct zfcp_fc_gid_pn_resp));
-
- /* setup nameserver request */
- gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR;
- gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- gid_pn->gid_pn_req.ct_hdr.ct_options = 0;
- gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN;
- gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4;
- gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn;
-
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.handler = zfcp_fc_complete;
+ fc_req->ct_els.handler_data = &completion;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
+ sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));
+
+ zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
+ FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
+ gid_pn_req->gid_pn.fn_wwpn = port->wwpn;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
adapter->pool.gid_pn_req,
ZFCP_FC_CTELS_TMO);
if (!ret) {
wait_for_completion(&completion);
- zfcp_fc_ns_gid_pn_eval(gid_pn);
+ zfcp_fc_ns_gid_pn_eval(fc_req);
}
return ret;
}
/**
- * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
+ * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
* @port: port where GID_PN request is needed
* return: -ENOMEM on error, 0 otherwise
*/
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
int ret;
- struct zfcp_fc_gid_pn *gid_pn;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
- gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
- if (!gid_pn)
+ fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- memset(gid_pn, 0, sizeof(*gid_pn));
+ memset(fc_req, 0, sizeof(*fc_req));
ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
if (ret)
goto out;
- ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
+ ret = zfcp_fc_ns_gid_pn_request(port, fc_req);
zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
- mempool_free(gid_pn, adapter->pool.gid_pn);
+ mempool_free(fc_req, adapter->pool.gid_pn);
return ret;
}
@@ -419,11 +420,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
static void zfcp_fc_adisc_handler(void *data)
{
- struct zfcp_fc_els_adisc *adisc = data;
- struct zfcp_port *port = adisc->els.port;
- struct fc_els_adisc *adisc_resp = &adisc->adisc_resp;
+ struct zfcp_fc_req *fc_req = data;
+ struct zfcp_port *port = fc_req->ct_els.port;
+ struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;
- if (adisc->els.status) {
+ if (fc_req->ct_els.status) {
/* request rejected or timed out */
zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
"fcadh_1");
@@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data)
out:
atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
put_device(&port->dev);
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
static int zfcp_fc_adisc(struct zfcp_port *port)
{
- struct zfcp_fc_els_adisc *adisc;
+ struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
+ struct Scsi_Host *shost = adapter->scsi_host;
int ret;
- adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC);
- if (!adisc)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
+ if (!fc_req)
return -ENOMEM;
- adisc->els.port = port;
- adisc->els.req = &adisc->req;
- adisc->els.resp = &adisc->resp;
- sg_init_one(adisc->els.req, &adisc->adisc_req,
+ fc_req->ct_els.port = port;
+ fc_req->ct_els.req = &fc_req->sg_req;
+ fc_req->ct_els.resp = &fc_req->sg_rsp;
+ sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
sizeof(struct fc_els_adisc));
- sg_init_one(adisc->els.resp, &adisc->adisc_resp,
+ sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
sizeof(struct fc_els_adisc));
- adisc->els.handler = zfcp_fc_adisc_handler;
- adisc->els.handler_data = adisc;
+ fc_req->ct_els.handler = zfcp_fc_adisc_handler;
+ fc_req->ct_els.handler_data = fc_req;
/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
without FC-AL-2 capability, so we don't set it */
- adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host);
- adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host);
- adisc->adisc_req.adisc_cmd = ELS_ADISC;
- hton24(adisc->adisc_req.adisc_port_id,
- fc_host_port_id(adapter->scsi_host));
+ fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
+ fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
+ fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
+ hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els,
+ ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
- kmem_cache_free(zfcp_data.adisc_cache, adisc);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
return ret;
}
@@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port)
put_device(&port->dev);
}
-static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num)
+static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
- struct scatterlist *sg = &gpn_ft->sg_req;
-
- kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
- zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
-
- kfree(gpn_ft);
-}
+ struct zfcp_fc_req *fc_req;
-static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num)
-{
- struct zfcp_fc_gpn_ft *gpn_ft;
- struct zfcp_fc_gpn_ft_req *req;
-
- gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL);
- if (!gpn_ft)
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
return NULL;
- req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
- if (!req) {
- kfree(gpn_ft);
- gpn_ft = NULL;
- goto out;
+ if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) {
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+ return NULL;
}
- sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
- if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
- zfcp_free_sg_env(gpn_ft, buf_num);
- gpn_ft = NULL;
- }
-out:
- return gpn_ft;
-}
+ sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
+ sizeof(struct zfcp_fc_gpn_ft_req));
+ return fc_req;
+}
-static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_bytes)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
- /* prepare CT IU for GPN_FT */
- req->ct_hdr.ct_rev = FC_CT_REV;
- req->ct_hdr.ct_fs_type = FC_FST_DIR;
- req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE;
- req->ct_hdr.ct_options = 0;
- req->ct_hdr.ct_cmd = FC_NS_GPN_FT;
- req->ct_hdr.ct_mr_size = max_bytes / 4;
- req->gpn_ft.fn_domain_id_scope = 0;
- req->gpn_ft.fn_area_id_scope = 0;
+ zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;
- /* prepare zfcp_send_ct */
- ct->handler = zfcp_fc_complete;
- ct->handler_data = &completion;
- ct->req = &gpn_ft->sg_req;
- ct->resp = gpn_ft->sg_resp;
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
- ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL,
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
ZFCP_FC_CTELS_TMO);
if (!ret)
wait_for_completion(&completion);
@@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
list_move_tail(&port->list, lh);
}
-static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
+static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
struct zfcp_adapter *adapter, int max_entries)
{
- struct zfcp_fsf_ct_els *ct = &gpn_ft->ct;
- struct scatterlist *sg = gpn_ft->sg_resp;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct scatterlist *sg = &fc_req->sg_rsp;
struct fc_ct_hdr *hdr = sg_virt(sg);
struct fc_gpn_ft_resp *acc = sg_virt(sg);
struct zfcp_port *port, *tmp;
@@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft,
u32 d_id;
int ret = 0, x, last = 0;
- if (ct->status)
+ if (ct_els->status)
return -EIO;
if (hdr->ct_cmd != FC_FS_ACC) {
@@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work)
struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
scan_work);
int ret, i;
- struct zfcp_fc_gpn_ft *gpn_ft;
+ struct zfcp_fc_req *fc_req;
int chain, max_entries, buf_num, max_bytes;
chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
@@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work)
if (zfcp_fc_wka_port_get(&adapter->gs->ds))
return;
- gpn_ft = zfcp_alloc_sg_env(buf_num);
- if (!gpn_ft)
+ fc_req = zfcp_alloc_sg_env(buf_num);
+ if (!fc_req)
goto out;
for (i = 0; i < 3; i++) {
- ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
+ ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
if (!ret) {
- ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries);
+ ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
if (ret == -EAGAIN)
ssleep(1);
else
break;
}
}
- zfcp_free_sg_env(gpn_ft, buf_num);
+ zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
zfcp_fc_wka_port_put(&adapter->gs->ds);
}
+static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ char devno[] = "DEVNO:";
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
+ struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
+ int ret;
+
+ zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));
+
+ sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
+ sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&completion);
+ if (ct_els->status)
+ return ct_els->status;
+
+ if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
+ !(strstr(gspn_rsp->gspn.fp_name, devno)))
+ snprintf(fc_host_symbolic_name(adapter->scsi_host),
+ FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
+ gspn_rsp->gspn.fp_name, devno,
+ dev_name(&adapter->ccw_device->dev),
+ init_utsname()->nodename);
+ else
+ strlcpy(fc_host_symbolic_name(adapter->scsi_host),
+ gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);
+
+ return 0;
+}
+
+static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
+ struct zfcp_fc_req *fc_req)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ struct Scsi_Host *shost = adapter->scsi_host;
+ struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
+ struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
+ struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
+ int ret, len;
+
+ zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
+ FC_SYMBOLIC_NAME_SIZE);
+ hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
+ len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
+ FC_SYMBOLIC_NAME_SIZE);
+ rspn_req->rspn.fr_name_len = len;
+
+ sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
+ sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));
+
+ ct_els->handler = zfcp_fc_complete;
+ ct_els->handler_data = &completion;
+ ct_els->req = &fc_req->sg_req;
+ ct_els->resp = &fc_req->sg_rsp;
+
+ ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
+ ZFCP_FC_CTELS_TMO);
+ if (!ret)
+ wait_for_completion(&completion);
+}
+
+/**
+ * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
+ * @work: ns_up_work of the adapter where to update the symbolic port name
+ *
+ * Retrieve the current symbolic port name that may have been set by
+ * the hardware using the GSPN request and update the fc_host
+ * symbolic_name sysfs attribute. When running in NPIV mode (and hence
+ * the port name is unique for this system), update the symbolic port
+ * name to add Linux specific information and update the FC nameserver
+ * using the RSPN request.
+ */
+void zfcp_fc_sym_name_update(struct work_struct *work)
+{
+ struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
+ ns_up_work);
+ int ret;
+ struct zfcp_fc_req *fc_req;
+
+ if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
+ fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ return;
+
+ fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
+ if (!fc_req)
+ return;
+
+ ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
+ if (ret)
+ goto out_free;
+
+ ret = zfcp_fc_gspn(adapter, fc_req);
+ if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
+ goto out_ds_put;
+
+ memset(fc_req, 0, sizeof(*fc_req));
+ zfcp_fc_rspn(adapter, fc_req);
+
+out_ds_put:
+ zfcp_fc_wka_port_put(&adapter->gs->ds);
+out_free:
+ kmem_cache_free(zfcp_fc_req_cache, fc_req);
+}
+
static void zfcp_fc_ct_els_job_handler(void *data)
{
struct fc_bsg_job *job = data;
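
The last hunk adds the symbolic-port-name machinery: zfcp_fc_ct_ns_init() factors out the CT preamble shared by GID_PN, GPN_FT, GSPN_ID and RSPN_ID, and ns_up_work drives a GSPN followed, in NPIV mode only, by an RSPN. Condensed to its control flow (error paths and the WKA-port get/put elided; function names as in the patch):

static void demo_sym_name_flow(struct zfcp_adapter *adapter,
                               struct zfcp_fc_req *fc_req)
{
        if (zfcp_fc_gspn(adapter, fc_req))      /* fetch current name */
                return;
        if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
                return;         /* shared port: name not ours to publish */
        memset(fc_req, 0, sizeof(*fc_req));     /* reuse the one buffer */
        zfcp_fc_rspn(adapter, fc_req);          /* register with nameserver */
}
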
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index b464ae01086c..4561f3bf7300 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -64,33 +64,16 @@ struct zfcp_fc_gid_pn_req {
} __packed;
/**
- * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response
+ * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response
* @ct_hdr: FC GS common transport header
* @gid_pn: GID_PN response
*/
-struct zfcp_fc_gid_pn_resp {
+struct zfcp_fc_gid_pn_rsp {
struct fc_ct_hdr ct_hdr;
struct fc_gid_pn_resp gid_pn;
} __packed;
/**
- * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatterlist entry for request data
- * @sg_resp: scatterlist entry for response data
- * @gid_pn_req: GID_PN request data
- * @gid_pn_resp: GID_PN response data
- */
-struct zfcp_fc_gid_pn {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp;
- struct zfcp_fc_gid_pn_req gid_pn_req;
- struct zfcp_fc_gid_pn_resp gid_pn_resp;
- struct zfcp_port *port;
-};
-
-/**
* struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request
* @ct_hdr: FC GS common transport header
* @gpn_ft: GPN_FT request
@@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req {
} __packed;
/**
- * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response
+ * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request
* @ct_hdr: FC GS common transport header
- * @gpn_ft: Array of gpn_ft response data to fill one memory page
+ * @gspn: GSPN_ID request
*/
-struct zfcp_fc_gpn_ft_resp {
+struct zfcp_fc_gspn_req {
struct fc_ct_hdr ct_hdr;
- struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE];
+ struct fc_gid_pn_resp gspn;
} __packed;
/**
- * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request
- * @ct: data passed to zfcp_fsf for issuing fsf request
- * @sg_req: scatter list entry for gpn_ft request
- * @sg_resp: scatter list entries for gpn_ft responses (per memory page)
+ * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response
+ * @ct_hdr: FC GS common transport header
+ * @gspn: GSPN_ID response
+ * @name: The name string of the GSPN_ID response
*/
-struct zfcp_fc_gpn_ft {
- struct zfcp_fsf_ct_els ct;
- struct scatterlist sg_req;
- struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS];
-};
+struct zfcp_fc_gspn_rsp {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_gspn_resp gspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
/**
- * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC
- * @els: data required for issuing els fsf command
- * @req: scatterlist entry for ELS ADISC request
- * @resp: scatterlist entry for ELS ADISC response
- * @adisc_req: ELS ADISC request data
- * @adisc_resp: ELS ADISC response data
+ * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request
+ * @ct_hdr: FC GS common transport header
+ * @rspn: RSPN_ID request
+ * @name: The name string of the RSPN_ID request
*/
-struct zfcp_fc_els_adisc {
- struct zfcp_fsf_ct_els els;
- struct scatterlist req;
- struct scatterlist resp;
- struct fc_els_adisc adisc_req;
- struct fc_els_adisc adisc_resp;
+struct zfcp_fc_rspn_req {
+ struct fc_ct_hdr ct_hdr;
+ struct fc_ns_rspn rspn;
+ char name[FC_SYMBOLIC_NAME_SIZE];
+} __packed;
+
+/**
+ * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp
+ * @ct_els: data required for issuing fsf command
+ * @sg_req: scatterlist entry for request data
+ * @sg_rsp: scatterlist entry for response data
+ * @u: request specific data
+ */
+struct zfcp_fc_req {
+ struct zfcp_fsf_ct_els ct_els;
+ struct scatterlist sg_req;
+ struct scatterlist sg_rsp;
+ union {
+ struct {
+ struct fc_els_adisc req;
+ struct fc_els_adisc rsp;
+ } adisc;
+ struct {
+ struct zfcp_fc_gid_pn_req req;
+ struct zfcp_fc_gid_pn_rsp rsp;
+ } gid_pn;
+ struct {
+ struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1];
+ struct zfcp_fc_gpn_ft_req req;
+ } gpn_ft;
+ struct {
+ struct zfcp_fc_gspn_req req;
+ struct zfcp_fc_gspn_rsp rsp;
+ } gspn;
+ struct {
+ struct zfcp_fc_rspn_req req;
+ struct fc_ct_hdr rsp;
+ } rspn;
+ } u;
};
/**
@@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports {
* zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd
* @fcp: fcp_cmnd to setup
* @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB
+ * @tm_flags: task management flags to setup task management command
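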
*/
static inline
-void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
+void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi,
+ u8 tm_flags)
{
char tag[2];
int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun);
+ if (unlikely(tm_flags)) {
+ fcp->fc_tm_flags = tm_flags;
+ return;
+ }
+
if (scsi_populate_tag_msg(scsi, tag)) {
switch (tag[0]) {
case MSG_ORDERED_TAG:
@@ -226,19 +247,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi)
}
/**
- * zfcp_fc_fcp_tm - setup FCP command as task management command
- * @fcp: fcp_cmnd to setup
- * @dev: scsi_device where to send the task management command
- * @tm: task management flags to setup tm command
- */
-static inline
-void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags)
-{
- int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun);
- fcp->fc_tm_flags |= tm_flags;
-}
-
-/**
* zfcp_fc_evap_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly
* @fcp_rsp: FCP RSP IU to evaluate
* @scsi: SCSI command where to update status and sense buffer
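
struct zfcp_fc_req replaces five bespoke request containers with one union sized by its largest member, so a single kmem_cache can back every CT and ELS exchange. The layout is deliberate: u.gpn_ft puts sg_rsp2 first so that it directly follows the common sg_rsp, and zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num) then treats the two as one contiguous scatterlist array. A compile-time statement of that assumption (it holds here because sg_rsp and the union are both pointer-aligned, leaving no padding):

#include <linux/bug.h>
#include <linux/kernel.h>
#include "zfcp_fc.h"

static inline void demo_gpn_ft_layout_check(void)
{
        BUILD_BUG_ON(offsetof(struct zfcp_fc_req, u.gpn_ft.sg_rsp2) !=
                     offsetof(struct zfcp_fc_req, sg_rsp) +
                     sizeof(struct scatterlist));
}
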
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 60ff9d172c79..a0e05ef65924 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -18,6 +18,8 @@
#include "zfcp_qdio.h"
#include "zfcp_reqlist.h"
+struct kmem_cache *zfcp_fsf_qtcb_cache;
+
static void zfcp_fsf_request_timeout_handler(unsigned long data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
@@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
}
if (likely(req->qtcb))
- kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
+ kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req);
}
@@ -212,7 +214,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
zfcp_dbf_hba_fsf_uss("fssrh_1", req);
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
return;
}
@@ -265,7 +267,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
break;
}
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
zfcp_fsf_req_free(req);
atomic_inc(&adapter->stat_miss);
@@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
if (likely(pool))
qtcb = mempool_alloc(pool, GFP_ATOMIC);
else
- qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
+ qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
if (unlikely(!qtcb))
return NULL;
@@ -723,6 +725,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
struct zfcp_adapter *adapter = qdio->adapter;
struct zfcp_fsf_req *req;
struct fsf_status_read_buffer *sr_buf;
+ struct page *page;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
goto out;
}
- sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
- if (!sr_buf) {
+ page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+ if (!page) {
retval = -ENOMEM;
goto failed_buf;
}
+ sr_buf = page_address(page);
memset(sr_buf, 0, sizeof(*sr_buf));
req->data = sr_buf;
@@ -755,7 +759,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
failed_req_send:
req->data = NULL;
- mempool_free(sr_buf, adapter->pool.status_read_data);
+ mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
failed_buf:
zfcp_dbf_hba_fsf_uss("fssr__1", req);
zfcp_fsf_req_free(req);
@@ -1552,7 +1556,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -1605,7 +1609,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
SBAL_FLAGS0_TYPE_READ,
qdio->adapter->pool.erp_req);
- if (unlikely(IS_ERR(req))) {
+ if (IS_ERR(req)) {
retval = PTR_ERR(req);
goto out;
}
@@ -2206,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
if (scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
@@ -2284,7 +2288,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
goto out;
}
- req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
req->data = scmnd;
req->handler = zfcp_fsf_fcp_task_mgmt_handler;
req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
@@ -2296,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
- zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags);
zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
if (!zfcp_fsf_req_send(req))
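
The unlikely(IS_ERR(req)) cleanups in the WKA-port hunks are not a behavior change: IS_ERR() already carries the branch hint internally, so the outer wrapper was redundant. From include/linux/err.h of this era:

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline long __must_check IS_ERR(const void *ptr)
{
        return IS_ERR_VALUE((unsigned long)ptr);
}
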
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ddb5800823a9..2a4991d6d4d5 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
return SUCCESS;
}
-int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
+struct scsi_transport_template *zfcp_scsi_transport_template;
+
+static struct scsi_host_template zfcp_scsi_host_template = {
+ .module = THIS_MODULE,
+ .name = "zfcp",
+ .queuecommand = zfcp_scsi_queuecommand,
+ .eh_abort_handler = zfcp_scsi_eh_abort_handler,
+ .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
+ .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
+ .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
+ .slave_alloc = zfcp_scsi_slave_alloc,
+ .slave_configure = zfcp_scsi_slave_configure,
+ .slave_destroy = zfcp_scsi_slave_destroy,
+ .change_queue_depth = zfcp_scsi_change_queue_depth,
+ .proc_name = "zfcp",
+ .can_queue = 4096,
+ .this_id = -1,
+ .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
+ .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
+ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
+ .cmd_per_lun = 1,
+ .use_clustering = 1,
+ .shost_attrs = zfcp_sysfs_shost_attrs,
+ .sdev_attrs = zfcp_sysfs_sdev_attrs,
+};
+
+/**
+ * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
+ * @adapter: The zfcp adapter to register with the SCSI midlayer
+ */
+int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
struct ccw_dev_id dev_id;
@@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
ccw_device_get_id(adapter->ccw_device, &dev_id);
/* register adapter as SCSI host with mid layer of SCSI stack */
- adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template,
+ adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
sizeof (struct zfcp_adapter *));
if (!adapter->scsi_host) {
dev_err(&adapter->ccw_device->dev,
@@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
adapter->scsi_host->max_channel = 0;
adapter->scsi_host->unique_id = dev_id.devno;
adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
- adapter->scsi_host->transportt = zfcp_data.scsi_transport_template;
+ adapter->scsi_host->transportt = zfcp_scsi_transport_template;
adapter->scsi_host->hostdata[0] = (unsigned long) adapter;
@@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
return 0;
}
-void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
+/**
+ * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
+ * @adapter: The zfcp adapter to unregister.
+ */
+void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
struct Scsi_Host *shost;
struct zfcp_port *port;
@@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter)
scsi_remove_host(shost);
scsi_host_put(shost);
adapter->scsi_host = NULL;
-
- return;
}
static struct fc_host_statistics*
@@ -688,33 +720,8 @@ struct fc_function_template zfcp_transport_functions = {
/* no functions registered for following dynamic attributes but
directly set by LLDD */
.show_host_port_type = 1,
+ .show_host_symbolic_name = 1,
.show_host_speed = 1,
.show_host_port_id = 1,
.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
};
-
-struct zfcp_data zfcp_data = {
- .scsi_host_template = {
- .name = "zfcp",
- .module = THIS_MODULE,
- .proc_name = "zfcp",
- .change_queue_depth = zfcp_scsi_change_queue_depth,
- .slave_alloc = zfcp_scsi_slave_alloc,
- .slave_configure = zfcp_scsi_slave_configure,
- .slave_destroy = zfcp_scsi_slave_destroy,
- .queuecommand = zfcp_scsi_queuecommand,
- .eh_abort_handler = zfcp_scsi_eh_abort_handler,
- .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
- .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
- .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
- .can_queue = 4096,
- .this_id = -1,
- .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ,
- .cmd_per_lun = 1,
- .use_clustering = 1,
- .sdev_attrs = zfcp_sysfs_sdev_attrs,
- .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8),
- .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
- .shost_attrs = zfcp_sysfs_shost_attrs,
- },
-};
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8616496ffc02..4a1f029c4fe9 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS
source "drivers/scsi/cxgbi/Kconfig"
source "drivers/scsi/bnx2i/Kconfig"
+source "drivers/scsi/bnx2fc/Kconfig"
source "drivers/scsi/be2iscsi/Kconfig"
config SGIWD93_SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index ef6de669424b..7ad0b8a79ae8 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_LIBFCOE) += fcoe/
obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 9a5629f94f95..e7cd2fcbe036 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
- cancel_delayed_work(&hostdata->coroutine);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&hostdata->coroutine);
}
/**
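
cancel_delayed_work() only removes a not-yet-running work item, so NCR5380 had to chase it with flush_scheduled_work(), which drains every item on the shared workqueue. cancel_delayed_work_sync() does both jobs for just this one work item; the arcmsr hunks below make the same move with flush_work_sync() on a specific work. A minimal sketch with illustrative names:

#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
        /* ... periodic or deferred driver work ... */
}
static DECLARE_DELAYED_WORK(demo_work, demo_fn);

static void demo_exit(void)
{
        /* cancel a pending instance and wait for a running one, without
         * draining unrelated items the way flush_scheduled_work() would */
        cancel_delayed_work_sync(&demo_work);
}
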
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 984bd527c6c9..da7b9887ec48 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1020,7 +1020,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
arcmsr_stop_adapter_bgrb(acb);
@@ -1066,7 +1066,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
(struct AdapterControlBlock *)host->hostdata;
del_timer_sync(&acb->eternal_timer);
arcmsr_disable_outbound_ints(acb);
- flush_scheduled_work();
+ flush_work_sync(&acb->arcmsr_do_message_isr_bh);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index eaaa8813067d..868cc5590145 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
}
/**
- * beiscsi_conn_get_param - get the iscsi parameter
- * @cls_conn: pointer to iscsi cls conn
+ * beiscsi_ep_get_param - get the iscsi parameter
+ * @ep: pointer to iscsi ep
* @param: parameter type identifier
* @buf: buffer pointer
*
* returns iscsi parameter
*/
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
- struct beiscsi_endpoint *beiscsi_ep;
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
int len = 0;
SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param);
- beiscsi_ep = beiscsi_conn->ep;
- if (!beiscsi_ep) {
- SE_DEBUG(DBG_LVL_1,
- "In beiscsi_conn_get_param , no beiscsi_ep\n");
- return -ENODEV;
- }
switch (param) {
case ISCSI_PARAM_CONN_PORT:
@@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 8950a702b9f4..9c532797c29e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_fd, int is_leading);
-int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf);
+int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf);
int beiscsi_get_host_param(struct Scsi_Host *shost,
enum iscsi_host_param param, char *buf);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 638c72b7f94a..24e20ba9633c 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.bind_conn = beiscsi_conn_bind,
.destroy_conn = iscsi_conn_teardown,
.set_param = beiscsi_set_param,
- .get_conn_param = beiscsi_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = beiscsi_get_host_param,
.start_conn = beiscsi_conn_start,
@@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.alloc_pdu = beiscsi_alloc_pdu,
.parse_pdu_itt = beiscsi_parse_pdu,
.get_stats = beiscsi_conn_get_stats,
+ .get_ep_param = beiscsi_ep_get_param,
.ep_connect = beiscsi_ep_connect,
.ep_poll = beiscsi_ep_poll,
.ep_disconnect = beiscsi_ep_disconnect,
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
new file mode 100644
index 000000000000..69d031d98469
--- /dev/null
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -0,0 +1,1080 @@
+#ifndef __57XX_FCOE_HSI_LINUX_LE__
+#define __57XX_FCOE_HSI_LINUX_LE__
+
+/*
+ * common data for all protocols
+ */
+struct b577xx_doorbell_hdr {
+ u8 header;
+#define B577XX_DOORBELL_HDR_RX (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT 0
+#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
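Throughout this header, sub-byte fields are packed with paired MASK/SHIFT macros in which the mask already incorporates the shift (e.g. 0xF<<4). A hedged accessor sketch illustrating the convention; set_conn_type()/get_conn_type() are hypothetical helpers, not part of the patch:

static inline void set_conn_type(struct b577xx_doorbell_hdr *hdr, u8 type)
{
	/* clear the field, then merge the shifted, masked new value */
	hdr->header &= ~B577XX_DOORBELL_HDR_CONN_TYPE;
	hdr->header |= (type << B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT) &
		       B577XX_DOORBELL_HDR_CONN_TYPE;
}

static inline u8 get_conn_type(const struct b577xx_doorbell_hdr *hdr)
{
	return (hdr->header & B577XX_DOORBELL_HDR_CONN_TYPE) >>
	       B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
}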
+
+/*
+ * doorbell message sent to the chip
+ */
+struct b577xx_doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+ u16 prod;
+ u8 zero_fill1;
+ struct b577xx_doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+ struct b577xx_doorbell_hdr header;
+ u8 zero_fill1;
+ u16 prod;
+#endif
+};
+
+
+struct regpair {
+ __le32 lo;
+ __le32 hi;
+};
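struct regpair carries a 64-bit bus address as two explicitly little-endian 32-bit halves. A hedged sketch of loading it from a dma_addr_t; regpair_set() is a hypothetical helper, not part of the patch:

static inline void regpair_set(struct regpair *rp, dma_addr_t addr)
{
	/* lower_32_bits()/upper_32_bits() split the address portably even
	 * when dma_addr_t is 32-bit; cpu_to_le32() matches the __le32
	 * field types declared above. */
	rp->lo = cpu_to_le32(lower_32_bits(addr));
	rp->hi = cpu_to_le32(upper_32_bits(addr));
}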
+
+
+/*
+ * Fixed-size structure so that it can be placed in a union
+ */
+struct fcoe_abts_rsp_union {
+ u32 r_ctl;
+ u32 abts_rsp_payload[7];
+};
+
+
+/*
+ * Four registers (16 bytes) in size
+ */
+struct fcoe_bd_ctx {
+ u32 buf_addr_hi;
+ u32 buf_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 buf_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 buf_len;
+ u16 rsrv0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 rsrv1;
+ u16 flags;
+#elif defined(__LITTLE_ENDIAN)
+ u16 flags;
+ u16 rsrv1;
+#endif
+};
+
+
+struct fcoe_cleanup_flow_info {
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u16 task_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 task_id;
+ u16 reserved1;
+#endif
+ u32 reserved2[7];
+};
+
+
+struct fcoe_fcp_cmd_payload {
+ u32 opaque[8];
+};
+
+struct fcoe_fc_hdr {
+#if defined(__BIG_ENDIAN)
+ u8 cs_ctl;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 r_ctl;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 r_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 seq_id;
+ u8 df_ctl;
+ u16 seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 seq_cnt;
+ u8 df_ctl;
+ u8 seq_id;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 type;
+ u8 f_ctl[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 f_ctl[3];
+ u8 type;
+#endif
+ u32 parameters;
+#if defined(__BIG_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#endif
+};
+
+struct fcoe_fc_frame {
+ struct fcoe_fc_hdr fc_hdr;
+ u32 reserved0[2];
+};
+
+union fcoe_cmd_flow_info {
+ struct fcoe_fcp_cmd_payload fcp_cmd_payload;
+ struct fcoe_fc_frame mp_fc_frame;
+};
+
+
+
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+
+struct fcoe_fcp_rsp_payload {
+ struct regpair reserved0;
+ u32 fcp_resid;
+#if defined(__BIG_ENDIAN)
+ u16 retry_delay_timer;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u8 scsi_status_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 scsi_status_code;
+ struct fcoe_fcp_rsp_flags fcp_flags;
+ u16 retry_delay_timer;
+#endif
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+};
+
+
+/*
+ * Fixed-size structure so that it can be placed in a union
+ */
+struct fcoe_fcp_rsp_union {
+ struct fcoe_fcp_rsp_payload payload;
+ struct regpair reserved0;
+};
+
+
+struct fcoe_fcp_xfr_rdy_payload {
+ u32 burst_len;
+ u32 data_ro;
+};
+
+struct fcoe_read_flow_info {
+ struct fcoe_fc_hdr fc_data_in_hdr;
+ u32 reserved[2];
+};
+
+struct fcoe_write_flow_info {
+ struct fcoe_fc_hdr fc_data_out_hdr;
+ struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
+};
+
+union fcoe_rsp_flow_info {
+ struct fcoe_fcp_rsp_union fcp_rsp;
+ struct fcoe_abts_rsp_union abts_rsp;
+};
+
+/*
+ * 32 bytes used for general purposes
+ */
+union fcoe_general_task_ctx {
+ union fcoe_cmd_flow_info cmd_info;
+ struct fcoe_read_flow_info read_info;
+ struct fcoe_write_flow_info write_info;
+ union fcoe_rsp_flow_info rsp_info;
+ struct fcoe_cleanup_flow_info cleanup_info;
+ u32 comp_info[8];
+};
+
+
+/*
+ * FCoE KCQ CQE parameters
+ */
+union fcoe_kcqe_params {
+ u32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE
+ */
+struct fcoe_kcqe {
+ u32 fcoe_conn_id;
+ u32 completion_status;
+ u32 fcoe_conn_context_id;
+ union fcoe_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+ u8 op_code;
+ u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+ u16 qe_self_seq;
+ u8 op_code;
+ u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE KWQE header
+ */
+struct fcoe_kwqe_header {
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+ u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+ u8 op_code;
+ u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 1
+ */
+struct fcoe_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 num_tasks;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_tasks;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 task_list_pbl_addr_lo;
+ u32 task_list_pbl_addr_hi;
+ u32 dummy_buffer_addr_lo;
+ u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rq_num_wqes;
+ u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+ u16 sq_num_wqes;
+ u16 rq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 cq_num_wqes;
+ u16 rq_buffer_log_size;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_buffer_log_size;
+ u16 cq_num_wqes;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+ u8 num_sessions_log;
+ u16 mtu;
+#elif defined(__LITTLE_ENDIAN)
+ u16 mtu;
+ u8 num_sessions_log;
+ u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * FCoE firmware init request 2
+ */
+struct fcoe_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 hash_tbl_pbl_addr_lo;
+ u32 hash_tbl_pbl_addr_hi;
+ u32 t2_hash_tbl_addr_lo;
+ u32 t2_hash_tbl_addr_hi;
+ u32 t2_ptr_hash_tbl_addr_lo;
+ u32 t2_ptr_hash_tbl_addr_hi;
+ u32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3
+ */
+struct fcoe_kwqe_init3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 error_bit_map_lo;
+ u32 error_bit_map_hi;
+#if defined(__BIG_ENDIAN)
+ u8 reserved21[3];
+ u8 cached_session_enable;
+#elif defined(__LITTLE_ENDIAN)
+ u8 cached_session_enable;
+ u8 reserved21[3];
+#endif
+ u32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1
+ */
+struct fcoe_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 fcoe_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 fcoe_conn_id;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 sq_addr_lo;
+ u32 sq_addr_hi;
+ u32 rq_pbl_addr_lo;
+ u32 rq_pbl_addr_hi;
+ u32 rq_first_pbe_addr_lo;
+ u32 rq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 reserved0;
+#endif
+};
+
+/*
+ * FCoE connection offload request 2
+ */
+struct fcoe_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_max_fc_pay_len;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 cq_addr_lo;
+ u32 cq_addr_hi;
+ u32 xferq_addr_lo;
+ u32 xferq_addr_hi;
+ u32 conn_db_addr_lo;
+ u32 conn_db_addr_hi;
+ u32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3
+ */
+struct fcoe_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+ struct fcoe_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 tx_max_conc_seqs_c3;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+#endif
+ u32 reserved;
+ u32 confq_first_pbe_addr_lo;
+ u32 confq_first_pbe_addr_hi;
+#if defined(__BIG_ENDIAN)
+ u16 rx_max_fc_pay_len;
+ u16 tx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 tx_total_conc_seqs;
+ u16 rx_max_fc_pay_len;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 rx_open_seqs_exch_c3;
+ u8 rx_max_conc_seqs_c3;
+ u16 rx_total_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_total_conc_seqs;
+ u8 rx_max_conc_seqs_c3;
+ u8 rx_open_seqs_exch_c3;
+#endif
+};
+
+/*
+ * FCoE connection offload request 4
+ */
+struct fcoe_kwqe_conn_offload4 {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u8 reserved2;
+ u8 e_d_tov_timer_val;
+#elif defined(__LITTLE_ENDIAN)
+ u8 e_d_tov_timer_val;
+ u8 reserved2;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u8 dst_mac_addr_hi16[2];
+#endif
+ u8 dst_mac_addr_lo32[4];
+ u32 lcq_addr_lo;
+ u32 lcq_addr_hi;
+ u32 confq_pbl_base_addr_lo;
+ u32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request
+ */
+struct fcoe_kwqe_conn_enable_disable {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u8 src_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+ u8 src_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 src_mac_addr_hi16[2];
+ u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+#endif
+ u8 dst_mac_addr_lo32[4];
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 dst_mac_addr_hi16[2];
+#elif defined(__LITTLE_ENDIAN)
+ u8 dst_mac_addr_hi16[2];
+ u16 reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 vlan_flag;
+ u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id[3];
+ u8 vlan_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+ u8 reserved3;
+ u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id[3];
+ u8 reserved3;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request
+ */
+struct fcoe_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 context_id;
+ u32 conn_id;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request
+ */
+struct fcoe_kwqe_destroy {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request
+ */
+struct fcoe_kwqe_stat {
+#if defined(__BIG_ENDIAN)
+ struct fcoe_kwqe_header hdr;
+ u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+ u16 reserved0;
+ struct fcoe_kwqe_header hdr;
+#endif
+ u32 stat_params_addr_lo;
+ u32 stat_params_addr_hi;
+ u32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE
+ */
+union fcoe_kwqe {
+ struct fcoe_kwqe_init1 init1;
+ struct fcoe_kwqe_init2 init2;
+ struct fcoe_kwqe_init3 init3;
+ struct fcoe_kwqe_conn_offload1 conn_offload1;
+ struct fcoe_kwqe_conn_offload2 conn_offload2;
+ struct fcoe_kwqe_conn_offload3 conn_offload3;
+ struct fcoe_kwqe_conn_offload4 conn_offload4;
+ struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+ struct fcoe_kwqe_conn_destroy conn_destroy;
+ struct fcoe_kwqe_destroy destroy;
+ struct fcoe_kwqe_stat statistics;
+};
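Every kernel work-queue request is a fixed-size union member identified by the opcode in its embedded fcoe_kwqe_header. A hedged sketch of filling the INIT1 entry; the FCOE_KWQE_* constants are defined in bnx2fc_constants.h further down, while task_list_dma and num_tasks are illustrative parameters:

static void sketch_build_init1(struct fcoe_kwqe_init1 *w,
			       dma_addr_t task_list_dma, u16 num_tasks)
{
	memset(w, 0, sizeof(*w));
	w->hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	/* the FCoE layer code occupies bits 6:4 of the flags byte */
	w->hdr.flags = FCOE_KWQE_LAYER_CODE <<
		       FCOE_KWQE_HEADER_LAYER_CODE_SHIFT;
	w->num_tasks = num_tasks;
	w->task_list_pbl_addr_lo = lower_32_bits(task_list_dma);
	w->task_list_pbl_addr_hi = upper_32_bits(task_list_dma);
}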
+
+struct fcoe_mul_sges_ctx {
+ struct regpair cur_sge_addr;
+#if defined(__BIG_ENDIAN)
+ u8 sgl_size;
+ u8 cur_sge_idx;
+ u16 cur_sge_off;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_sge_off;
+ u8 cur_sge_idx;
+ u8 sgl_size;
+#endif
+};
+
+struct fcoe_s_stat_ctx {
+ u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+struct fcoe_seq_ctx {
+#if defined(__BIG_ENDIAN)
+ u16 low_seq_cnt;
+ struct fcoe_s_stat_ctx s_stat;
+ u8 seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 seq_id;
+ struct fcoe_s_stat_ctx s_stat;
+ u16 low_seq_cnt;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 err_seq_cnt;
+ u16 high_seq_cnt;
+#elif defined(__LITTLE_ENDIAN)
+ u16 high_seq_cnt;
+ u16 err_seq_cnt;
+#endif
+ u32 low_exp_ro;
+ u32 high_exp_ro;
+};
+
+
+struct fcoe_single_sge_ctx {
+ struct regpair cur_buf_addr;
+#if defined(__BIG_ENDIAN)
+ u16 reserved0;
+ u16 cur_buf_rem;
+#elif defined(__LITTLE_ENDIAN)
+ u16 cur_buf_rem;
+ u16 reserved0;
+#endif
+};
+
+union fcoe_sgl_ctx {
+ struct fcoe_single_sge_ctx single_sge;
+ struct fcoe_mul_sges_ctx mul_sges;
+};
+
+
+
+/*
+ * FCoE SQ element
+ */
+struct fcoe_sqe {
+ u16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
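A send-queue entry is a single 16-bit word carrying a 15-bit task id plus a producer toggle bit; an entry is treated as valid only while its toggle bit matches the current pass over the ring. A hedged posting sketch, with the ring bookkeeping fields passed in as illustrative parameters:

static void sketch_post_sqe(struct fcoe_sqe *sq, u16 *prod_idx,
			    u8 *toggle, u16 ring_size, u16 xid)
{
	struct fcoe_sqe *sqe = &sq[*prod_idx];

	sqe->wqe = (xid << FCOE_SQE_TASK_ID_SHIFT) & FCOE_SQE_TASK_ID;
	sqe->wqe |= *toggle << FCOE_SQE_TOGGLE_BIT_SHIFT;

	/* the toggle bit flips each time the producer wraps the ring */
	if (++(*prod_idx) == ring_size) {
		*prod_idx = 0;
		*toggle ^= 1;
	}
}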
+
+
+
+struct fcoe_task_ctx_entry_tx_only {
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry_txwr_rxrd {
+#if defined(__BIG_ENDIAN)
+ u16 verify_tx_seq;
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
+ u8 init_flags;
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
+#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
+ u16 verify_tx_seq;
+#endif
+};
+
+/*
+ * Common section. Both TX and RX processing might write and read from it in
+ * different flows
+ */
+struct fcoe_task_ctx_entry_tx_rx_cmn {
+ u32 data_2_trns;
+ union fcoe_general_task_ctx general;
+#if defined(__BIG_ENDIAN)
+ u16 tx_low_seq_cnt;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u8 tx_seq_id;
+#elif defined(__LITTLE_ENDIAN)
+ u8 tx_seq_id;
+ struct fcoe_s_stat_ctx tx_s_stat;
+ u16 tx_low_seq_cnt;
+#endif
+ u32 common_flags;
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
+#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
+};
+
+struct fcoe_task_ctx_entry_rxwr_txrd {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+#elif defined(__LITTLE_ENDIAN)
+ u16 rx_flags;
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
+#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
+ u16 rx_id;
+#endif
+};
+
+struct fcoe_task_ctx_entry_rx_only {
+ struct fcoe_seq_ctx seq_ctx;
+ struct fcoe_seq_ctx ooo_seq_ctx;
+ u32 rsrv3;
+ union fcoe_sgl_ctx sgl_ctx;
+};
+
+struct fcoe_task_ctx_entry {
+ struct fcoe_task_ctx_entry_tx_only tx_wr_only;
+ struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
+ struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
+ struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
+ struct fcoe_task_ctx_entry_rx_only rx_wr_only;
+ u32 reserved[4];
+};
+
+
+/*
+ * FCoE XFRQ element
+ */
+struct fcoe_xfrqe {
+ u16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE CONFQ element
+ */
+struct fcoe_confqe {
+#if defined(__BIG_ENDIAN)
+ u16 rx_id;
+ u16 ox_id;
+#elif defined(__LITTLE_ENDIAN)
+ u16 ox_id;
+ u16 rx_id;
+#endif
+ u32 param;
+};
+
+
+/*
+ * FCoE connection database
+ */
+struct fcoe_conn_db {
+#if defined(__BIG_ENDIAN)
+ u16 rsrv0;
+ u16 rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+ u16 rq_prod;
+ u16 rsrv0;
+#endif
+ u32 rsrv1;
+ struct regpair cq_arm;
+};
+
+
+/*
+ * FCoE CQ element
+ */
+struct fcoe_cqe {
+ u16 wqe;
+#define FCOE_CQE_CQE_INFO (0x3FFF<<0)
+#define FCOE_CQE_CQE_INFO_SHIFT 0
+#define FCOE_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_CQE_TOGGLE_BIT_SHIFT 15
+};
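Completion-queue entries use the same toggle-bit scheme from the consumer side: the driver harvests entries until it reaches one whose toggle bit no longer matches the expected value, which marks the producer's current write position. A hedged validity check, not verbatim driver code:

static bool sketch_cqe_valid(const struct fcoe_cqe *cqe, u8 expect_toggle)
{
	u8 t = (cqe->wqe & FCOE_CQE_TOGGLE_BIT) >> FCOE_CQE_TOGGLE_BIT_SHIFT;

	return t == expect_toggle;	/* a stale entry fails the match */
}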
+
+
+/*
+ * FCoE error/warning reporting entry
+ */
+struct fcoe_err_report_entry {
+ u32 err_warn_bitmap_lo;
+ u32 err_warn_bitmap_hi;
+ u32 tx_buf_off;
+ u32 rx_buf_off;
+ struct fcoe_fc_hdr fc_hdr;
+};
+
+
+/*
+ * FCoE hash table entry (32 bytes)
+ */
+struct fcoe_hash_table_entry {
+#if defined(__BIG_ENDIAN)
+ u8 d_id_0;
+ u8 s_id_2;
+ u8 s_id_1;
+ u8 s_id_0;
+#elif defined(__LITTLE_ENDIAN)
+ u8 s_id_0;
+ u8 s_id_1;
+ u8 s_id_2;
+ u8 d_id_0;
+#endif
+#if defined(__BIG_ENDIAN)
+ u16 dst_mac_addr_hi;
+ u8 d_id_2;
+ u8 d_id_1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 d_id_1;
+ u8 d_id_2;
+ u16 dst_mac_addr_hi;
+#endif
+ u32 dst_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 vlan_id;
+ u16 src_mac_addr_hi;
+#elif defined(__LITTLE_ENDIAN)
+ u16 src_mac_addr_hi;
+ u16 vlan_id;
+#endif
+ u32 src_mac_addr_lo;
+#if defined(__BIG_ENDIAN)
+ u16 reserved1;
+ u8 reserved0;
+ u8 vlan_flag;
+#elif defined(__LITTLE_ENDIAN)
+ u8 vlan_flag;
+ u8 reserved0;
+ u16 reserved1;
+#endif
+ u32 reserved2;
+ u32 field_id;
+#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0)
+#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24)
+#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24
+#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31)
+#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31
+};
+
+/*
+ * FCoE pending work request CQE
+ */
+struct fcoe_pend_wq_cqe {
+ u16 wqe;
+#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0)
+#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0
+#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * FCoE RX statistics parameters section#0
+ */
+struct fcoe_rx_stat_params_section0 {
+ u32 fcoe_ver_cnt;
+ u32 fcoe_rx_pkt_cnt;
+ u32 fcoe_rx_byte_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ u32 fc_crc_cnt;
+ u32 eofa_del_cnt;
+ u32 miss_frame_cnt;
+ u32 seq_timeout_cnt;
+ u32 drop_seq_cnt;
+ u32 fcoe_rx_drop_pkt_cnt;
+ u32 fcp_rx_pkt_cnt;
+ u32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ u32 fcoe_tx_pkt_cnt;
+ u32 fcoe_tx_byte_cnt;
+ u32 fcp_tx_pkt_cnt;
+ u32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+};
+
+
+/*
+ * FCoE t2 hash table entry (64 bytes)
+ */
+struct fcoe_t2_hash_table_entry {
+ struct fcoe_hash_table_entry data;
+ struct regpair next;
+ struct regpair reserved0[3];
+};
+
+/*
+ * FCoE unsolicited CQE
+ */
+struct fcoe_unsolicited_cqe {
+ u16 wqe;
+#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0)
+#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0
+#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2)
+#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14)
+#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+
+#endif /* __57XX_FCOE_HSI_LINUX_LE__ */
diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
new file mode 100644
index 000000000000..6a38080e35ed
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Kconfig
@@ -0,0 +1,11 @@
+config SCSI_BNX2X_FCOE
+ tristate "Broadcom NetXtreme II FCoE support"
+ depends on PCI
+ select NETDEVICES
+ select NETDEV_1000
+ select LIBFC
+ select LIBFCOE
+ select CNIC
+ ---help---
+ This driver supports FCoE offload for the Broadcom NetXtreme II
+ devices.
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
new file mode 100644
index 000000000000..a92695a25176
--- /dev/null
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
+
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
new file mode 100644
index 000000000000..df2fc09ba479
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -0,0 +1,511 @@
+#ifndef _BNX2FC_H_
+#define _BNX2FC_H_
+/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/io.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+#include <scsi/fc_encode.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fc2.h>
+#include <scsi/fc_frame.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "57xx_hsi_bnx2fc.h"
+#include "bnx2fc_debug.h"
+#include "../../net/cnic_if.h"
+#include "bnx2fc_constants.h"
+
+#define BNX2FC_NAME "bnx2fc"
+#define BNX2FC_VERSION "1.0.0"
+
+#define PFX "bnx2fc: "
+
+#define BNX2X_DOORBELL_PCI_BAR 2
+
+#define BNX2FC_MAX_BD_LEN 0xffff
+#define BNX2FC_BD_SPLIT_SZ 0x8000
+#define BNX2FC_MAX_BDS_PER_CMD 256
+
+#define BNX2FC_SQ_WQES_MAX 256
+
+#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8)
+#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2)
+#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1)
+
+#define BNX2FC_RQ_WQES_MAX 16
+#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX)
+
+#define BNX2FC_NUM_MAX_SESS 128
+#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+
+#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096
+#define BNX2FC_MIN_PAYLOAD 256
+#define BNX2FC_MAX_PAYLOAD 2048
+
+#define BNX2FC_RQ_BUF_SZ 256
+#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ))
+
+#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe))
+#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe))
+#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ)
+#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe))
+#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe))
+#define BNX2FC_5771X_DB_PAGE_SIZE 128
+
+#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS
+#define BNX2FC_TASK_SIZE 128
+#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE)
+#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE)
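As a worked check of these sizing macros, assuming 4 KiB pages: each task context is 128 bytes, so one page holds 4096/128 = 32 contexts, and the 4096 outstanding-command contexts therefore need a 4096/32 = 128-entry page array.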
+
+#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8
+#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024)
+
+#define BNX2FC_MAX_SEQS 255
+
+#define BNX2FC_READ (1 << 1)
+#define BNX2FC_WRITE (1 << 0)
+
+#define BNX2FC_MIN_XID 0
+#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1)
+#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS)
+#define FCOE_MAX_XID \
+ (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256))
+#define BNX2FC_MAX_LUN 0xFFFF
+#define BNX2FC_MAX_FCP_TGT 256
+#define BNX2FC_MAX_CMD_LEN 16
+
+#define BNX2FC_TM_TIMEOUT 60 /* secs */
+#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */
+
+#define BNX2FC_WAIT_CNT 120
+#define BNX2FC_FW_TIMEOUT (3 * HZ)
+
+#define PORT_MAX 2
+
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+
+/* FC FCP Status */
+#define FC_GOOD 0
+
+#define BNX2FC_RNID_HBA 0x7
+
+/* bnx2fc driver uses only one instance of fcoe_percpu_s */
+extern struct fcoe_percpu_s bnx2fc_global;
+
+extern struct workqueue_struct *bnx2fc_wq;
+
+struct bnx2fc_percpu_s {
+ struct task_struct *iothread;
+ struct list_head work_list;
+ spinlock_t fp_work_lock;
+};
+
+
+struct bnx2fc_hba {
+ struct list_head link;
+ struct cnic_dev *cnic;
+ struct pci_dev *pcidev;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+ unsigned long reg_with_cnic;
+ #define BNX2FC_CNIC_REGISTERED 1
+ struct packet_type fcoe_packet_type;
+ struct packet_type fip_packet_type;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct workqueue_struct *timer_work_queue;
+ struct kref kref;
+ spinlock_t hba_lock;
+ struct mutex hba_mutex;
+ unsigned long adapter_state;
+ #define ADAPTER_STATE_UP 0
+ #define ADAPTER_STATE_GOING_DOWN 1
+ #define ADAPTER_STATE_LINK_DOWN 2
+ #define ADAPTER_STATE_READY 3
+ u32 flags;
+ unsigned long init_done;
+ #define BNX2FC_FW_INIT_DONE 0
+ #define BNX2FC_CTLR_INIT_DONE 1
+ #define BNX2FC_CREATE_DONE 2
+ struct fcoe_ctlr ctlr;
+ u8 vlan_enabled;
+ int vlan_id;
+ u32 next_conn_id;
+ struct fcoe_task_ctx_entry **task_ctx;
+ dma_addr_t *task_ctx_dma;
+ struct regpair *task_ctx_bd_tbl;
+ dma_addr_t task_ctx_bd_dma;
+
+ int hash_tbl_segment_count;
+ void **hash_tbl_segments;
+ void *hash_tbl_pbl;
+ dma_addr_t hash_tbl_pbl_dma;
+ struct fcoe_t2_hash_table_entry *t2_hash_tbl;
+ dma_addr_t t2_hash_tbl_dma;
+ char *t2_hash_tbl_ptr;
+ dma_addr_t t2_hash_tbl_ptr_dma;
+
+ char *dummy_buffer;
+ dma_addr_t dummy_buf_dma;
+
+ struct fcoe_statistics_params *stats_buffer;
+ dma_addr_t stats_buf_dma;
+
+ /*
+ * PCI related info.
+ */
+ u16 pci_did;
+ u16 pci_vid;
+ u16 pci_sdid;
+ u16 pci_svid;
+ u16 pci_func;
+ u16 pci_devno;
+
+ struct task_struct *l2_thread;
+
+ /* linkdown handling */
+ wait_queue_head_t shutdown_wait;
+ int wait_for_link_down;
+
+ /*destroy handling */
+ struct timer_list destroy_timer;
+ wait_queue_head_t destroy_wait;
+
+ /* Active list of offloaded sessions */
+ struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS];
+ int num_ofld_sess;
+
+ /* statistics */
+ struct completion stat_req_done;
+};
+
+#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr)
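A hedged usage sketch for the container_of() accessor above; sketch_ctlr_to_hba() is illustrative and not part of the patch:

static struct bnx2fc_hba *sketch_ctlr_to_hba(struct fcoe_ctlr *fip)
{
	/* recover the embedding hba from the libfcoe controller pointer
	 * that libfcoe callbacks hand back to the driver */
	return bnx2fc_from_ctlr(fip);
}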
+
+struct bnx2fc_cmd_mgr {
+ struct bnx2fc_hba *hba;
+ u16 next_idx;
+ struct list_head *free_list;
+ spinlock_t *free_list_lock;
+ struct io_bdt **io_bdt_pool;
+ struct bnx2fc_cmd **cmds;
+};
+
+struct bnx2fc_rport {
+ struct fcoe_port *port;
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE 0x40
+ u32 fcoe_conn_id;
+ u32 context_id;
+ u32 sid;
+
+ unsigned long flags;
+#define BNX2FC_FLAG_SESSION_READY 0x1
+#define BNX2FC_FLAG_OFFLOADED 0x2
+#define BNX2FC_FLAG_DISABLED 0x3
+#define BNX2FC_FLAG_DESTROYED 0x4
+#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5
+#define BNX2FC_FLAG_DESTROY_CMPL 0x6
+#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7
+#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8
+#define BNX2FC_FLAG_EXPL_LOGO 0x9
+
+ u32 max_sqes;
+ u32 max_rqes;
+ u32 max_cqes;
+
+ struct fcoe_sqe *sq;
+ dma_addr_t sq_dma;
+ u16 sq_prod_idx;
+ u8 sq_curr_toggle_bit;
+ u32 sq_mem_size;
+
+ struct fcoe_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_cons_idx;
+ u8 cq_curr_toggle_bit;
+ u32 cq_mem_size;
+
+ void *rq;
+ dma_addr_t rq_dma;
+ u32 rq_prod_idx;
+ u32 rq_cons_idx;
+ u32 rq_mem_size;
+
+ void *rq_pbl;
+ dma_addr_t rq_pbl_dma;
+ u32 rq_pbl_size;
+
+ struct fcoe_xfrqe *xferq;
+ dma_addr_t xferq_dma;
+ u32 xferq_mem_size;
+
+ struct fcoe_confqe *confq;
+ dma_addr_t confq_dma;
+ u32 confq_mem_size;
+
+ void *confq_pbl;
+ dma_addr_t confq_pbl_dma;
+ u32 confq_pbl_size;
+
+ struct fcoe_conn_db *conn_db;
+ dma_addr_t conn_db_dma;
+ u32 conn_db_mem_size;
+
+ struct fcoe_sqe *lcq;
+ dma_addr_t lcq_dma;
+ u32 lcq_mem_size;
+
+ void *ofld_req[4];
+ dma_addr_t ofld_req_dma[4];
+ void *enbl_req;
+ dma_addr_t enbl_req_dma;
+
+ spinlock_t tgt_lock;
+ spinlock_t cq_lock;
+ atomic_t num_active_ios;
+ u32 flush_in_prog;
+ unsigned long work_time_slice;
+ unsigned long timestamp;
+ struct list_head free_task_list;
+ struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];
+ atomic_t pi;
+ atomic_t ci;
+ struct list_head active_cmd_queue;
+ struct list_head els_queue;
+ struct list_head io_retire_queue;
+ struct list_head active_tm_queue;
+
+ struct timer_list ofld_timer;
+ wait_queue_head_t ofld_wait;
+
+ struct timer_list upld_timer;
+ wait_queue_head_t upld_wait;
+};
+
+struct bnx2fc_mp_req {
+ u8 tm_flags;
+
+ u32 req_len;
+ void *req_buf;
+ dma_addr_t req_buf_dma;
+ struct fcoe_bd_ctx *mp_req_bd;
+ dma_addr_t mp_req_bd_dma;
+ struct fc_frame_header req_fc_hdr;
+
+ u32 resp_len;
+ void *resp_buf;
+ dma_addr_t resp_buf_dma;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ dma_addr_t mp_resp_bd_dma;
+ struct fc_frame_header resp_fc_hdr;
+};
+
+struct bnx2fc_els_cb_arg {
+ struct bnx2fc_cmd *aborted_io_req;
+ struct bnx2fc_cmd *io_req;
+ u16 l2_oxid;
+};
+
+/* bnx2fc command structure */
+struct bnx2fc_cmd {
+ struct list_head link;
+ u8 on_active_queue;
+ u8 on_tmf_queue;
+ u8 cmd_type;
+#define BNX2FC_SCSI_CMD 1
+#define BNX2FC_TASK_MGMT_CMD 2
+#define BNX2FC_ABTS 3
+#define BNX2FC_ELS 4
+#define BNX2FC_CLEANUP 5
+ u8 io_req_flags;
+ struct kref refcount;
+ struct fcoe_port *port;
+ struct bnx2fc_rport *tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct bnx2fc_cmd_mgr *cmd_mgr;
+ struct bnx2fc_mp_req mp_req;
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg);
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct delayed_work timeout_work; /* timer for ULP timeouts */
+ struct completion tm_done;
+ int wait_for_comp;
+ u16 xid;
+ struct fcoe_task_ctx_entry *task;
+ struct io_bdt *bd_tbl;
+ struct fcp_rsp *rsp;
+ size_t data_xfer_len;
+ unsigned long req_flags;
+#define BNX2FC_FLAG_ISSUE_RRQ 0x1
+#define BNX2FC_FLAG_ISSUE_ABTS 0x2
+#define BNX2FC_FLAG_ABTS_DONE 0x3
+#define BNX2FC_FLAG_TM_COMPL 0x4
+#define BNX2FC_FLAG_TM_TIMEOUT 0x5
+#define BNX2FC_FLAG_IO_CLEANUP 0x6
+#define BNX2FC_FLAG_RETIRE_OXID 0x7
+#define BNX2FC_FLAG_EH_ABORT 0x8
+#define BNX2FC_FLAG_IO_COMPL 0x9
+#define BNX2FC_FLAG_ELS_DONE 0xa
+#define BNX2FC_FLAG_ELS_TIMEOUT 0xb
+ u32 fcp_resid;
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+ u8 cdb_status; /* SCSI IO status */
+ u8 fcp_status; /* FCP IO status */
+ u8 fcp_rsp_code;
+ u8 scsi_comp_flags;
+};
+
+struct io_bdt {
+ struct bnx2fc_cmd *io_req;
+ struct fcoe_bd_ctx *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+struct bnx2fc_work {
+ struct list_head list;
+ struct bnx2fc_rport *tgt;
+ u16 wqe;
+};
+struct bnx2fc_unsol_els {
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct work_struct unsol_els_work;
+};
+
+
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type);
+void bnx2fc_cmd_release(struct kref *ref);
+int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd);
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt);
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe);
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba);
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba);
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba);
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba);
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid);
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr);
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba);
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items);
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen);
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req);
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp);
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req);
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req);
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec);
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req);
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid);
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task);
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid);
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt);
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd);
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rport,
+ enum fc_rport_event event);
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq);
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd);
+
+
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt);
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout);
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id);
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid);
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba);
+
+#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h
new file mode 100644
index 000000000000..fe7769173c43
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h
@@ -0,0 +1,206 @@
+#ifndef __BNX2FC_CONSTANTS_H_
+#define __BNX2FC_CONSTANTS_H_
+
+/*
+ * This file defines HSI constants for the FCoE flows
+ */
+
+/* KWQ/KCQ FCoE layer code */
+#define FCOE_KWQE_LAYER_CODE (7)
+
+/* KWQ (kernel work queue) request op codes */
+#define FCOE_KWQE_OPCODE_INIT1 (0)
+#define FCOE_KWQE_OPCODE_INIT2 (1)
+#define FCOE_KWQE_OPCODE_INIT3 (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
+#define FCOE_KWQE_OPCODE_DESTROY (10)
+#define FCOE_KWQE_OPCODE_STAT (11)
+
+/* KCQ (kernel completion queue) response op codes */
+#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0)
+#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1)
+#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4)
+#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5)
+
+/* Unsolicited CQE type */
+#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0
+#define FCOE_ERROR_DETECTION_CQE_TYPE 1
+#define FCOE_WARNING_DETECTION_CQE_TYPE 2
+
+/* Task context constants */
+/* After the driver has initialized the task, in case timer services are required */
+#define FCOE_TASK_TX_STATE_INIT 0
+/* In case timer services are required, this state is updated by Xstorm after
+ * it starts processing the task. In case no timer facilities are required,
+ * the driver initializes the state to this value directly */
+#define FCOE_TASK_TX_STATE_NORMAL 1
+/* Task is under abort procedure. Updated in order to stop processing of
+ * pending WQEs on this task */
+#define FCOE_TASK_TX_STATE_ABORT 2
+/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */
+#define FCOE_TASK_TX_STATE_ERROR 3
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_TX_STATE_WARNING 4
+/* For completed unsolicited task */
+#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5
+/* For exchange cleanup request task */
+#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6
+/* For sequence cleanup request task */
+#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7
+/* Mark task as aborted and indicate that ABTS was not transmitted */
+#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8
+/* Mark task as aborted and indicate that ABTS was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9
+/* For completion the ABTS task. */
+#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
+/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
+ */
+#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
+/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
+
+#define FCOE_TASK_RX_STATE_NORMAL 0
+#define FCOE_TASK_RX_STATE_COMPLETED 1
+/* Obsolete: Intermediate completion (middle path with local completion) */
+#define FCOE_TASK_RX_STATE_INTER_COMP 2
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_RX_STATE_WARNING 3
+/* For E_D_T_TOV timer expiration in Ustorm */
+#define FCOE_TASK_RX_STATE_ERROR 4
+/* ABTS ACC arrived; wait for local completion to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
+/* Local completion arrived; wait for ABTS ACC to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
+/* Special completion indication in case of task was aborted. */
+#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
+/* Special completion indication in case of task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
+/* Special completion indication (in the task that requested the exchange
+ * cleanup) in case the cleaned task is not valid. */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
+/* Special completion indication (in the task that requested the sequence
+ * cleanup) in case the cleaned task was already returned to normal. */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
+/* Exchange cleanup arrived; wait until the xfer is handled to finally
+ * complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
+/* Xfer handled, wait for exchange cleanup to finally complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12
+
+#define FCOE_TASK_TYPE_WRITE 0
+#define FCOE_TASK_TYPE_READ 1
+#define FCOE_TASK_TYPE_MIDPATH 2
+#define FCOE_TASK_TYPE_UNSOLICITED 3
+#define FCOE_TASK_TYPE_ABTS 4
+#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5
+#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6
+
+#define FCOE_TASK_DEV_TYPE_DISK 0
+#define FCOE_TASK_DEV_TYPE_TAPE 1
+
+#define FCOE_TASK_CLASS_TYPE_3 0
+#define FCOE_TASK_CLASS_TYPE_2 1
+
+/* Everest FCoE connection type */
+#define B577XX_FCOE_CONNECTION_TYPE 4
+
+/* Error codes for Error Reporting in fast path flows */
+/* XFER error codes */
+#define FCOE_ERROR_CODE_XFER_OOO_RO 0
+#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1
+#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2
+#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3
+#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4
+#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5
+#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6
+#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7
+#define FCOE_ERROR_CODE_XFER_FCTL 8
+
+/* FCP RSP error codes */
+#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9
+#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10
+#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13
+#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14
+#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15
+#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16
+#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17
+#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18
+#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19
+
+/* FCP DATA error codes */
+#define FCOE_ERROR_CODE_DATA_OOO_RO 20
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21
+#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22
+#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23
+#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24
+#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25
+#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26
+#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27
+#define FCOE_ERROR_CODE_DATA_FCTL 28
+
+/* Middle path error codes */
+#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29
+#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30
+#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31
+#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32
+#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34
+#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35
+#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36
+
+/* ABTS error codes */
+#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37
+#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39
+#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40
+#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41
+
+/* Common error codes */
+#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42
+#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43
+#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44
+#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45
+#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46
+#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47
+#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48
+#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49
+#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50
+
+/* Unsolicited Rx error codes */
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51
+#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53
+#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54
+#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55
+
+#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56
+#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57
+#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58
+
+/* Timer error codes */
+#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60
+#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61
+
+
+#endif /* __BNX2FC_CONSTANTS_H_ */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
new file mode 100644
index 000000000000..7f6aff68cc53
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -0,0 +1,70 @@
+#ifndef __BNX2FC_DEBUG__
+#define __BNX2FC_DEBUG__
+
+/* Log level bit mask */
+#define LOG_IO 0x01 /* scsi cmd error, cleanup */
+#define LOG_TGT 0x02 /* Session setup, cleanup, etc. */
+#define LOG_HBA 0x04 /* lport events, link, mtu, etc. */
+#define LOG_ELS 0x08 /* ELS logs */
+#define LOG_MISC 0x10 /* fcoe L2 frame related logs*/
+#define LOG_ALL 0xff /* LOG all messages */
+
+extern unsigned int bnx2fc_debug_level;
+
+#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
+ do { \
+ if (unlikely(bnx2fc_debug_level & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+ } while (0)
+
+#define BNX2FC_ELS_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_ELS, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_MISC_DBG(fmt, arg...) \
+ BNX2FC_CHK_LOGGING(LOG_MISC, \
+ printk(KERN_ALERT PFX fmt, ##arg))
+
+#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
+ do { \
+ if (!io_req || !io_req->port || !io_req->port->lport || \
+ !io_req->port->lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_IO, \
+ shost_printk(KERN_ALERT, \
+ (io_req)->port->lport->host, \
+ PFX "xid:0x%x " fmt, \
+ (io_req)->xid, ##arg)); \
+ } while (0)
+
+#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
+ do { \
+ if (!tgt || !tgt->port || !tgt->port->lport || \
+ !tgt->port->lport->host || !tgt->rport) \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_TGT, \
+ shost_printk(KERN_ALERT, \
+ (tgt)->port->lport->host, \
+ PFX "port:%x " fmt, \
+ (tgt)->rport->port_id, ##arg)); \
+ } while (0)
+
+
+#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
+ do { \
+ if (!lport || !lport->host) \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \
+ else \
+ BNX2FC_CHK_LOGGING(LOG_HBA, \
+ shost_printk(KERN_ALERT, lport->host, \
+ PFX fmt, ##arg)); \
+ } while (0)
+
+#endif
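A hedged usage sketch for these logging macros; the function and its arguments are illustrative. With bnx2fc_debug_level set to LOG_ELS | LOG_TGT, only the first two calls below would print:

static void sketch_dbg_usage(struct bnx2fc_rport *tgt, u16 xid)
{
	BNX2FC_ELS_DBG("sending RRQ, xid 0x%x\n", xid);	/* LOG_ELS: prints */
	BNX2FC_TGT_DBG(tgt, "session is ready\n");	/* LOG_TGT: prints */
	BNX2FC_MISC_DBG("L2 frame received\n");	/* LOG_MISC: filtered out */
}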
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
new file mode 100644
index 000000000000..7a11a255157f
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -0,0 +1,515 @@
+/*
+ * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains helper routines that handle ELS requests
+ * and responses.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg);
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
+
+static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *orig_io_req;
+ struct bnx2fc_cmd *rrq_req;
+ int rc = 0;
+
+ BUG_ON(!cb_arg);
+ rrq_req = cb_arg->io_req;
+ orig_io_req = cb_arg->aborted_io_req;
+ BUG_ON(!orig_io_req);
+ BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
+ orig_io_req->xid, rrq_req->xid);
+
+ kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
+
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
+ /*
+ * els req is timed out. cleanup the IO with FW and
+ * drop the completion. Remove from active_cmd_queue.
+ */
+ BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
+ rrq_req->xid);
+
+ if (rrq_req->on_active_queue) {
+ list_del_init(&rrq_req->link);
+ rrq_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(rrq_req);
+ BUG_ON(rc);
+ }
+ }
+ kfree(cb_arg);
+}
+int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
+{
+
+ struct fc_els_rrq rrq;
+ struct bnx2fc_rport *tgt = aborted_io_req->tgt;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ struct bnx2fc_els_cb_arg *cb_arg = NULL;
+ u32 sid = tgt->sid;
+ u32 r_a_tov = lport->r_a_tov;
+ unsigned long start = jiffies;
+ int rc;
+
+ BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
+ aborted_io_req->xid);
+ memset(&rrq, 0, sizeof(rrq));
+
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
+ rc = -ENOMEM;
+ goto rrq_err;
+ }
+
+ cb_arg->aborted_io_req = aborted_io_req;
+
+ rrq.rrq_cmd = ELS_RRQ;
+ hton24(rrq.rrq_s_id, sid);
+ rrq.rrq_ox_id = htons(aborted_io_req->xid);
+ rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id);
+
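+ /*
+ * bnx2fc_initiate_els() can fail transiently with -ENOMEM;
+ * retry every 20ms and give up after 10 seconds.
+ */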
+retry_rrq:
+ rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
+ bnx2fc_rrq_compl, cb_arg,
+ r_a_tov);
+ if (rc == -ENOMEM) {
+ if (time_after(jiffies, start + (10 * HZ))) {
+ BNX2FC_ELS_DBG("rrq Failed\n");
+ rc = FAILED;
+ goto rrq_err;
+ }
+ msleep(20);
+ goto retry_rrq;
+ }
+rrq_err:
+ if (rc) {
+ BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
+ aborted_io_req->xid);
+ kfree(cb_arg);
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ }
+ return rc;
+}
+
+static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
+{
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ unsigned char *buf;
+ void *resp_buf;
+ u32 resp_len, hdr_len;
+ u16 l2_oxid;
+ int frame_len;
+ int rc = 0;
+
+ l2_oxid = cb_arg->l2_oxid;
+ BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);
+
+ els_req = cb_arg->io_req;
+ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
+ /*
+ * els req is timed out. cleanup the IO with FW and
+ * drop the completion. libfc will handle the els timeout
+ */
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ rc = bnx2fc_initiate_cleanup(els_req);
+ BUG_ON(rc);
+ }
+ goto free_arg;
+ }
+
+ tgt = els_req->tgt;
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!buf) {
+ printk(KERN_ERR PFX "Unable to alloc mp buf\n");
+ goto free_arg;
+ }
+ hdr_len = sizeof(*fc_hdr);
+ if (hdr_len + resp_len > PAGE_SIZE) {
+ printk(KERN_ERR PFX "l2_els_compl: resp len is "
+ "beyond page size\n");
+ goto free_buf;
+ }
+ memcpy(buf, fc_hdr, hdr_len);
+ memcpy(buf + hdr_len, resp_buf, resp_len);
+ frame_len = hdr_len + resp_len;
+
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);
+
+free_buf:
+ kfree(buf);
+free_arg:
+ kfree(cb_arg);
+}
+
+int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ /* adisc is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ /* logo is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
+{
+ struct fc_els_rls *rls;
+ struct fc_frame_header *fh;
+ struct bnx2fc_els_cb_arg *cb_arg;
+ struct fc_lport *lport = tgt->rdata->local_port;
+ u32 r_a_tov = lport->r_a_tov;
+ int rc;
+
+ fh = fc_frame_header_get(fp);
+ cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
+ if (!cb_arg) {
+ printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
+ return -ENOMEM;
+ }
+
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ rls = fc_frame_payload_get(fp, sizeof(*rls));
+ /* rls is initialized by libfc */
+ rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
+ bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
+ if (rc)
+ kfree(cb_arg);
+ return rc;
+}
+
+static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
+ void *data, u32 data_len,
+ void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
+ struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_cmd *els_req;
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int rc = 0;
+ int task_idx, index;
+ u32 did, sid;
+ u16 xid;
+
+ rc = fc_remote_port_chkready(rport);
+ if (rc) {
+ printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
+ (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
+ printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+ els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
+ if (!els_req) {
+ rc = -ENOMEM;
+ goto els_err;
+ }
+
+ els_req->sc_cmd = NULL;
+ els_req->port = port;
+ els_req->tgt = tgt;
+ els_req->cb_func = cb_func;
+ cb_arg->io_req = els_req;
+ els_req->cb_arg = cb_arg;
+
+ mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
+ rc = bnx2fc_init_mp_req(els_req);
+ if (rc == FAILED) {
+ printk(KERN_ALERT PFX "ELS MP request init failed\n");
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -ENOMEM;
+ goto els_err;
+ } else {
+ /* rc SUCCESS */
+ rc = 0;
+ }
+
+ /* Set the data_xfer_len to the size of ELS payload */
+ mp_req->req_len = data_len;
+ els_req->data_xfer_len = mp_req->req_len;
+
+ /* Fill ELS Payload */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
+ memcpy(mp_req->req_buf, data, data_len);
+ } else {
+ printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op);
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ spin_lock_bh(&tgt->tgt_lock);
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ goto els_err;
+
+ /* Fill FC header */
+ fc_hdr = &(mp_req->req_fc_hdr);
+
+ did = tgt->rport->port_id;
+ sid = tgt->sid;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ /* Obtain exchange id */
+ xid = els_req->xid;
+ task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(els_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_els.. session not ready\n");
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EINVAL;
+ }
+
+ if (timer_msec)
+ bnx2fc_cmd_timer_set(els_req, timer_msec);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ els_req->on_active_queue = 1;
+ list_add_tail(&els_req->link, &tgt->els_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+els_err:
+ return rc;
+}
+
+void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ u64 *hdr;
+ u64 *temp_hdr;
+
+ BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x"
+ "cmd_type = %d\n", els_req->xid, els_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &els_req->req_flags)) {
+ BNX2FC_ELS_DBG("Timer context finished processing this "
+ "els - 0x%x\n", els_req->xid);
+ /* This IO doesn't receive cleanup completion */
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+ return;
+ }
+
+ /* Cancel the timeout_work, as we received the response */
+ if (cancel_delayed_work(&els_req->timeout_work))
+ kref_put(&els_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if (els_req->on_active_queue) {
+ list_del_init(&els_req->link);
+ els_req->on_active_queue = 0;
+ }
+
+ mp_req = &(els_req->mp_req);
+ fc_hdr = &(mp_req->resp_fc_hdr);
+
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ /* Parse ELS response */
+ if ((els_req->cb_func) && (els_req->cb_arg)) {
+ els_req->cb_func(els_req->cb_arg);
+ els_req->cb_arg = NULL;
+ }
+
+ kref_put(&els_req->refcount, bnx2fc_cmd_release);
+}
+
+static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ u8 *mac;
+ struct fc_frame_header *fh;
+ u8 op;
+
+ if (IS_ERR(fp))
+ goto done;
+
+ mac = fr_cb(fp)->granted_mac;
+ if (is_zero_ether_addr(mac)) {
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_ELS) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
+ "fh_type != FC_TYPE_ELS\n");
+ fc_frame_free(fp);
+ return;
+ }
+ op = fc_frame_payload_op(fp);
+ if (lport->vport) {
+ if (op == ELS_LS_RJT) {
+ printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
+ fc_vport_terminate(lport->vport);
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
+ fc_frame_free(fp);
+ return;
+ }
+ }
+ fip->update_mac(lport, mac);
+done:
+ fc_lport_flogi_resp(seq, fp, lport);
+}
+
+static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fcoe_ctlr *fip = arg;
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ static u8 zero_mac[ETH_ALEN] = { 0 };
+
+ if (!IS_ERR(fp))
+ fip->update_mac(lport, zero_mac);
+ fc_lport_logo_resp(seq, fp, lport);
+}
+
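+/*
+ * bnx2fc_elsct_send() diverts FLOGI/FDISC and fabric LOGO completions to
+ * the FIP-aware handlers above; all other ELS/CT traffic keeps the
+ * caller's handler. A sketch of how this is presumably installed (the
+ * libfc function template lives in bnx2fc_fcoe.c, so the exact assignment
+ * here is an assumption):
+ *
+ *	lport->tt.elsct_send = bnx2fc_elsct_send;
+ */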
+struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+
+ switch (op) {
+ case ELS_FLOGI:
+ case ELS_FDISC:
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
+ fip, timeout);
+ case ELS_LOGO:
+ /* only hook onto fabric logouts, not port logouts */
+ if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
+ break;
+ return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
+ fip, timeout);
+ }
+ return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
new file mode 100644
index 000000000000..e476e8753079
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -0,0 +1,2535 @@
+/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the code that interacts with libfc, libfcoe,
+ * cnic modules to create FCoE instances, send/receive non-offloaded
+ * FIP/FCoE packets, listen to link events etc.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static struct list_head adapter_list;
+static u32 adapter_count;
+static DEFINE_MUTEX(bnx2fc_dev_lock);
+DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+#define DRV_MODULE_NAME "bnx2fc"
+#define DRV_MODULE_VERSION BNX2FC_VERSION
+#define DRV_MODULE_RELDATE "Jan 25, 2011"
+
+
+static char version[] __devinitdata =
+ "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
+ " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+#define BNX2FC_MAX_QUEUE_DEPTH 256
+#define BNX2FC_MIN_QUEUE_DEPTH 32
+#define FCOE_WORD_TO_BYTE 4
+
+static struct scsi_transport_template *bnx2fc_transport_template;
+static struct scsi_transport_template *bnx2fc_vport_xport_template;
+
+struct workqueue_struct *bnx2fc_wq;
+
+/* bnx2fc structure needs only one instance of the fcoe_percpu_s structure.
+ * Here the io threads are per cpu but the l2 thread is just one
+ */
+struct fcoe_percpu_s bnx2fc_global;
+DEFINE_SPINLOCK(bnx2fc_global_lock);
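+
+/*
+ * Illustrative sketch, not part of this patch: a producer queues work to
+ * the io thread of a given cpu through the standard per-CPU accessors,
+ * e.g. (the "iothread" field name is an assumption; fp_work_lock and
+ * work_list follow their use in bnx2fc_percpu_io_thread() below):
+ *
+ *	struct bnx2fc_percpu_s *p = &per_cpu(bnx2fc_percpu, cpu);
+ *
+ *	spin_lock_bh(&p->fp_work_lock);
+ *	list_add_tail(&work->list, &p->work_list);
+ *	spin_unlock_bh(&p->fp_work_lock);
+ *	wake_up_process(p->iothread);
+ */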
+
+static struct cnic_ulp_ops bnx2fc_cnic_cb;
+static struct libfc_function_template bnx2fc_libfc_fcn_templ;
+static struct scsi_host_template bnx2fc_shost_template;
+static struct fc_function_template bnx2fc_transport_function;
+static struct fc_function_template bnx2fc_vport_xport_function;
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
+static int bnx2fc_destroy(struct net_device *net_device);
+static int bnx2fc_enable(struct net_device *netdev);
+static int bnx2fc_disable(struct net_device *netdev);
+
+static void bnx2fc_recv_frame(struct sk_buff *skb);
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba);
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
+static int bnx2fc_net_config(struct fc_lport *lp);
+static int bnx2fc_lport_config(struct fc_lport *lport);
+static int bnx2fc_em_config(struct fc_lport *lport);
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba);
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv);
+static void bnx2fc_destroy_work(struct work_struct *work);
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba);
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport);
+static void bnx2fc_stop(struct bnx2fc_hba *hba);
+static int __init bnx2fc_mod_init(void);
+static void __exit bnx2fc_mod_exit(void);
+
+unsigned int bnx2fc_debug_level;
+module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
+
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu);
+/* notification function for CPU hotplug events */
+static struct notifier_block bnx2fc_cpu_notifier = {
+ .notifier_call = bnx2fc_cpu_callback,
+};
+
+static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
+{
+ struct fcoe_percpu_s *bg;
+ struct fcoe_rcv_info *fr;
+ struct sk_buff_head *list;
+ struct sk_buff *skb, *next;
+ struct sk_buff *head;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ list = &bg->fcoe_rx_list;
+ head = list->next;
+ for (skb = head; skb != (struct sk_buff *)list;
+ skb = next) {
+ next = skb->next;
+ fr = fcoe_dev_from_skb(skb);
+ if (fr->fr_dev == lp) {
+ __skb_unlink(skb, list);
+ kfree_skb(skb);
+ }
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+}
+
+int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ int rc;
+ spin_lock(&bnx2fc_global_lock);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global);
+ spin_unlock(&bnx2fc_global_lock);
+
+ return rc;
+}
+
+static void bnx2fc_abort_io(struct fc_lport *lport)
+{
+ /*
+ * This function is a no-op for bnx2fc, but we do not want
+ * to leave it NULL either, as libfc would then fall back to
+ * the default handler, fc_fcp_abort_io.
+ */
+}
+
+static void bnx2fc_cleanup(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ int i;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&hba->hba_mutex);
+ spin_lock_bh(&hba->hba_lock);
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if (tgt) {
+ /* Cleanup IOs belonging to requested vport */
+ if (tgt->port == port) {
+ spin_unlock_bh(&hba->hba_lock);
+ BNX2FC_TGT_DBG(tgt, "flush/cleanup\n");
+ bnx2fc_flush_active_ios(tgt);
+ spin_lock_bh(&hba->hba_lock);
+ }
+ }
+ }
+ spin_unlock_bh(&hba->hba_lock);
+ mutex_unlock(&hba->hba_mutex);
+}
+
+static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt,
+ struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_frame_header *fh;
+ int rc = 0;
+
+ fh = fc_frame_header_get(fp);
+ BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, "
+ "r_ctl = 0x%x\n", rdata->ids.port_id,
+ ntohs(fh->fh_ox_id), fh->fh_r_ctl);
+ if ((fh->fh_type == FC_TYPE_ELS) &&
+ (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_ADISC:
+ rc = bnx2fc_send_adisc(tgt, fp);
+ break;
+ case ELS_LOGO:
+ rc = bnx2fc_send_logo(tgt, fp);
+ break;
+ case ELS_RLS:
+ rc = bnx2fc_send_rls(tgt, fp);
+ break;
+ default:
+ break;
+ }
+ } else if ((fh->fh_type == FC_TYPE_BLS) &&
+ (fh->fh_r_ctl == FC_RCTL_BA_ABTS))
+ BNX2FC_TGT_DBG(tgt, "ABTS frame\n");
+ else {
+ BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x "
+ "rctl 0x%x thru non-offload path\n",
+ fh->fh_type, fh->fh_r_ctl);
+ return -ENODEV;
+ }
+ if (rc)
+ return -ENOMEM;
+ else
+ return 0;
+}
+
+/**
+ * bnx2fc_xmit - bnx2fc's FCoE frame transmit function
+ *
+ * @lport: the associated local port
+ * @fp: the fc_frame to be transmitted
+ */
+static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fc_frame_header *fh;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct fcoe_hdr *hp;
+ struct bnx2fc_rport *tgt;
+ struct fcoe_dev_stats *stats;
+ u8 sof, eof;
+ u32 crc;
+ unsigned int hlen, tlen, elen;
+ int wlen, rc = 0;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ hba = port->priv;
+
+ fh = fc_frame_header_get(fp);
+
+ skb = fp_skb(fp);
+ if (!lport->link_up) {
+ BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ if (!hba->ctlr.sel_fcf) {
+ BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
+ kfree_skb(skb);
+ return -EINVAL;
+ }
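+ /*
+ * fcoe_ctlr_els_send() returns non-zero when FIP consumed
+ * the frame (e.g. a FLOGI snooped by the FCoE controller),
+ * in which case it must not be transmitted here.
+ */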
+ if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb))
+ return 0;
+ }
+
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ /*
+ * Snoop the frame header to check if the frame is for
+ * an offloaded session
+ */
+ /*
+ * tgt_ofld_list access is synchronized using both the hba
+ * mutex and the hba lock. At least one of the two must be
+ * held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
+ if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ /* This frame is for offloaded session */
+ BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session "
+ "port_id = 0x%x\n", ntoh24(fh->fh_d_id));
+ spin_unlock_bh(&hba->hba_lock);
+ rc = bnx2fc_xmit_l2_frame(tgt, fp);
+ if (rc != -ENODEV) {
+ kfree_skb(skb);
+ return rc;
+ }
+ } else {
+ spin_unlock_bh(&hba->hba_lock);
+ }
+
+ elen = sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+
+ /* copy port crc and eof to the skb buff */
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+ if (bnx2fc_get_paged_crc_eof(skb, tlen)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+ + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+ cp = NULL;
+ }
+
+ /* adjust skb network/transport offsets to match mac/fcoe/port */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_FCOE);
+ skb->dev = hba->netdev;
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ if (hba->ctlr.map_dest)
+ fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+ else
+ /* insert GW address */
+ memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN);
+
+ if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN);
+ else
+ memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+ if (lport->seq_offload && fr_max_payload(fp)) {
+ skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+ skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+ } else {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ }
+
+ /* update tx stats */
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ put_cpu();
+
+ /* send down to lld */
+ fr_dev(fp) = lport;
+ if (port->fcoe_pending_queue.qlen)
+ fcoe_check_wait_queue(lport, skb);
+ else if (fcoe_start_io(skb))
+ fcoe_check_wait_queue(lport, skb);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ
+ *
+ * @skb: the receive socket buffer
+ * @dev: associated net device
+ * @ptype: context
+ * @olddev: last device
+ *
+ * This function receives the packet and builds FC frame and passes it up
+ */
+static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *olddev)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_hba *hba;
+ struct fc_frame_header *fh;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_percpu_s *bg;
+ unsigned short oxid;
+
+ hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type);
+ lport = hba->ctlr.lp;
+
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n");
+ goto err;
+ }
+
+ if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+ printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n");
+ goto err;
+ }
+
+ /*
+ * Check for minimum frame length, and make sure required FCoE
+ * and FC headers are pulled into the linear data area.
+ */
+ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
+ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+ goto err;
+
+ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+
+ oxid = ntohs(fh->fh_ox_id);
+
+ fr = fcoe_dev_from_skb(skb);
+ fr->fr_dev = lport;
+ fr->ptype = ptype;
+
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+
+ __skb_queue_tail(&bg->fcoe_rx_list, skb);
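+ /*
+ * qlen == 1 means this skb made the list non-empty; the l2 rcv
+ * thread only needs a wakeup on that empty -> non-empty transition.
+ */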
+ if (bg->fcoe_rx_list.qlen == 1)
+ wake_up_process(bg->thread);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ return 0;
+err:
+ kfree_skb(skb);
+ return -1;
+}
+
+static int bnx2fc_l2_rcv_thread(void *arg)
+{
+ struct fcoe_percpu_s *bg = arg;
+ struct sk_buff *skb;
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) {
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ bnx2fc_recv_frame(skb);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ }
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+
+static void bnx2fc_recv_frame(struct sk_buff *skb)
+{
+ u32 fr_len;
+ struct fc_lport *lport;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ struct fc_lport *vn_port;
+ struct fcoe_port *port;
+ u8 *mac = NULL;
+ u8 *dest_mac = NULL;
+ struct fcoe_hdr *hp;
+
+ fr = fcoe_dev_from_skb(skb);
+ lport = fr->fr_dev;
+ if (unlikely(lport == NULL)) {
+ printk(KERN_ALERT PFX "Invalid lport struct\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
+ mac = eth_hdr(skb)->h_source;
+ dest_mac = eth_hdr(skb)->h_dest;
+
+ /* Pull the header */
+ hp = (struct fcoe_hdr *) skb_network_header(skb);
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = hp->fcoe_sof;
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len)) {
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+
+ vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+ if (vn_port) {
+ port = lport_priv(vn_port);
+ if (compare_ether_addr(port->data_src_addr, dest_mac)
+ != 0) {
+ BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ }
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+ fh->fh_type == FC_TYPE_FCP) {
+ /* Drop FCP data. We don't handle FCP data in the L2 path */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fh->fh_type == FC_TYPE_ELS) {
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LOGO:
+ if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ /* drop non-FIP LOGO */
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ break;
+ }
+ }
+ if (le32_to_cpu(fr_crc(fp)) !=
+ ~crc32(~0, skb->data, fr_len)) {
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING PFX "dropping frame with "
+ "CRC error\n");
+ stats->InvalidCRCCount++;
+ put_cpu();
+ kfree_skb(skb);
+ return;
+ }
+ put_cpu();
+ fc_exch_recv(lport, fp);
+}
+
+/**
+ * bnx2fc_percpu_io_thread - thread per cpu for ios
+ *
+ * @arg: ptr to bnx2fc_percpu_info structure
+ */
+int bnx2fc_percpu_io_thread(void *arg)
+{
+ struct bnx2fc_percpu_s *p = arg;
+ struct bnx2fc_work *work, *tmp;
+ LIST_HEAD(work_list);
+
+ set_user_nice(current, -20);
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+ set_current_state(TASK_RUNNING);
+ spin_lock_bh(&p->fp_work_lock);
+ while (!list_empty(&p->work_list)) {
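+ /*
+ * Splice the pending entries out while holding the lock,
+ * then complete them with the lock dropped so producers
+ * are not blocked during processing.
+ */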
+ list_splice_init(&p->work_list, &work_list);
+ spin_unlock_bh(&p->fp_work_lock);
+
+ list_for_each_entry_safe(work, tmp, &work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_lock_bh(&p->fp_work_lock);
+ }
+ spin_unlock_bh(&p->fp_work_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+
+ return 0;
+}
+
+static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+{
+ struct fc_host_statistics *bnx2fc_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_statistics_params *fw_stats;
+ int rc = 0;
+
+ fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer;
+ if (!fw_stats)
+ return NULL;
+
+ bnx2fc_stats = fc_get_host_stats(shost);
+
+ init_completion(&hba->stat_req_done);
+ if (bnx2fc_send_stat_req(hba))
+ return bnx2fc_stats;
+ rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
+ if (!rc) {
+ BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
+ return bnx2fc_stats;
+ }
+ bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt;
+ bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
+ bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
+ bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
+ bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+
+ bnx2fc_stats->dumped_frames = 0;
+ bnx2fc_stats->lip_count = 0;
+ bnx2fc_stats->nos_count = 0;
+ bnx2fc_stats->loss_of_sync_count = 0;
+ bnx2fc_stats->loss_of_signal_count = 0;
+ bnx2fc_stats->prim_seq_protocol_err_count = 0;
+
+ return bnx2fc_stats;
+}
+
+static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct Scsi_Host *shost = lport->host;
+ int rc = 0;
+
+ shost->max_cmd_len = BNX2FC_MAX_CMD_LEN;
+ shost->max_lun = BNX2FC_MAX_LUN;
+ shost->max_id = BNX2FC_MAX_FCP_TGT;
+ shost->max_channel = 0;
+ if (lport->vport)
+ shost->transportt = bnx2fc_vport_xport_template;
+ else
+ shost->transportt = bnx2fc_transport_template;
+
+ /* Add the new host to SCSI-ml */
+ rc = scsi_add_host(lport->host, dev);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on scsi_add_host\n");
+ return rc;
+ }
+ if (!lport->vport)
+ fc_host_max_npiv_vports(lport->host) = USHRT_MAX;
+ sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s",
+ BNX2FC_NAME, BNX2FC_VERSION,
+ hba->netdev->name);
+
+ return 0;
+}
+
+static int bnx2fc_mfs_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ u32 mfs;
+ u32 max_mfs;
+
+ mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header);
+ BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs);
+ if (mfs > max_mfs)
+ mfs = max_mfs;
+
+ /* Round the payload down to a multiple of 256 bytes, then re-add the FC header */
+ mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) *
+ BNX2FC_MIN_PAYLOAD);
+ mfs = mfs + sizeof(struct fc_frame_header);
+
+ BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs);
+ if (fc_set_mfs(lport, mfs))
+ return -EINVAL;
+ return 0;
+}
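+
+/*
+ * Worked example (illustrative): with a 1500-byte MTU, the 14-byte FCoE
+ * header and 8-byte trailer leave mfs = 1478. Rounding the payload down
+ * to a multiple of 256 gives (1478 - 24) / 256 = 5 chunks, so the frame
+ * size handed to fc_set_mfs() is 5 * 256 + 24 = 1304 bytes.
+ */
+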
+static void bnx2fc_link_speed_update(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+
+ if (!dev_ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+ if (ecmd.speed == SPEED_1000)
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ if (ecmd.speed == SPEED_10000)
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ }
+ return;
+}
+static int bnx2fc_link_ok(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *dev = hba->phys_dev;
+ int rc = 0;
+
+ if ((dev->flags & IFF_UP) && netif_carrier_ok(dev))
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else {
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ rc = -1;
+ }
+ return rc;
+}
+
+/**
+ * bnx2fc_get_link_state - get network link state
+ *
+ * @hba: adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
+{
+ if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+ set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+ else
+ clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+static int bnx2fc_net_config(struct fc_lport *lport)
+{
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ u64 wwnn, wwpn;
+
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ /* require support for get_pauseparam ethtool op. */
+ if (!hba->phys_dev->ethtool_ops ||
+ !hba->phys_dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (bnx2fc_mfs_update(lport))
+ return -EINVAL;
+
+ skb_queue_head_init(&port->fcoe_pending_queue);
+ port->fcoe_pending_queue_active = 0;
+ setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
+
+ bnx2fc_link_speed_update(lport);
+
+ if (!lport->vport) {
+ wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0);
+ BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
+ fc_set_wwnn(lport, wwnn);
+
+ wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0);
+ BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
+ fc_set_wwpn(lport, wwpn);
+ }
+
+ return 0;
+}
+
+static void bnx2fc_destroy_timer(unsigned long data)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR: bnx2fc_destroy_timer - "
+ "Destroy compl not received!!\n");
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+}
+
+/**
+ * bnx2fc_indicate_netevent - Generic netdev event handler
+ *
+ * @context: adapter structure pointer
+ * @event: event type
+ *
+ * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN, NETDEV_CHANGE and
+ * NETDEV_CHANGEMTU events
+ */
+static void bnx2fc_indicate_netevent(void *context, unsigned long event)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fc_lport *lport = hba->ctlr.lp;
+ struct fc_lport *vport;
+ u32 link_possible = 1;
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n",
+ hba->netdev->name, event);
+ return;
+ }
+
+ /*
+ * ASSUMPTION:
+ * indicate_netevent cannot be called from cnic unless bnx2fc
+ * does register_device
+ */
+ BUG_ON(!lport);
+
+ BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n",
+ hba->netdev->name, event);
+
+ switch (event) {
+ case NETDEV_UP:
+ BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n",
+ hba->adapter_state);
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+ printk(KERN_ERR "indicate_netevent: "\
+ "adapter is not UP!!\n");
+ /* fall thru to update mfs if MTU has changed */
+ case NETDEV_CHANGEMTU:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n");
+ bnx2fc_mfs_update(lport);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ bnx2fc_mfs_update(vport);
+ mutex_unlock(&lport->lp_mutex);
+ break;
+
+ case NETDEV_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port down\n");
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ BNX2FC_HBA_DBG(lport, "Port going down\n");
+ set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+ link_possible = 0;
+ break;
+
+ case NETDEV_CHANGE:
+ BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unkonwn netevent %ld", event);
+ return;
+ }
+
+ bnx2fc_link_speed_update(lport);
+
+ if (link_possible && !bnx2fc_link_ok(lport)) {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ } else {
+ printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n");
+ if (fcoe_ctlr_link_down(&hba->ctlr)) {
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->dev_stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ BNX2FC_HBA_DBG(lport, "indicate_netevent "
+ "num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 1;
+ BNX2FC_HBA_DBG(lport, "waiting for uploads to "
+ "compl proc = %s\n",
+ current->comm);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ hba->wait_for_link_down = 0;
+
+ if (signal_pending(current))
+ flush_signals(current);
+ }
+ }
+}
+
+static int bnx2fc_libfc_config(struct fc_lport *lport)
+{
+
+ /* Set the function pointers set by bnx2fc driver */
+ memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ,
+ sizeof(struct libfc_function_template));
+ fc_elsct_init(lport);
+ fc_exch_init(lport);
+ fc_rport_init(lport);
+ fc_disc_init(lport);
+ return 0;
+}
+
+static int bnx2fc_em_config(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
+ if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
+ FCOE_MAX_XID, NULL)) {
+ printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID,
+ BNX2FC_MAX_XID);
+
+ if (!hba->cmd_mgr) {
+ printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
+ fc_exch_mgr_free(lport);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int bnx2fc_lport_config(struct fc_lport *lport)
+{
+ lport->link_up = 0;
+ lport->qfull = 0;
+ lport->max_retry_count = 3;
+ lport->max_rport_retry_count = 3;
+ lport->e_d_tov = 2 * 1000;
+ lport->r_a_tov = 10 * 1000;
+
+ /* REVISIT: enable when supporting tape devices
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ */
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS);
+ lport->does_npiv = 1;
+
+ memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen));
+ lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA;
+
+ /* alloc stats structure */
+ if (fc_lport_init_stats(lport))
+ return -ENOMEM;
+
+ /* Finish fc_lport configuration */
+ fc_lport_config(lport);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_fip_recv - handle a received FIP frame.
+ *
+ * @skb: the received skb
+ * @dev: associated &net_device
+ * @ptype: the &packet_type structure which was used to register this handler.
+ * @orig_dev: original receive &net_device, in case @dev is a bond.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
+{
+ struct bnx2fc_hba *hba;
+ hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type);
+ fcoe_ctlr_recv(&hba->ctlr, skb);
+ return 0;
+}
+
+/**
+ * bnx2fc_update_src_mac - Update the FCoE data source address
+ *
+ * @lport: local port
+ * @addr: unicast MAC address to record as the port's data source address
+ *
+ * Stores the granted/selected FCoE MAC so it can be used as the source
+ * address for outgoing frames.
+ */
+static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+ struct fcoe_port *port = lport_priv(lport);
+
+ memcpy(port->data_src_addr, addr, ETH_ALEN);
+}
+
+/**
+ * bnx2fc_get_src_mac - return the ethernet source address for an lport
+ *
+ * @lport: libfc port
+ */
+static u8 *bnx2fc_get_src_mac(struct fc_lport *lport)
+{
+ struct fcoe_port *port;
+
+ port = (struct fcoe_port *)lport_priv(lport);
+ return port->data_src_addr;
+}
+
+/**
+ * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame.
+ *
+ * @fip: FCoE controller.
+ * @skb: FIP Packet.
+ */
+static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ skb->dev = bnx2fc_from_ctlr(fip)->netdev;
+ dev_queue_xmit(skb);
+}
+
+static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fcoe_port *port = lport_priv(n_port);
+ struct bnx2fc_hba *hba = port->priv;
+ struct net_device *netdev = hba->netdev;
+ struct fc_lport *vn_port;
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "vn ports cannot be created on"
+ "this hba\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+ vn_port = bnx2fc_if_create(hba, &vport->dev, 1);
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ if (IS_ERR(vn_port)) {
+ printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n",
+ netdev->name);
+ return -EIO;
+ }
+
+ if (disabled) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ } else {
+ vn_port->boot_time = jiffies;
+ fc_lport_init(vn_port);
+ fc_fabric_login(vn_port);
+ fc_vport_setlink(vn_port);
+ }
+ return 0;
+}
+
+static int bnx2fc_vport_destroy(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port = vport->dd_data;
+ struct fcoe_port *port = lport_priv(vn_port);
+
+ mutex_lock(&n_port->lp_mutex);
+ list_del(&vn_port->list);
+ mutex_unlock(&n_port->lp_mutex);
+ queue_work(bnx2fc_wq, &port->destroy_work);
+ return 0;
+}
+
+static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
+{
+ struct fc_lport *lport = vport->dd_data;
+
+ if (disable) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ fc_fabric_logoff(lport);
+ } else {
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+ fc_vport_setlink(lport);
+ }
+ return 0;
+}
+
+
+static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba)
+{
+ struct net_device *netdev = hba->netdev;
+ struct net_device *physdev = hba->phys_dev;
+ struct netdev_hw_addr *ha;
+ int sel_san_mac = 0;
+
+ /* Bonding devices are not supported */
+ if ((netdev->priv_flags & IFF_MASTER_ALB) ||
+ (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
+ (netdev->priv_flags & IFF_MASTER_8023AD)) {
+ return -EOPNOTSUPP;
+ }
+
+ /* setup Source MAC Address */
+ rcu_read_lock();
+ for_each_dev_addr(physdev, ha) {
+ BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ",
+ ha->type);
+ printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0],
+ ha->addr[1], ha->addr[2], ha->addr[3],
+ ha->addr[4], ha->addr[5]);
+
+ if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+ (is_valid_ether_addr(ha->addr))) {
+ memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
+ sel_san_mac = 1;
+ BNX2FC_MISC_DBG("Found SAN MAC\n");
+ }
+ }
+ rcu_read_unlock();
+
+ if (!sel_san_mac)
+ return -ENODEV;
+
+ hba->fip_packet_type.func = bnx2fc_fip_recv;
+ hba->fip_packet_type.type = htons(ETH_P_FIP);
+ hba->fip_packet_type.dev = netdev;
+ dev_add_pack(&hba->fip_packet_type);
+
+ hba->fcoe_packet_type.func = bnx2fc_rcv;
+ hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+ hba->fcoe_packet_type.dev = netdev;
+ dev_add_pack(&hba->fcoe_packet_type);
+
+ return 0;
+}
+
+static int bnx2fc_attach_transport(void)
+{
+ bnx2fc_transport_template =
+ fc_attach_transport(&bnx2fc_transport_function);
+
+ if (bnx2fc_transport_template == NULL) {
+ printk(KERN_ERR PFX "Failed to attach FC transport\n");
+ return -ENODEV;
+ }
+
+ bnx2fc_vport_xport_template =
+ fc_attach_transport(&bnx2fc_vport_xport_function);
+ if (bnx2fc_vport_xport_template == NULL) {
+ printk(KERN_ERR PFX
+ "Failed to attach FC transport for vport\n");
+ fc_release_transport(bnx2fc_transport_template);
+ bnx2fc_transport_template = NULL;
+ return -ENODEV;
+ }
+ return 0;
+}
+static void bnx2fc_release_transport(void)
+{
+ fc_release_transport(bnx2fc_transport_template);
+ fc_release_transport(bnx2fc_vport_xport_template);
+ bnx2fc_transport_template = NULL;
+ bnx2fc_vport_xport_template = NULL;
+}
+
+static void bnx2fc_interface_release(struct kref *kref)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *netdev;
+ struct net_device *phys_dev;
+
+ hba = container_of(kref, struct bnx2fc_hba, kref);
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n");
+
+ netdev = hba->netdev;
+ phys_dev = hba->phys_dev;
+
+ /* tear-down FIP controller */
+ if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done))
+ fcoe_ctlr_destroy(&hba->ctlr);
+
+ /* Free the command manager */
+ if (hba->cmd_mgr) {
+ bnx2fc_cmd_mgr_free(hba->cmd_mgr);
+ hba->cmd_mgr = NULL;
+ }
+ dev_put(netdev);
+ module_put(THIS_MODULE);
+}
+
+static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba)
+{
+ kref_get(&hba->kref);
+}
+
+static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba)
+{
+ kref_put(&hba->kref, bnx2fc_interface_release);
+}
+static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba)
+{
+ bnx2fc_unbind_pcidev(hba);
+ kfree(hba);
+}
+
+/**
+ * bnx2fc_interface_create - create a new fcoe instance
+ *
+ * @cnic: pointer to cnic device
+ *
+ * Creates a new FCoE instance on the given device, which includes
+ * allocating the hba structure and binding it to the underlying PCI device.
+ */
+static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic)
+{
+ struct bnx2fc_hba *hba;
+ int rc;
+
+ hba = kzalloc(sizeof(*hba), GFP_KERNEL);
+ if (!hba) {
+ printk(KERN_ERR PFX "Unable to allocate hba structure\n");
+ return NULL;
+ }
+ spin_lock_init(&hba->hba_lock);
+ mutex_init(&hba->hba_mutex);
+
+ hba->cnic = cnic;
+ rc = bnx2fc_bind_pcidev(hba);
+ if (rc)
+ goto bind_err;
+ hba->phys_dev = cnic->netdev;
+ /* will get overwritten after we do vlan discovery */
+ hba->netdev = hba->phys_dev;
+
+ init_waitqueue_head(&hba->shutdown_wait);
+ init_waitqueue_head(&hba->destroy_wait);
+
+ return hba;
+bind_err:
+ printk(KERN_ERR PFX "create_interface: bind error\n");
+ kfree(hba);
+ return NULL;
+}
+
+static int bnx2fc_interface_setup(struct bnx2fc_hba *hba,
+ enum fip_state fip_mode)
+{
+ int rc = 0;
+ struct net_device *netdev = hba->netdev;
+ struct fcoe_ctlr *fip = &hba->ctlr;
+
+ dev_hold(netdev);
+ kref_init(&hba->kref);
+
+ hba->flags = 0;
+
+ /* Initialize FIP */
+ memset(fip, 0, sizeof(*fip));
+ fcoe_ctlr_init(fip, fip_mode);
+ hba->ctlr.send = bnx2fc_fip_send;
+ hba->ctlr.update_mac = bnx2fc_update_src_mac;
+ hba->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done);
+
+ rc = bnx2fc_netdev_setup(hba);
+ if (rc)
+ goto setup_err;
+
+ hba->next_conn_id = 0;
+
+ memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list));
+ hba->num_ofld_sess = 0;
+
+ return 0;
+
+setup_err:
+ fcoe_ctlr_destroy(&hba->ctlr);
+ dev_put(netdev);
+ bnx2fc_interface_put(hba);
+ return rc;
+}
+
+/**
+ * bnx2fc_if_create - Create FCoE instance on a given interface
+ *
+ * @hba: FCoE interface to create a local port on
+ * @parent: Device pointer to be the parent in sysfs for the SCSI host
+ * @npiv: Indicates if the port is vport or not
+ *
+ * Creates a fc_lport instance and a Scsi_Host instance and configure them.
+ *
+ * Returns: Allocated fc_lport or an error pointer
+ */
+static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba,
+ struct device *parent, int npiv)
+{
+ struct fc_lport *lport = NULL;
+ struct fcoe_port *port;
+ struct Scsi_Host *shost;
+ struct fc_vport *vport = dev_to_vport(parent);
+ int rc = 0;
+
+ /* Allocate Scsi_Host structure */
+ if (!npiv) {
+ lport = libfc_host_alloc(&bnx2fc_shost_template,
+ sizeof(struct fcoe_port));
+ } else {
+ lport = libfc_vport_create(vport,
+ sizeof(struct fcoe_port));
+ }
+
+ if (!lport) {
+ printk(KERN_ERR PFX "could not allocate scsi host structure\n");
+ return NULL;
+ }
+ shost = lport->host;
+ port = lport_priv(lport);
+ port->lport = lport;
+ port->priv = hba;
+ INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
+
+ /* Configure fcoe_port */
+ rc = bnx2fc_lport_config(lport);
+ if (rc)
+ goto lp_config_err;
+
+ if (npiv) {
+ vport = dev_to_vport(parent);
+ printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n",
+ vport->node_name, vport->port_name);
+ fc_set_wwnn(lport, vport->node_name);
+ fc_set_wwpn(lport, vport->port_name);
+ }
+ /* Configure netdev and networking properties of the lport */
+ rc = bnx2fc_net_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
+ goto lp_config_err;
+ }
+
+ rc = bnx2fc_shost_config(lport, parent);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure shost for %s\n",
+ hba->netdev->name);
+ goto lp_config_err;
+ }
+
+ /* Initialize the libfc library */
+ rc = bnx2fc_libfc_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Couldnt configure libfc\n");
+ goto shost_err;
+ }
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+
+ /* Allocate exchange manager */
+ if (!npiv) {
+ rc = bnx2fc_em_config(lport);
+ if (rc) {
+ printk(KERN_ERR PFX "Error on bnx2fc_em_config\n");
+ goto shost_err;
+ }
+ }
+
+ bnx2fc_interface_get(hba);
+ return lport;
+
+shost_err:
+ scsi_remove_host(shost);
+lp_config_err:
+ scsi_host_put(lport->host);
+ return NULL;
+}
+
+static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba)
+{
+ /* Don't listen for Ethernet packets anymore */
+ __dev_remove_pack(&hba->fcoe_packet_type);
+ __dev_remove_pack(&hba->fip_packet_type);
+ synchronize_net();
+}
+
+static void bnx2fc_if_destroy(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n");
+ /* Stop the transmit retry timer */
+ del_timer_sync(&port->timer);
+
+ /* Free existing transmit skbs */
+ fcoe_clean_pending_queue(lport);
+
+ bnx2fc_interface_put(hba);
+
+ /* Free queued packets for the receive thread */
+ bnx2fc_clean_rx_queue(lport);
+
+ /* Detach from scsi-ml */
+ fc_remove_host(lport->host);
+ scsi_remove_host(lport->host);
+
+ /*
+ * Note that only the physical lport will have the exchange manager.
+ * for vports, this function is NOP
+ */
+ fc_exch_mgr_free(lport);
+
+ /* Free memory used by statistical counters */
+ fc_lport_free_stats(lport);
+
+ /* Release Scsi_Host */
+ scsi_host_put(lport->host);
+}
+
+/**
+ * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
+ *
+ * @netdev: The net_device whose FCoE interface is to be destroyed
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_destroy(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba = NULL;
+ struct net_device *phys_dev;
+ int rc = 0;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ mutex_lock(&bnx2fc_dev_lock);
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+#endif
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto netdev_err;
+ }
+
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n");
+ goto netdev_err;
+ }
+
+ bnx2fc_netdev_cleanup(hba);
+
+ bnx2fc_stop(hba);
+
+ bnx2fc_if_destroy(hba->ctlr.lp);
+
+ destroy_workqueue(hba->timer_work_queue);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ bnx2fc_fw_destroy(hba);
+
+ clear_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+netdev_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+static void bnx2fc_destroy_work(struct work_struct *work)
+{
+ struct fcoe_port *port;
+ struct fc_lport *lport;
+
+ port = container_of(work, struct fcoe_port, destroy_work);
+ lport = port->lport;
+
+ BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+
+ bnx2fc_port_shutdown(lport);
+ rtnl_lock();
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_if_destroy(lport);
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+}
+
+static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ bnx2fc_free_fw_resc(hba);
+ bnx2fc_free_task_ctx(hba);
+}
+
+/**
+ * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated
+ * pci structure
+ *
+ * @hba: Adapter instance
+ */
+static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba)
+{
+ if (bnx2fc_setup_task_ctx(hba))
+ goto mem_err;
+
+ if (bnx2fc_setup_fw_resc(hba))
+ goto mem_err;
+
+ return 0;
+mem_err:
+ bnx2fc_unbind_adapter_devices(hba);
+ return -ENOMEM;
+}
+
+static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba)
+{
+ struct cnic_dev *cnic;
+
+ if (!hba->cnic) {
+ printk(KERN_ERR PFX "cnic is NULL\n");
+ return -ENODEV;
+ }
+ cnic = hba->cnic;
+ hba->pcidev = cnic->pcidev;
+ if (hba->pcidev)
+ pci_dev_get(hba->pcidev);
+
+ return 0;
+}
+
+static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
+{
+ if (hba->pcidev)
+ pci_dev_put(hba->pcidev);
+ hba->pcidev = NULL;
+}
+
+
+
+/**
+ * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * This function maps adapter structure to pcidev structure and initiates
+ * firmware handshake to enable/initialize on-chip FCoE components.
+ * This bnx2fc - cnic interface API callback is used after the following
+ * conditions are met:
+ * a) the underlying network interface is up (marked by the NETDEV_UP
+ *    event from netdev), and
+ * b) the bnx2fc adapter structure is registered.
+ */
+static void bnx2fc_ulp_start(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct fc_lport *lport = hba->ctlr.lp;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done))
+ goto start_disc;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done))
+ bnx2fc_fw_init(hba);
+
+start_disc:
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ BNX2FC_MISC_DBG("bnx2fc started.\n");
+
+ /* Kick off Fabric discovery*/
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "ulp_init: start discovery\n");
+ lport->tt.frame_send = bnx2fc_xmit;
+ bnx2fc_start_disc(hba);
+ }
+}
+
+static void bnx2fc_port_shutdown(struct fc_lport *lport)
+{
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ fc_fabric_logoff(lport);
+ fc_lport_destroy(lport);
+}
+
+static void bnx2fc_stop(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ struct fc_lport *vport;
+
+ BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__,
+ hba->init_done);
+ if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) &&
+ test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ lport = hba->ctlr.lp;
+ bnx2fc_port_shutdown(lport);
+ BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d "
+ "offloaded sessions\n",
+ hba->num_ofld_sess);
+ wait_event_interruptible(hba->shutdown_wait,
+ (hba->num_ofld_sess == 0));
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(lport);
+
+ mutex_lock(&hba->hba_mutex);
+ clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+
+ clear_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ mutex_unlock(&hba->hba_mutex);
+ }
+}
+
+static int bnx2fc_fw_init(struct bnx2fc_hba *hba)
+{
+#define BNX2FC_INIT_POLL_TIME (1000 / HZ)
+ int rc = -1;
+ int i = HZ;
+
+ rc = bnx2fc_bind_adapter_devices(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc);
+ goto err_out;
+ }
+
+ rc = bnx2fc_send_fw_fcoe_init_msg(hba);
+ if (rc) {
+ printk(KERN_ALERT PFX
+ "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc);
+ goto err_unbind;
+ }
+
+ /*
+ * Wait until the adapter init message is complete, and adapter
+ * state is UP.
+ */
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ msleep(BNX2FC_INIT_POLL_TIME);
+
+ if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) {
+ printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. "
+ "Ignoring...\n",
+ hba->cnic->netdev->name);
+ rc = -1;
+ goto err_unbind;
+ }
+
+
+ /* Mark HBA to indicate that the FW INIT is done */
+ set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done);
+ return 0;
+
+err_unbind:
+ bnx2fc_unbind_adapter_devices(hba);
+err_out:
+ return rc;
+}
+
+static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
+{
+ if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) {
+ init_timer(&hba->destroy_timer);
+ hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT +
+ jiffies;
+ hba->destroy_timer.function = bnx2fc_destroy_timer;
+ hba->destroy_timer.data = (unsigned long)hba;
+ add_timer(&hba->destroy_timer);
+ wait_event_interruptible(hba->destroy_wait,
+ (hba->flags &
+ BNX2FC_FLAG_DESTROY_CMPL));
+ /* This should never happen */
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&hba->destroy_timer);
+ }
+ bnx2fc_unbind_adapter_devices(hba);
+ }
+}
+
+/**
+ * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance
+ *
+ * @handle: transport handle pointing to adapter structure
+ *
+ * Driver checks if adapter is already in shutdown mode, if not start
+ * the shutdown process.
+ */
+static void bnx2fc_ulp_stop(void *handle)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle;
+
+	printk(KERN_ERR PFX "ULP_STOP\n");
+
+ mutex_lock(&bnx2fc_dev_lock);
+ bnx2fc_stop(hba);
+ bnx2fc_fw_destroy(hba);
+ mutex_unlock(&bnx2fc_dev_lock);
+}
+
+static void bnx2fc_start_disc(struct bnx2fc_hba *hba)
+{
+ struct fc_lport *lport;
+ int wait_cnt = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* Kick off FIP/FLOGI */
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ printk(KERN_ERR PFX "Init not done yet\n");
+ return;
+ }
+
+ lport = hba->ctlr.lp;
+ BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
+
+ if (!bnx2fc_link_ok(lport)) {
+ BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
+ fcoe_ctlr_link_up(&hba->ctlr);
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ set_bit(ADAPTER_STATE_READY, &hba->adapter_state);
+ }
+
+ /* wait for the FCF to be selected before issuing FLOGI */
+ while (!hba->ctlr.sel_fcf) {
+ msleep(250);
+ /* give up after 3 secs */
+ if (++wait_cnt > 12)
+ break;
+ }
+ fc_lport_init(lport);
+ fc_fabric_login(lport);
+}
+
+
+/**
+ * bnx2fc_ulp_init - Initialize an adapter instance
+ *
+ * @dev: cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all
+ * enumerated cnic devices. This routine allocates adapter structure
+ * and other device specific resources.
+ */
+static void bnx2fc_ulp_init(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+ int rc = 0;
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ /* bnx2fc works only when bnx2x is loaded */
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s,"
+ " flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ /* Configure FCoE interface */
+ hba = bnx2fc_interface_create(dev);
+ if (!hba) {
+ printk(KERN_ERR PFX "hba initialization failed\n");
+ return;
+ }
+
+ /* Add HBA to the adapter list */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_add_tail(&hba->link, &adapter_list);
+ adapter_count++;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+ rc = dev->register_device(dev, CNIC_ULP_FCOE,
+ (void *) hba);
+ if (rc)
+ printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc);
+ else
+ set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+static int bnx2fc_disable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n");
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n");
+ } else {
+ fcoe_ctlr_link_down(&hba->ctlr);
+ fcoe_clean_pending_queue(hba->ctlr.lp);
+ }
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+
+static int bnx2fc_enable(struct net_device *netdev)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+
+ if (!rtnl_trylock()) {
+ printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+
+ BNX2FC_MISC_DBG("Entered %s\n", __func__);
+ mutex_lock(&bnx2fc_dev_lock);
+
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
+ else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -ENODEV;
+ goto nodev;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba || !hba->ctlr.lp) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n");
+ } else if (!bnx2fc_link_ok(hba->ctlr.lp))
+ fcoe_ctlr_link_up(&hba->ctlr);
+
+nodev:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_create - Create bnx2fc FCoE interface
+ *
+ * @netdev: The net_device on which the FCoE interface is created
+ * @fip_mode: The FIP mode of the FCoE controller
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ struct bnx2fc_hba *hba;
+ struct net_device *phys_dev;
+ struct fc_lport *lport;
+ struct ethtool_drvinfo drvinfo;
+ int rc = 0;
+ int vlan_id;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
+ if (fip_mode != FIP_MODE_FABRIC) {
+		printk(KERN_ERR PFX "fip mode not FABRIC\n");
+ return -EIO;
+ }
+
+ if (!rtnl_trylock()) {
+		printk(KERN_ERR PFX "retrying for rtnl_lock\n");
+ return -EIO;
+ }
+ mutex_lock(&bnx2fc_dev_lock);
+
+#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE
+ if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+ rc = -ENODEV;
+ goto mod_err;
+ }
+#endif
+
+ if (!try_module_get(THIS_MODULE)) {
+ rc = -EINVAL;
+ goto mod_err;
+ }
+
+ /* obtain physical netdev */
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ phys_dev = vlan_dev_real_dev(netdev);
+ vlan_id = vlan_dev_vlan_id(netdev);
+ } else {
+ printk(KERN_ERR PFX "Not a vlan device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ /* verify if the physical device is a netxtreme2 device */
+ if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo);
+ if (strcmp(drvinfo.driver, "bnx2x")) {
+ printk(KERN_ERR PFX "Not a netxtreme2 device\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+ } else {
+ printk(KERN_ERR PFX "unable to obtain drv_info\n");
+ rc = -EINVAL;
+ goto netdev_err;
+ }
+
+ /* obtain hba and initialize rest of the structure */
+ hba = bnx2fc_hba_lookup(phys_dev);
+ if (!hba) {
+ rc = -ENODEV;
+ printk(KERN_ERR PFX "bnx2fc_create: hba not found\n");
+ goto netdev_err;
+ }
+
+ if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) {
+ rc = bnx2fc_fw_init(hba);
+ if (rc)
+ goto netdev_err;
+ }
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ rc = -EEXIST;
+ goto netdev_err;
+ }
+
+ /* update netdev with vlan netdev */
+ hba->netdev = netdev;
+ hba->vlan_id = vlan_id;
+ hba->vlan_enabled = 1;
+
+ rc = bnx2fc_interface_setup(hba, fip_mode);
+ if (rc) {
+ printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n");
+ goto ifput_err;
+ }
+
+ hba->timer_work_queue =
+ create_singlethread_workqueue("bnx2fc_timer_wq");
+ if (!hba->timer_work_queue) {
+ printk(KERN_ERR PFX "ulp_init could not create timer_wq\n");
+ rc = -EINVAL;
+ goto ifput_err;
+ }
+
+ lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0);
+ if (!lport) {
+ printk(KERN_ERR PFX "Failed to create interface (%s)\n",
+ netdev->name);
+ bnx2fc_netdev_cleanup(hba);
+ rc = -EINVAL;
+ goto if_create_err;
+ }
+
+ lport->boot_time = jiffies;
+
+ /* Make this master N_port */
+ hba->ctlr.lp = lport;
+
+ set_bit(BNX2FC_CREATE_DONE, &hba->init_done);
+ printk(KERN_ERR PFX "create: START DISC\n");
+ bnx2fc_start_disc(hba);
+	/*
+	 * Drop the reference from kref_init in bnx2fc_interface_setup;
+	 * on success, lport holds the reference taken in bnx2fc_if_create.
+	 */
+ bnx2fc_interface_put(hba);
+ /* put netdev that was held while calling dev_get_by_name */
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return 0;
+
+if_create_err:
+ destroy_workqueue(hba->timer_work_queue);
+ifput_err:
+ bnx2fc_interface_put(hba);
+netdev_err:
+ module_put(THIS_MODULE);
+mod_err:
+ mutex_unlock(&bnx2fc_dev_lock);
+ rtnl_unlock();
+ return rc;
+}
+
+/**
+ * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance
+ *
+ * @cnic: Pointer to cnic device instance
+ *
+ **/
+static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->cnic == cnic)
+ return hba;
+ }
+ return NULL;
+}
+
+static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev)
+{
+ struct list_head *list;
+ struct list_head *temp;
+ struct bnx2fc_hba *hba;
+
+ /* Called with bnx2fc_dev_lock held */
+ list_for_each_safe(list, temp, &adapter_list) {
+ hba = (struct bnx2fc_hba *)list;
+ if (hba->phys_dev == phys_dev)
+ return hba;
+ }
+ printk(KERN_ERR PFX "hba_lookup: hba NULL\n");
+ return NULL;
+}
+
+/**
+ * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources
+ *
+ * @dev: cnic device handle
+ */
+static void bnx2fc_ulp_exit(struct cnic_dev *dev)
+{
+ struct bnx2fc_hba *hba;
+
+ BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
+
+ if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n",
+ dev->netdev->name, dev->flags);
+ return;
+ }
+
+ mutex_lock(&bnx2fc_dev_lock);
+ hba = bnx2fc_find_hba_for_cnic(dev);
+ if (!hba) {
+		printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0x%p\n",
+ dev);
+ mutex_unlock(&bnx2fc_dev_lock);
+ return;
+ }
+
+ list_del_init(&hba->link);
+ adapter_count--;
+
+ if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) {
+ /* destroy not called yet, move to quiesced list */
+ bnx2fc_netdev_cleanup(hba);
+ bnx2fc_if_destroy(hba->ctlr.lp);
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+}
+
+/**
+ * bnx2fc_fcoe_reset - Resets the fcoe
+ *
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ fc_lport_reset(lport);
+ return 0;
+}
+
+
+static bool bnx2fc_match(struct net_device *netdev)
+{
+ mutex_lock(&bnx2fc_dev_lock);
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ struct net_device *phys_dev = vlan_dev_real_dev(netdev);
+
+ if (bnx2fc_hba_lookup(phys_dev)) {
+ mutex_unlock(&bnx2fc_dev_lock);
+ return true;
+ }
+ }
+ mutex_unlock(&bnx2fc_dev_lock);
+ return false;
+}
+
+
+static struct fcoe_transport bnx2fc_transport = {
+ .name = {"bnx2fc"},
+ .attached = false,
+ .list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .match = bnx2fc_match,
+ .create = bnx2fc_create,
+ .destroy = bnx2fc_destroy,
+ .enable = bnx2fc_enable,
+ .disable = bnx2fc_disable,
+};
+
+/**
+ * bnx2fc_percpu_thread_create - Create a receive thread for an
+ * online CPU
+ *
+ * @cpu: cpu index for the online cpu
+ */
+static void bnx2fc_percpu_thread_create(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+
+ p = &per_cpu(bnx2fc_percpu, cpu);
+
+ thread = kthread_create(bnx2fc_percpu_io_thread,
+ (void *)p,
+ "bnx2fc_thread/%d", cpu);
+ /* bind thread to the cpu */
+	if (likely(!IS_ERR(thread))) {
+ kthread_bind(thread, cpu);
+ p->iothread = thread;
+ wake_up_process(thread);
+ }
+}
+
+static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+{
+ struct bnx2fc_percpu_s *p;
+ struct task_struct *thread;
+ struct bnx2fc_work *work, *tmp;
+
+ BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
+
+ /* Prevent any new work from being queued for this CPU */
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&p->fp_work_lock);
+ thread = p->iothread;
+	p->iothread = NULL;
+
+	/* Free all work queued for this CPU */
+	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
+ list_del_init(&work->list);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ kfree(work);
+ }
+
+ spin_unlock_bh(&p->fp_work_lock);
+
+ if (thread)
+ kthread_stop(thread);
+}
+
+/**
+ * bnx2fc_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb: The callback data block
+ * @action: The event triggering the callback
+ * @hcpu: The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+		printk(KERN_INFO PFX "CPU %x online: Create Rx thread\n", cpu);
+ bnx2fc_percpu_thread_create(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+		printk(KERN_INFO PFX "CPU %x offline: Remove Rx thread\n", cpu);
+ bnx2fc_percpu_thread_destroy(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+/**
+ * bnx2fc_mod_init - module init entry point
+ *
+ * Initialize driver wide global data structures, and register
+ * with cnic module
+ **/
+static int __init bnx2fc_mod_init(void)
+{
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ int rc = 0;
+ unsigned int cpu = 0;
+ struct bnx2fc_percpu_s *p;
+
+ printk(KERN_INFO PFX "%s", version);
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&bnx2fc_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&adapter_list);
+ mutex_init(&bnx2fc_dev_lock);
+ adapter_count = 0;
+
+ /* Attach FC transport template */
+ rc = bnx2fc_attach_transport();
+ if (rc)
+ goto detach_ft;
+
+ bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+ if (!bnx2fc_wq) {
+ rc = -ENOMEM;
+ goto release_bt;
+ }
+
+ bg = &bnx2fc_global;
+ skb_queue_head_init(&bg->fcoe_rx_list);
+ l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
+ (void *)bg,
+ "bnx2fc_l2_thread");
+ if (IS_ERR(l2_thread)) {
+ rc = PTR_ERR(l2_thread);
+ goto free_wq;
+ }
+ wake_up_process(l2_thread);
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ bg->thread = l2_thread;
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(bnx2fc_percpu, cpu);
+ INIT_LIST_HEAD(&p->work_list);
+ spin_lock_init(&p->fp_work_lock);
+ }
+
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_create(cpu);
+ }
+
+	/* Register to create/destroy io threads on CPU hotplug events */
+ register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
+
+ return 0;
+
+free_wq:
+ destroy_workqueue(bnx2fc_wq);
+release_bt:
+ bnx2fc_release_transport();
+detach_ft:
+ fcoe_transport_detach(&bnx2fc_transport);
+out:
+ return rc;
+}
+
+static void __exit bnx2fc_mod_exit(void)
+{
+ LIST_HEAD(to_be_deleted);
+ struct bnx2fc_hba *hba, *next;
+ struct fcoe_percpu_s *bg;
+ struct task_struct *l2_thread;
+ struct sk_buff *skb;
+ unsigned int cpu = 0;
+
+ /*
+	 * NOTE: Since cnic calls the register_driver routine under
+	 * rtnl_lock, rtnl_lock has higher precedence than bnx2fc_dev_lock.
+ * unregister_device() cannot be called with bnx2fc_dev_lock
+ * held.
+ */
+ mutex_lock(&bnx2fc_dev_lock);
+ list_splice(&adapter_list, &to_be_deleted);
+ INIT_LIST_HEAD(&adapter_list);
+ adapter_count = 0;
+ mutex_unlock(&bnx2fc_dev_lock);
+
+ /* Unregister with cnic */
+ list_for_each_entry_safe(hba, next, &to_be_deleted, link) {
+ list_del_init(&hba->link);
+ printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n",
+ hba, atomic_read(&hba->kref.refcount));
+ bnx2fc_ulp_stop(hba);
+ /* unregister cnic device */
+ if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED,
+ &hba->reg_with_cnic))
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE);
+ bnx2fc_interface_destroy(hba);
+ }
+ cnic_unregister_driver(CNIC_ULP_FCOE);
+
+ /* Destroy global thread */
+ bg = &bnx2fc_global;
+ spin_lock_bh(&bg->fcoe_rx_list.lock);
+ l2_thread = bg->thread;
+ bg->thread = NULL;
+ while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+
+ spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+ if (l2_thread)
+ kthread_stop(l2_thread);
+
+ unregister_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+ /* Destroy per cpu threads */
+ for_each_online_cpu(cpu) {
+ bnx2fc_percpu_thread_destroy(cpu);
+ }
+
+ destroy_workqueue(bnx2fc_wq);
+ /*
+ * detach from scsi transport
+ * must happen after all destroys are done
+ */
+ bnx2fc_release_transport();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&bnx2fc_transport);
+}
+
+module_init(bnx2fc_mod_init);
+module_exit(bnx2fc_mod_exit);
+
+static struct fc_function_template bnx2fc_transport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = bnx2fc_get_host_stats,
+
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+
+ .terminate_rport_io = fc_rport_terminate_io,
+
+ .vport_create = bnx2fc_vport_create,
+ .vport_delete = bnx2fc_vport_destroy,
+ .vport_disable = bnx2fc_vport_disable,
+};
+
+static struct fc_function_template bnx2fc_vport_xport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct bnx2fc_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = bnx2fc_fcoe_reset,
+ .terminate_rport_io = fc_rport_terminate_io,
+};
+
+/**
+ * scsi_host_template structure used while registering with SCSI-ml
+ */
+static struct scsi_host_template bnx2fc_shost_template = {
+ .module = THIS_MODULE,
+ .name = "Broadcom Offload FCoE Initiator",
+ .queuecommand = bnx2fc_queuecommand,
+ .eh_abort_handler = bnx2fc_eh_abort, /* abts */
+ .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */
+ .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */
+ .eh_host_reset_handler = fc_eh_host_reset,
+ .slave_alloc = fc_slave_alloc,
+ .change_queue_depth = fc_change_queue_depth,
+ .change_queue_type = fc_change_queue_type,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2),
+ .use_clustering = ENABLE_CLUSTERING,
+ .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
+ .max_sectors = 512,
+};
+
+static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
+ .frame_send = bnx2fc_xmit,
+ .elsct_send = bnx2fc_elsct_send,
+ .fcp_abort_io = bnx2fc_abort_io,
+ .fcp_cleanup = bnx2fc_cleanup,
+ .rport_event_callback = bnx2fc_rport_event_handler,
+};
+
+/**
+ * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface
+ * structure carrying callback function pointers
+ */
+static struct cnic_ulp_ops bnx2fc_cnic_cb = {
+ .owner = THIS_MODULE,
+ .cnic_init = bnx2fc_ulp_init,
+ .cnic_exit = bnx2fc_ulp_exit,
+ .cnic_start = bnx2fc_ulp_start,
+ .cnic_stop = bnx2fc_ulp_stop,
+ .indicate_kcqes = bnx2fc_indicate_kcqe,
+ .indicate_netevent = bnx2fc_indicate_netevent,
+};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
new file mode 100644
index 000000000000..4f4096836742
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -0,0 +1,1868 @@
+/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * This file contains the code for the low level functions that
+ * interact with the 57712 FCoE firmware.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
+
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe);
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe);
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *conn_destroy);
+
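+/**
+ * bnx2fc_send_stat_req - sends a FCoE statistics request KWQE to the f/w
+ *
+ * @hba: adapter structure pointer
+ */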
+int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_stat stat_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
+ stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
+ stat_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
+ stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &stat_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
+ *
+ * @hba: adapter structure pointer
+ *
+ * Send down FCoE firmware init KWQEs which initiates the initial handshake
+ * with the f/w.
+ *
+ */
+int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_init1 fcoe_init1;
+ struct fcoe_kwqe_init2 fcoe_init2;
+ struct fcoe_kwqe_init3 fcoe_init3;
+ struct kwqe *kwqe_arr[3];
+ int num_kwqes = 3;
+ int rc = 0;
+
+ if (!hba->cnic) {
+ printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
+ return -ENODEV;
+ }
+
+ /* fill init1 KWQE */
+ memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
+ fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
+ fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+ fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
+ fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
+ fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
+ fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
+ fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+ fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
+ fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
+ fcoe_init1.task_list_pbl_addr_hi =
+ (u32) ((u64) hba->task_ctx_bd_dma >> 32);
+ fcoe_init1.mtu = hba->netdev->mtu;
+
+ fcoe_init1.flags = (PAGE_SHIFT <<
+ FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
+
+ fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
+
+ /* fill init2 KWQE */
+ memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
+ fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
+ fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
+ fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
+ ((u64) hba->hash_tbl_pbl_dma >> 32);
+
+ fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
+ fcoe_init2.t2_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_dma >> 32);
+
+ fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
+ fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
+ ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
+
+ fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
+
+ /* fill init3 KWQE */
+ memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
+ fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
+ fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ fcoe_init3.error_bit_map_lo = 0xffffffff;
+ fcoe_init3.error_bit_map_hi = 0xffffffff;
+
+
+ kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
+ kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
+ kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
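+
+/**
+ * bnx2fc_send_fw_fcoe_destroy_msg - sends the FCoE firmware destroy KWQE
+ *
+ * @hba: adapter structure pointer
+ */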
+int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
+{
+ struct fcoe_kwqe_destroy fcoe_destroy;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = -1;
+
+ /* fill destroy KWQE */
+ memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
+ fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
+ fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
+ FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+ kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_hba *hba = port->priv;
+ struct kwqe *kwqe_arr[4];
+ struct fcoe_kwqe_conn_offload1 ofld_req1;
+ struct fcoe_kwqe_conn_offload2 ofld_req2;
+ struct fcoe_kwqe_conn_offload3 ofld_req3;
+ struct fcoe_kwqe_conn_offload4 ofld_req4;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 4;
+ u32 port_id;
+ int rc = 0;
+ u16 conn_id;
+
+ /* Initialize offload request 1 structure */
+ memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
+
+ ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
+ ofld_req1.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+
+ conn_id = (u16)tgt->fcoe_conn_id;
+ ofld_req1.fcoe_conn_id = conn_id;
+
+
+ ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
+ ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
+
+ ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
+ ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
+
+ ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
+ ofld_req1.rq_first_pbe_addr_hi =
+ (u32)((u64) tgt->rq_dma >> 32);
+
+ ofld_req1.rq_prod = 0x8000;
+
+ /* Initialize offload request 2 structure */
+ memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
+
+ ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
+ ofld_req2.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
+
+ ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
+ ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
+
+ ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
+ ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
+
+ ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
+ ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
+
+ /* Initialize offload request 3 structure */
+ memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
+
+ ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
+ ofld_req3.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req3.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
+ ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id == 0) {
+ BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Store the s_id of the initiator for further reference. It is
+	 * used during disable/destroy in linkdown processing, because
+	 * when the lport is reset, its port_id is also reset to 0.
+	 */
+ tgt->sid = port_id;
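+	/* FC port ids are 24 bits; pack them byte-wise, low byte first */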
+ ofld_req3.s_id[0] = (port_id & 0x000000FF);
+ ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ ofld_req3.d_id[0] = (port_id & 0x000000FF);
+ ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ ofld_req3.tx_total_conc_seqs = rdata->max_seq;
+
+ ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
+ ofld_req3.rx_max_fc_pay_len = lport->mfs;
+
+ ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
+ ofld_req3.rx_open_seqs_exch_c3 = 1;
+
+ ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
+ ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
+
+ /* set mul_n_port_ids supported flag to 0, until it is supported */
+ ofld_req3.flags = 0;
+ /*
+ ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
+ */
+ /* Info from PLOGI response */
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
+
+ ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
+
+ /* vlan flag */
+ ofld_req3.flags |= (hba->vlan_enabled <<
+ FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
+
+	/* C2_VALID and ACK flags are not set as they are not supported */
+
+
+ /* Initialize offload request 4 structure */
+ memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
+ ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
+ ofld_req4.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
+
+
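+	/* f/w takes the MACs byte-reversed, split into lo32/hi16 halves */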
+ ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
+ ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
+ ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
+
+ ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
+ ofld_req4.confq_pbl_base_addr_hi =
+ (u32)((u64) tgt->confq_pbl_dma >> 32);
+
+ kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+ kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+ kwqe_arr[2] = (struct kwqe *) &ofld_req3;
+ kwqe_arr[3] = (struct kwqe *) &ofld_req4;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct kwqe *kwqe_arr[2];
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable enbl_req;
+ struct fc_lport *lport = port->lport;
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&enbl_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
+ enbl_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+ /* local mac */
+ enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = fc_host_port_id(lport->host);
+ if (port_id != tgt->sid) {
+ printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
+ "sid = 0x%x\n", port_id, tgt->sid);
+ port_id = tgt->sid;
+ }
+ enbl_req.s_id[0] = (port_id & 0x000000FF);
+ enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+ port_id = rport->port_id;
+ enbl_req.d_id[0] = (port_id & 0x000000FF);
+ enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ enbl_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ enbl_req.vlan_flag = hba->vlan_enabled;
+ enbl_req.context_id = tgt->context_id;
+ enbl_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &enbl_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_disable_req - initiates FCoE Session disable
+ *
+ * @port: port structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_disable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct fcoe_kwqe_conn_enable_disable disable_req;
+ struct kwqe *kwqe_arr[2];
+ struct fc_rport *rport = tgt->rport;
+ int num_kwqes = 1;
+ int rc = 0;
+ u32 port_id;
+
+ memset(&disable_req, 0x00,
+ sizeof(struct fcoe_kwqe_conn_enable_disable));
+ disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
+ disable_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+	disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
+	disable_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
+ disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
+ disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
+ disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
+ disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
+
+ disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
+ disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
+ disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
+ disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
+ disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
+ disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
+
+ port_id = tgt->sid;
+ disable_req.s_id[0] = (port_id & 0x000000FF);
+ disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
+
+
+ port_id = rport->port_id;
+ disable_req.d_id[0] = (port_id & 0x000000FF);
+ disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
+ disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
+ disable_req.context_id = tgt->context_id;
+ disable_req.conn_id = tgt->fcoe_conn_id;
+ disable_req.vlan_tag = hba->vlan_id <<
+ FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
+ disable_req.vlan_tag |=
+ 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
+ disable_req.vlan_flag = hba->vlan_enabled;
+
+ kwqe_arr[0] = (struct kwqe *) &disable_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+/**
+ * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ struct fcoe_kwqe_conn_destroy destroy_req;
+ struct kwqe *kwqe_arr[2];
+ int num_kwqes = 1;
+ int rc = 0;
+
+ memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
+ destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
+ destroy_req.hdr.flags =
+ (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+ destroy_req.context_id = tgt->context_id;
+ destroy_req.conn_id = tgt->fcoe_conn_id;
+
+ kwqe_arr[0] = (struct kwqe *) &destroy_req;
+
+ if (hba->cnic && hba->cnic->submit_kwqes)
+ rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+
+ return rc;
+}
+
+static void bnx2fc_unsol_els_work(struct work_struct *work)
+{
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+
+ unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
+ lport = unsol_els->lport;
+ fp = unsol_els->fp;
+ fc_exch_recv(lport, fp);
+ kfree(unsol_els);
+}
+
+void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
+ unsigned char *buf,
+ u32 frame_len, u16 l2_oxid)
+{
+ struct fcoe_port *port = tgt->port;
+ struct fc_lport *lport = port->lport;
+ struct bnx2fc_unsol_els *unsol_els;
+ struct fc_frame_header *fh;
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+ u32 payload_len;
+ u32 crc;
+ u8 op;
+
+
+ unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
+ if (!unsol_els) {
+ BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
+ l2_oxid, frame_len);
+
+ payload_len = frame_len - sizeof(struct fc_frame_header);
+
+ fp = fc_frame_alloc(lport, payload_len);
+	fp = fc_frame_alloc(lport, payload_len);
+	if (!fp) {
+		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
+		kfree(unsol_els);
+		return;
+	}
+
+ fh = (struct fc_frame_header *) fc_frame_header_get(fp);
+ /* Copy FC Frame header and payload into the frame */
+ memcpy(fh, buf, frame_len);
+
+ if (l2_oxid != FC_XID_UNKNOWN)
+ fh->fh_ox_id = htons(l2_oxid);
+
+ skb = fp_skb(fp);
+
+ if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
+ (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
+
+ if (fh->fh_type == FC_TYPE_ELS) {
+ op = fc_frame_payload_op(fp);
+ if ((op == ELS_TEST) || (op == ELS_ESTC) ||
+ (op == ELS_FAN) || (op == ELS_CSU)) {
+ /*
+ * No need to reply for these
+ * ELS requests
+ */
+ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
+ kfree_skb(skb);
+ return;
+ }
+ }
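+		/*
+		 * Recompute the FC CRC and reinitialize the frame before
+		 * queueing it to libfc via the unsolicited ELS worker.
+		 */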
+ crc = fcoe_fc_crc(fp);
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_crc(fp) = cpu_to_le32(~crc);
+ unsol_els->lport = lport;
+ unsol_els->fp = fp;
+ INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
+ queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
+ } else {
+ BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
+ kfree_skb(skb);
+ }
+}
+
+static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ u8 num_rq;
+ struct fcoe_err_report_entry *err_entry;
+ unsigned char *rq_data;
+ unsigned char *buf = NULL, *buf1;
+ int i;
+ u16 xid;
+ u32 frame_len, len;
+ struct bnx2fc_cmd *io_req = NULL;
+ struct fcoe_task_ctx_entry *task, *task_page;
+ struct bnx2fc_hba *hba = tgt->port->priv;
+ int task_idx, index;
+ int rc = 0;
+
+
+ BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
+ switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
+ case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
+ frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
+ FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
+
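+		/* number of RQ buffers spanned by the frame, rounded up */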
+ num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
+
+ rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
+ if (rq_data) {
+ buf = rq_data;
+ } else {
+ buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
+ GFP_ATOMIC);
+
+ if (!buf1) {
+ BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
+ break;
+ }
+
+ for (i = 0; i < num_rq; i++) {
+ rq_data = (unsigned char *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ len = BNX2FC_RQ_BUF_SZ;
+ memcpy(buf1, rq_data, len);
+ buf1 += len;
+ }
+ }
+ bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
+ FC_XID_UNKNOWN);
+
+ if (buf != rq_data)
+ kfree(buf);
+ bnx2fc_return_rqe(tgt, num_rq);
+ break;
+
+ case FCOE_ERROR_DETECTION_CQE_TYPE:
+		/*
+		 * In case of an error reporting CQE, a single RQ entry
+		 * is consumed.
+		 */
+ spin_lock_bh(&tgt->tgt_lock);
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = err_entry->fc_hdr.ox_id;
+ BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+
+ if (xid > BNX2FC_MAX_XID) {
+ BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
+ xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)
+ hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+ if (!io_req) {
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
+ printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
+ "progress.. ignore unsol err\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+ }
+
+ /*
+ * If ABTS is already in progress, and FW error is
+ * received after that, do not cancel the timeout_work
+ * and let the error recovery continue by explicitly
+ * logging out the target, when the ABTS eventually
+ * times out.
+ */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /*
+ * Cancel the timeout_work, as we received IO
+ * completion with FW error.
+ */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* timer hold */
+
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc != SUCCESS) {
+ BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
+ "failed. issue cleanup\n");
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+ } else
+ printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
+ "in ABTS processing\n", xid);
+ spin_unlock_bh(&tgt->tgt_lock);
+ break;
+
+ case FCOE_WARNING_DETECTION_CQE_TYPE:
+ /*
+		 * In case of a warning reporting CQE, a single RQ entry
+		 * is consumed.
+ */
+ num_rq = 1;
+ err_entry = (struct fcoe_err_report_entry *)
+ bnx2fc_get_next_rqe(tgt, 1);
+ xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
+ BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
+ BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
+ err_entry->err_warn_bitmap_hi,
+ err_entry->err_warn_bitmap_lo);
+ BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
+ err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+ bnx2fc_return_rqe(tgt, 1);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
+ break;
+ }
+}
+
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd *io_req;
+ int task_idx, index;
+ u16 xid;
+ u8 cmd_type;
+ u8 rx_state = 0;
+ u8 num_rq;
+
+ spin_lock_bh(&tgt->tgt_lock);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= BNX2FC_MAX_TASKS) {
+ printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+
+ num_rq = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+
+ io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+
+ if (io_req == NULL) {
+ printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+
+ /* Timestamp IO completion time */
+ cmd_type = io_req->cmd_type;
+
+ /* optimized completion path */
+ if (cmd_type == BNX2FC_SCSI_CMD) {
+ rx_state = ((task->rx_wr_tx_rd.rx_flags &
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
+ FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+
+ if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return;
+ }
+ }
+
+ /* Process other IO completion types */
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+ bnx2fc_process_abts_compl(io_req, task, num_rq);
+ else if (rx_state ==
+ FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ else
+ printk(KERN_ERR PFX "Invalid rx state - %d\n",
+ rx_state);
+ break;
+
+ case BNX2FC_TASK_MGMT_CMD:
+ BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
+ bnx2fc_process_tm_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_ABTS:
+ /*
+ * ABTS request received by firmware. ABTS response
+ * will be delivered to the task belonging to the IO
+ * that was aborted
+ */
+ BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ case BNX2FC_ELS:
+ BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
+ bnx2fc_process_els_compl(io_req, task, num_rq);
+ break;
+
+ case BNX2FC_CLEANUP:
+ BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
+ break;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+{
+ struct bnx2fc_work *work;
+ work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
+ if (!work)
+ return NULL;
+
+ INIT_LIST_HEAD(&work->list);
+ work->tgt = tgt;
+ work->wqe = wqe;
+ return work;
+}
+
+int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_cqe *cq;
+ u32 cq_cons;
+ struct fcoe_cqe *cqe;
+ u16 wqe;
+ bool more_cqes_found = false;
+
+ /*
+ * cq_lock is a low contention lock used to protect
+ * the CQ data structure from being freed up during
+ * the upload operation
+ */
+ spin_lock_bh(&tgt->cq_lock);
+
+ if (!tgt->cq) {
+ printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+ }
+ cq = tgt->cq;
+ cq_cons = tgt->cq_cons_idx;
+ cqe = &cq[cq_cons];
+
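+	/*
+	 * A CQE belongs to the driver while its toggle bit matches
+	 * cq_curr_toggle_bit; the expected value flips on every wrap
+	 * of the consumer index.
+	 */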
+ do {
+ more_cqes_found ^= true;
+
+ while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
+ (tgt->cq_curr_toggle_bit <<
+ FCOE_CQE_TOGGLE_BIT_SHIFT)) {
+
+ /* new entry on the cq */
+ if (wqe & FCOE_CQE_CQE_TYPE) {
+ /* Unsolicited event notification */
+ bnx2fc_process_unsol_compl(tgt, wqe);
+ } else {
+ struct bnx2fc_work *work = NULL;
+ struct bnx2fc_percpu_s *fps = NULL;
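+				/*
+				 * Defer the completion to a per-CPU io
+				 * thread, selected from the wqe contents.
+				 */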
+ unsigned int cpu = wqe % num_possible_cpus();
+
+ fps = &per_cpu(bnx2fc_percpu, cpu);
+ spin_lock_bh(&fps->fp_work_lock);
+ if (unlikely(!fps->iothread))
+ goto unlock;
+
+ work = bnx2fc_alloc_work(tgt, wqe);
+ if (work)
+ list_add_tail(&work->list,
+ &fps->work_list);
+unlock:
+ spin_unlock_bh(&fps->fp_work_lock);
+
+ /* Pending work request completion */
+ if (fps->iothread && work)
+ wake_up_process(fps->iothread);
+ else
+ bnx2fc_process_cq_compl(tgt, wqe);
+ }
+ cqe++;
+ tgt->cq_cons_idx++;
+
+ if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
+ tgt->cq_cons_idx = 0;
+ cqe = cq;
+ tgt->cq_curr_toggle_bit =
+ 1 - tgt->cq_curr_toggle_bit;
+ }
+ }
+ /* Re-arm CQ */
+ if (more_cqes_found) {
+ tgt->conn_db->cq_arm.lo = -1;
+ wmb();
+ }
+ } while (more_cqes_found);
+
+ /*
+	 * Commit the tgt->cq_cons_idx change to memory. spin_unlock
+	 * implies a full memory barrier, so there is no need for smp_wmb.
+ */
+
+ spin_unlock_bh(&tgt->cq_lock);
+ return 0;
+}
+
+/**
+ * bnx2fc_fastpath_notification - process global event queue (KCQ)
+ *
+ * @hba: adapter structure pointer
+ * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
+ *
+ * Fast path event notification handler
+ */
+static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *new_cqe_kcqe)
+{
+ u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
+ struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
+
+ if (!tgt) {
+ printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
+ return;
+ }
+
+ bnx2fc_process_new_cqes(tgt);
+}
+
+/**
+ * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session offload completion, enable the session if offload is
+ * successful.
+ */
+static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ struct fcoe_port *port;
+ u32 conn_id;
+ u32 context_id;
+ int rc;
+
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
+ return;
+ }
+ BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+ port = tgt->port;
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+ goto ofld_cmpl_err;
+ }
+ /*
+ * cnic has allocated a context_id for this session; use this
+ * while enabling the session.
+ */
+ tgt->context_id = context_id;
+ if (ofld_kcqe->completion_status) {
+ if (ofld_kcqe->completion_status ==
+ FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
+ printk(KERN_ERR PFX "unable to allocate FCoE context "
+ "resources\n");
+ set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
+ }
+ goto ofld_cmpl_err;
+ } else {
+
+ /* now enable the session */
+ rc = bnx2fc_send_session_enable_req(port, tgt);
+ if (rc) {
+ printk(KERN_ALERT PFX "enable session failed\n");
+ goto ofld_cmpl_err;
+ }
+ }
+ return;
+ofld_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+/**
+ * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
+ *
+ * @hba: adapter structure pointer
+ * @ofld_kcqe: connection offload kcqe pointer
+ *
+ * handle session enable completion, mark the rport as ready
+ */
+
+static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *ofld_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+ u32 context_id;
+
+ context_id = ofld_kcqe->fcoe_conn_context_id;
+ conn_id = ofld_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
+ ofld_kcqe->fcoe_conn_context_id);
+
+ /*
+ * context_id should be the same for this target during offload
+ * and enable
+ */
+ if (tgt->context_id != context_id) {
+ printk(KERN_ALERT PFX "context id mis-match\n");
+ return;
+ }
+ if (hba != tgt->port->priv) {
+ printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+ goto enbl_cmpl_err;
+ }
+ if (ofld_kcqe->completion_status) {
+ goto enbl_cmpl_err;
+ } else {
+ /* enable successful - rport ready for issuing IOs */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+ }
+ return;
+
+enbl_cmpl_err:
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *disable_kcqe)
+{
+
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = disable_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
+ return;
+ }
+
+	BNX2FC_TGT_DBG(tgt, "disable_cmpl: conn_id %d\n", conn_id);
+
+ if (disable_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
+ disable_kcqe->completion_status);
+ return;
+ } else {
+ /* disable successful */
+ BNX2FC_TGT_DBG(tgt, "disable successful\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+ struct fcoe_kcqe *destroy_kcqe)
+{
+ struct bnx2fc_rport *tgt;
+ u32 conn_id;
+
+ conn_id = destroy_kcqe->fcoe_conn_id;
+ tgt = hba->tgt_ofld_list[conn_id];
+ if (!tgt) {
+ printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+ return;
+ }
+
+ BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
+
+ if (destroy_kcqe->completion_status) {
+ printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+ destroy_kcqe->completion_status);
+ return;
+ } else {
+ /* destroy successful */
+ BNX2FC_TGT_DBG(tgt, "upload successful\n");
+ clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+ }
+}
+
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
+{
+ switch (err_code) {
+ case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
+ printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
+ printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
+ break;
+
+ case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
+ printk(KERN_ERR PFX "init_failure due to NIC error\n");
+ break;
+
+ default:
+ printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
+ }
+}
+
+/**
+ * bnx2fc_indicate_kcqe - process KCQE
+ *
+ * @context: adapter structure pointer
+ * @kcq: kcqe array pointer
+ * @num_cqe: Number of completion queue elements
+ *
+ * Generic KCQ event handler
+ */
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+ u32 num_cqe)
+{
+ struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ int i = 0;
+ struct fcoe_kcqe *kcqe = NULL;
+
+ while (i < num_cqe) {
+ kcqe = (struct fcoe_kcqe *) kcq[i++];
+
+ switch (kcqe->op_code) {
+ case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
+ bnx2fc_fastpath_notification(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
+ bnx2fc_process_ofld_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_ENABLE_CONN:
+ bnx2fc_process_enable_conn_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_INIT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+ bnx2fc_init_failure(hba,
+ kcqe->completion_status);
+ } else {
+ set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+ bnx2fc_get_link_state(hba);
+ printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
+ (u8)hba->pcidev->bus->number);
+ }
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+
+ printk(KERN_ERR PFX "DESTROY failed\n");
+ } else {
+				printk(KERN_INFO PFX "DESTROY success\n");
+ }
+ hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+ wake_up_interruptible(&hba->destroy_wait);
+ break;
+
+ case FCOE_KCQE_OPCODE_DISABLE_CONN:
+ bnx2fc_process_conn_disable_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_DESTROY_CONN:
+ bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
+ break;
+
+ case FCOE_KCQE_OPCODE_STAT_FUNC:
+ if (kcqe->completion_status !=
+ FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
+ printk(KERN_ERR PFX "STAT failed\n");
+ complete(&hba->stat_req_done);
+ break;
+
+ case FCOE_KCQE_OPCODE_FCOE_ERROR:
+ /* fall thru */
+ default:
+ printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+ kcqe->op_code);
+ }
+ }
+}
+
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
+{
+ struct fcoe_sqe *sqe;
+
+ sqe = &tgt->sq[tgt->sq_prod_idx];
+
+ /* Fill SQ WQE */
+ sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
+ sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
+
+ /* Advance SQ Prod Idx */
+ if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
+ tgt->sq_prod_idx = 0;
+ tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
+ }
+}
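+
+/*
+ * Illustrative sketch (not part of the driver): the SQ is a circular
+ * ring where a toggle bit, flipped on every wrap of the producer index,
+ * lets the firmware tell a fresh pass over the ring from stale entries.
+ * With a hypothetical 4-entry ring:
+ *
+ *	u16 prod = 3;
+ *	u8 toggle = 0;
+ *	if (++prod == 4) {		// wrap
+ *		prod = 0;
+ *		toggle = 1 - toggle;	// firmware sees the pass change
+ *	}
+ */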
+
+void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
+{
+ struct b577xx_doorbell_set_prod ev_doorbell;
+ u32 msg;
+
+ wmb();
+
+ memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
+ ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
+
+ ev_doorbell.prod = tgt->sq_prod_idx |
+ (tgt->sq_curr_toggle_bit << 15);
+ ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
+ B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+ msg = *((u32 *)&ev_doorbell);
+ writel(cpu_to_le32(msg), tgt->ctx_base);
+
+	mmiowb();
+}
+
+int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
+{
+ u32 context_id = tgt->context_id;
+ struct fcoe_port *port = tgt->port;
+ u32 reg_off;
+ resource_size_t reg_base;
+ struct bnx2fc_hba *hba = port->priv;
+
+ reg_base = pci_resource_start(hba->pcidev,
+ BNX2X_DOORBELL_PCI_BAR);
+ reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
+ (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
+ tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!tgt->ctx_base)
+ return -ENOMEM;
+ return 0;
+}
+
+char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
+
+ if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
+ return NULL;
+
+ tgt->rq_cons_idx += num_items;
+
+ if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
+ tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
+
+ return buf;
+}
+
+void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
+{
+ /* return the rq buffer */
+ u32 next_prod_idx = tgt->rq_prod_idx + num_items;
+ if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
+ /* Wrap around RQ */
+ next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
+ }
+ tgt->rq_prod_idx = next_prod_idx;
+ tgt->conn_db->rq_prod = tgt->rq_prod_idx;
+}
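+
+/*
+ * Illustrative sketch (not part of the driver): rq_prod_idx is a 16-bit
+ * value whose low 15 bits count 0..BNX2FC_RQ_WQES_MAX-1 and whose bit
+ * 15 is a toggle that flips on every wrap. Adding
+ * (0x8000 - BNX2FC_RQ_WQES_MAX) at the wrap point zeroes the low bits
+ * and flips bit 15 in one addition. With a hypothetical max of 16:
+ *
+ *	u16 prod = 15;
+ *	prod += 1;			// hits 16 == max
+ *	if ((prod & 0x7fff) == 16)
+ *		prod += 0x8000 - 16;	// low bits -> 0, bit 15 flips
+ */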
+
+void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u16 orig_xid)
+{
+ u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u32 context_id = tgt->context_id;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+ /* Common */
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+	task->cmn.general.cleanup_info.task_id = orig_xid;
+}
+
+void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_frame_header *fc_hdr;
+ u8 task_type = 0;
+ u64 *hdr;
+ u64 temp_hdr[3];
+ u32 context_id;
+
+ /* Obtain task_type */
+ if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
+ (io_req->cmd_type == BNX2FC_ELS)) {
+ task_type = FCOE_TASK_TYPE_MIDPATH;
+ } else if (io_req->cmd_type == BNX2FC_ABTS) {
+ task_type = FCOE_TASK_TYPE_ABTS;
+ }
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
+ io_req->cmd_type, task_type);
+
+ /* Tx only */
+ if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
+ (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_req_bd_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_req_bd_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
+ (unsigned long long)mp_req->mp_req_bd_dma);
+ }
+
+ /* Tx Write Rx Read */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Rx Write Tx Read */
+ fc_hdr = &(mp_req->req_fc_hdr);
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
+ fc_hdr->fh_rx_id = htons(0xffff);
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+ } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+ fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
+ }
+
+ /* Fill FC Header into middle path buffer */
+ hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ /* Rx Only */
+	if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)mp_req->mp_resp_bd_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
+ }
+}
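+
+/*
+ * Illustrative sketch (not part of the driver): a FC frame header is 24
+ * bytes, so it is staged into the task context as three 64-bit words,
+ * each byte-swapped to big endian; the unrolled copies above are
+ * equivalent to:
+ *
+ *	u64 tmp[3];
+ *	BUILD_BUG_ON(sizeof(struct fc_frame_header) != sizeof(tmp));
+ *	memcpy(tmp, fc_hdr, sizeof(tmp));
+ *	for (i = 0; i < 3; i++)
+ *		hdr[i] = cpu_to_be64(tmp[i]);
+ */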
+
+void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task)
+{
+ u8 task_type;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct io_bdt *bd_tbl = io_req->bd_tbl;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u64 *fcp_cmnd;
+ u64 tmp_fcp_cmnd[4];
+ u32 context_id;
+ int cnt, i;
+ int bd_count;
+
+ memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task;
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ task_type = FCOE_TASK_TYPE_WRITE;
+ else
+ task_type = FCOE_TASK_TYPE_READ;
+
+ /* Tx only */
+ if (task_type == FCOE_TASK_TYPE_WRITE) {
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+
+	/* Tx Write Rx Read */
+ /* Init state to NORMAL */
+ task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
+ task->tx_wr_rx_rd.init_flags = task_type <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
+ task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
+
+ /* Common */
+ task->cmn.data_2_trns = io_req->data_xfer_len;
+ context_id = tgt->context_id;
+ task->cmn.common_flags = context_id <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
+ task->cmn.common_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
+
+ /* Set initiative ownership */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
+
+ /* Set initial seq counter */
+ task->cmn.tx_low_seq_cnt = 1;
+
+ /* Set state to "waiting for the first packet" */
+ task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
+
+ /* Fill FCP_CMND IU */
+ fcp_cmnd = (u64 *)
+ task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+
+ /* swap fcp_cmnd */
+ cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
+
+ for (i = 0; i < cnt; i++) {
+ *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
+ fcp_cmnd++;
+ }
+
+ /* Rx Write Tx Read */
+ task->rx_wr_tx_rd.rx_id = 0xffff;
+
+ /* Rx Only */
+	if (task_type == FCOE_TASK_TYPE_READ) {
+		bd_count = bd_tbl->bd_valid;
+		if (bd_count == 1) {
+			struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
+ fcoe_bd_tbl->buf_addr_lo;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
+ fcoe_bd_tbl->buf_addr_hi;
+ task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
+ fcoe_bd_tbl->buf_len;
+ task->tx_wr_rx_rd.init_flags |= 1 <<
+ FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
+		} else {
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
+ (u32)bd_tbl->bd_tbl_dma;
+ task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
+ (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
+ task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
+ bd_tbl->bd_valid;
+ }
+ }
+}
+
+/**
+ * bnx2fc_setup_task_ctx - allocate and map task context
+ *
+ * @hba: pointer to adapter structure
+ *
+ * allocate memory for task context, and associated BD table to be used
+ * by firmware
+ *
+ */
+int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
+{
+ int rc = 0;
+ struct regpair *task_ctx_bdt;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Allocate task context bd table. A page size of bd table
+ * can map 256 buffers. Each buffer contains 32 task context
+ * entries. Hence the limit with one page is 8192 task context
+ * entries.
+ */
+ hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_bd_dma,
+ GFP_KERNEL);
+ if (!hba->task_ctx_bd_tbl) {
+ printk(KERN_ERR PFX "unable to allocate task context BDT\n");
+		rc = -ENOMEM;
+ goto out;
+ }
+ memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
+
+ /*
+ * Allocate task_ctx which is an array of pointers pointing to
+ * a page containing 32 task contexts
+ */
+ hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+ GFP_KERNEL);
+ if (!hba->task_ctx) {
+ printk(KERN_ERR PFX "unable to allocate task context array\n");
+		rc = -ENOMEM;
+ goto out1;
+ }
+
+ /*
+ * Allocate task_ctx_dma which is an array of dma addresses
+ */
+ hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+ sizeof(dma_addr_t)), GFP_KERNEL);
+ if (!hba->task_ctx_dma) {
+ printk(KERN_ERR PFX "unable to alloc context mapping array\n");
+		rc = -ENOMEM;
+ goto out2;
+ }
+
+ task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+
+ hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->task_ctx_dma[i],
+ GFP_KERNEL);
+ if (!hba->task_ctx[i]) {
+ printk(KERN_ERR PFX "unable to alloc task context\n");
+			rc = -ENOMEM;
+ goto out3;
+ }
+ memset(hba->task_ctx[i], 0, PAGE_SIZE);
+ addr = (u64)hba->task_ctx_dma[i];
+ task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
+ task_ctx_bdt->lo = cpu_to_le32((u32)addr);
+ task_ctx_bdt++;
+ }
+ return 0;
+
+out3:
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+		if (hba->task_ctx[i]) {
+			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i], hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+out2:
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+out1:
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+out:
+ return rc;
+}
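+
+/*
+ * Illustrative sketch (not part of the driver): the 8192-entry limit
+ * quoted above follows from the page-based layout. Assuming 4K pages
+ * and a 128-byte task context entry (example values, not taken from
+ * this file):
+ *
+ *	tasks_per_page = 4096 / 128;		// 32
+ *	max_tasks      = 256 * tasks_per_page;	// 8192 for 256 pages
+ */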
+
+void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
+{
+ int i;
+
+ if (hba->task_ctx_bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx_bd_tbl,
+ hba->task_ctx_bd_dma);
+ hba->task_ctx_bd_tbl = NULL;
+ }
+
+ if (hba->task_ctx) {
+ for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ if (hba->task_ctx[i]) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->task_ctx[i],
+ hba->task_ctx_dma[i]);
+ hba->task_ctx[i] = NULL;
+ }
+ }
+ kfree(hba->task_ctx);
+ hba->task_ctx = NULL;
+ }
+
+ kfree(hba->task_ctx_dma);
+ hba->task_ctx_dma = NULL;
+}
+
+static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int segment_count;
+ int hash_table_size;
+ u32 *pbl;
+
+ segment_count = hba->hash_tbl_segment_count;
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ dma_addr_t dma_address;
+
+ dma_address = le32_to_cpu(*pbl);
+ ++pbl;
+ dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+ ++pbl;
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_address);
+	}
+
+ if (hba->hash_tbl_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->hash_tbl_pbl,
+ hba->hash_tbl_pbl_dma);
+ hba->hash_tbl_pbl = NULL;
+ }
+}
+
+static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
+{
+ int i;
+ int hash_table_size;
+ int segment_count;
+ int segment_array_size;
+ int dma_segment_array_size;
+ dma_addr_t *dma_segment_array;
+ u32 *pbl;
+
+ hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
+ sizeof(struct fcoe_hash_table_entry);
+
+ segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
+ segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
+ hba->hash_tbl_segment_count = segment_count;
+
+ segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
+ hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
+ if (!hba->hash_tbl_segments) {
+ printk(KERN_ERR PFX "hash table pointers alloc failed\n");
+ return -ENOMEM;
+ }
+ dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
+ dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
+	if (!dma_segment_array) {
+		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
+		kfree(hba->hash_tbl_segments);
+		hba->hash_tbl_segments = NULL;
+		return -ENOMEM;
+	}
+
+ for (i = 0; i < segment_count; ++i) {
+ hba->hash_tbl_segments[i] =
+ dma_alloc_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ &dma_segment_array[i],
+ GFP_KERNEL);
+ if (!hba->hash_tbl_segments[i]) {
+ printk(KERN_ERR PFX "hash segment alloc failed\n");
+ while (--i >= 0) {
+ dma_free_coherent(&hba->pcidev->dev,
+ BNX2FC_HASH_TBL_CHUNK_SIZE,
+ hba->hash_tbl_segments[i],
+ dma_segment_array[i]);
+ hba->hash_tbl_segments[i] = NULL;
+ }
+			kfree(dma_segment_array);
+			kfree(hba->hash_tbl_segments);
+			hba->hash_tbl_segments = NULL;
+			return -ENOMEM;
+ }
+ memset(hba->hash_tbl_segments[i], 0,
+ BNX2FC_HASH_TBL_CHUNK_SIZE);
+ }
+
+ hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->hash_tbl_pbl_dma,
+ GFP_KERNEL);
+	if (!hba->hash_tbl_pbl) {
+		printk(KERN_ERR PFX "hash table pbl alloc failed\n");
+		while (--i >= 0)
+			dma_free_coherent(&hba->pcidev->dev,
+					  BNX2FC_HASH_TBL_CHUNK_SIZE,
+					  hba->hash_tbl_segments[i],
+					  dma_segment_array[i]);
+		kfree(hba->hash_tbl_segments);
+		kfree(dma_segment_array);
+		return -ENOMEM;
+	}
+ memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
+
+ pbl = hba->hash_tbl_pbl;
+ for (i = 0; i < segment_count; ++i) {
+ u64 paddr = dma_segment_array[i];
+ *pbl = cpu_to_le32((u32) paddr);
+ ++pbl;
+ *pbl = cpu_to_le32((u32) (paddr >> 32));
+ ++pbl;
+ }
+ kfree(dma_segment_array);
+ return 0;
+}
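+
+/*
+ * Illustrative sketch (not part of the driver): each PBL slot stores a
+ * 64-bit DMA address as two consecutive little-endian 32-bit words,
+ * low word first, matching both the fill loop above and the walk in
+ * bnx2fc_free_hash_table():
+ *
+ *	u64 paddr = 0x123456789abcULL;			// example address
+ *	u32 lo = cpu_to_le32((u32)paddr);		// 0x56789abc
+ *	u32 hi = cpu_to_le32((u32)(paddr >> 32));	// 0x00001234
+ */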
+
+/**
+ * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
+ *
+ * @hba: Pointer to adapter structure
+ *
+ */
+int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
+{
+ u64 addr;
+ u32 mem_size;
+ int i;
+
+ if (bnx2fc_allocate_hash_table(hba))
+ return -ENOMEM;
+
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_ptr_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl_ptr) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
+
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
+ &hba->t2_hash_tbl_dma,
+ GFP_KERNEL);
+ if (!hba->t2_hash_tbl) {
+ printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->t2_hash_tbl, 0x00, mem_size);
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+		addr = (u64)hba->t2_hash_tbl_dma +
+		       ((i + 1) * sizeof(struct fcoe_t2_hash_table_entry));
+ hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
+ hba->t2_hash_tbl[i].next.hi = addr >> 32;
+ }
+
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE, &hba->dummy_buf_dma,
+ GFP_KERNEL);
+ if (!hba->dummy_buffer) {
+ printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+
+ hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ PAGE_SIZE,
+ &hba->stats_buf_dma,
+ GFP_KERNEL);
+ if (!hba->stats_buffer) {
+ printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
+ bnx2fc_free_fw_resc(hba);
+ return -ENOMEM;
+ }
+ memset(hba->stats_buffer, 0x00, PAGE_SIZE);
+
+ return 0;
+}
+
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
+{
+ u32 mem_size;
+
+ if (hba->stats_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->stats_buffer, hba->stats_buf_dma);
+ hba->stats_buffer = NULL;
+ }
+
+ if (hba->dummy_buffer) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer, hba->dummy_buf_dma);
+ hba->dummy_buffer = NULL;
+ }
+
+ if (hba->t2_hash_tbl_ptr) {
+ mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl_ptr,
+ hba->t2_hash_tbl_ptr_dma);
+ hba->t2_hash_tbl_ptr = NULL;
+ }
+
+ if (hba->t2_hash_tbl) {
+ mem_size = BNX2FC_NUM_MAX_SESS *
+ sizeof(struct fcoe_t2_hash_table_entry);
+ dma_free_coherent(&hba->pcidev->dev, mem_size,
+ hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
+ hba->t2_hash_tbl = NULL;
+ }
+ bnx2fc_free_hash_table(hba);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
new file mode 100644
index 000000000000..0f1dd23730db
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -0,0 +1,1833 @@
+/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * IO manager and SCSI IO processing.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index);
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req);
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq);
+
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+ unsigned int timer_msec)
+{
+ struct bnx2fc_hba *hba = io_req->port->priv;
+
+ if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ kref_get(&io_req->refcount);
+}
+
+static void bnx2fc_cmd_timeout(struct work_struct *work)
+{
+ struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
+ timeout_work.work);
+ struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
+ u8 cmd_type = io_req->cmd_type;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ int logo_issued;
+ int rc;
+
+	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
+		      "req_flags = %lx\n", cmd_type, io_req->req_flags);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
+ clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+		/*
+		 * Ideally we should hold the io_req until RRQ completes,
+		 * and release io_req from timeout hold.
+		 */
+ spin_unlock_bh(&tgt->tgt_lock);
+ bnx2fc_send_rrq(io_req);
+ return;
+ }
+ if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
+ goto done;
+ }
+
+ switch (cmd_type) {
+ case BNX2FC_SCSI_CMD:
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
+ complete(&io_req->tm_done);
+ } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ /* Handle internally generated ABTS timeout */
+ BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+			/* Handle IO timeout */
+ BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "IO completed before "
+					      "timer expiry\n");
+ goto done;
+ }
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &io_req->req_flags)) {
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == SUCCESS)
+ goto done;
+ /*
+ * Explicitly logo the target if
+ * abts initiation fails
+ */
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!logo_issued) {
+ BNX2FC_IO_DBG(io_req, "Explicit "
+ "logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+
+
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ } else {
+ BNX2FC_IO_DBG(io_req, "IO already in "
+ "ABTS processing\n");
+ }
+ }
+ break;
+ case BNX2FC_ELS:
+
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ lport = io_req->port->lport;
+ rdata = io_req->tgt->rdata;
+ logo_issued = test_and_set_bit(
+ BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ /* Explicitly logo the target */
+ if (!logo_issued) {
+					BNX2FC_IO_DBG(io_req, "Explicit "
+						      "logo (els)\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ return;
+ }
+ } else {
+ /*
+ * Handle ELS timeout.
+ * tgt_lock is used to sync compl path and timeout
+ * path. If els compl path is processing this IO, we
+ * have nothing to do here, just release the timer hold
+ */
+ BNX2FC_IO_DBG(io_req, "ELS timed out\n");
+ if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
+ &io_req->req_flags))
+ goto done;
+
+ /* Indicate the cb_func that this ELS is timed out */
+ set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+ }
+ break;
+ default:
+ printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
+ cmd_type);
+ break;
+ }
+
+done:
+ /* release the cmd that was held when timer was set */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
+{
+ /* Called with host lock held */
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+ /*
+ * active_cmd_queue may have other command types as well,
+ * and during flush operation, we want to error back only
+ * scsi commands.
+ */
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ return;
+
+ BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
+ bnx2fc_unmap_sg_list(io_req);
+ io_req->sc_cmd = NULL;
+ if (!sc_cmd) {
+ printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
+ "IO(0x%x) already cleaned up\n",
+ io_req->xid);
+ return;
+ }
+ sc_cmd->result = err_code << 16;
+
+ BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
+ sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
+ sc_cmd->allowed);
+ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+}
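+
+/*
+ * Illustrative sketch (not part of the driver): scsi_cmnd->result packs
+ * the transport disposition into the host byte, which is why the error
+ * code is shifted left by 16 above:
+ *
+ *	sc_cmd->result = DID_ERROR << 16;
+ *	host_byte(sc_cmd->result);	// recovers DID_ERROR
+ */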
+
+struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba,
+ u16 min_xid, u16 max_xid)
+{
+ struct bnx2fc_cmd_mgr *cmgr;
+ struct io_bdt *bdt_info;
+ struct bnx2fc_cmd *io_req;
+ size_t len;
+ u32 mem_size;
+ u16 xid;
+ int i;
+ int num_ios;
+ size_t bd_tbl_sz;
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
+		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
+		       "and max_xid 0x%x\n", min_xid, max_xid);
+ return NULL;
+ }
+ BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);
+
+ num_ios = max_xid - min_xid + 1;
+ len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
+ len += sizeof(struct bnx2fc_cmd_mgr);
+
+ cmgr = kzalloc(len, GFP_KERNEL);
+ if (!cmgr) {
+ printk(KERN_ERR PFX "failed to alloc cmgr\n");
+ return NULL;
+ }
+
+ cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list) {
+ printk(KERN_ERR PFX "failed to alloc free_list\n");
+ goto mem_err;
+ }
+
+ cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
+ num_possible_cpus(), GFP_KERNEL);
+ if (!cmgr->free_list_lock) {
+ printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+ goto mem_err;
+ }
+
+ cmgr->hba = hba;
+ cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ INIT_LIST_HEAD(&cmgr->free_list[i]);
+ spin_lock_init(&cmgr->free_list_lock[i]);
+ }
+
+ /* Pre-allocated pool of bnx2fc_cmds */
+	xid = min_xid;
+ for (i = 0; i < num_ios; i++) {
+ io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);
+
+ if (!io_req) {
+ printk(KERN_ERR PFX "failed to alloc io_req\n");
+ goto mem_err;
+ }
+
+ INIT_LIST_HEAD(&io_req->link);
+ INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);
+
+ io_req->xid = xid++;
+ if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS)
+ printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n",
+ io_req->xid);
+ list_add_tail(&io_req->link,
+ &cmgr->free_list[io_req->xid % num_possible_cpus()]);
+ }
+
+ /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
+ mem_size = num_ios * sizeof(struct io_bdt *);
+ cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool) {
+ printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
+ goto mem_err;
+ }
+
+ mem_size = sizeof(struct io_bdt);
+ for (i = 0; i < num_ios; i++) {
+ cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
+ if (!cmgr->io_bdt_pool[i]) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "io_bdt_pool[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+	/* Allocate and map fcoe_bdt_ctx structures */
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+ bd_tbl_sz,
+ &bdt_info->bd_tbl_dma,
+ GFP_KERNEL);
+ if (!bdt_info->bd_tbl) {
+ printk(KERN_ERR PFX "failed to alloc "
+ "bdt_tbl[%d]\n", i);
+ goto mem_err;
+ }
+ }
+
+ return cmgr;
+
+mem_err:
+ bnx2fc_cmd_mgr_free(cmgr);
+ return NULL;
+}
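+
+/*
+ * Illustrative sketch (not part of the driver): spreading the
+ * pre-allocated commands over per-CPU free lists by xid modulo the CPU
+ * count keeps free-list lock contention local to the CPU issuing the
+ * IO. E.g. with 4 possible CPUs:
+ *
+ *	cpu = io_req->xid % 4;	// xid 0 -> list 0, xid 5 -> list 1
+ *	list_add_tail(&io_req->link, &cmgr->free_list[cpu]);
+ */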
+
+void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
+{
+ struct io_bdt *bdt_info;
+ struct bnx2fc_hba *hba = cmgr->hba;
+ size_t bd_tbl_sz;
+ u16 min_xid = BNX2FC_MIN_XID;
+ u16 max_xid = BNX2FC_MAX_XID;
+ int num_ios;
+ int i;
+
+ num_ios = max_xid - min_xid + 1;
+
+ /* Free fcoe_bdt_ctx structures */
+ if (!cmgr->io_bdt_pool)
+ goto free_cmd_pool;
+
+ bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ if (bdt_info->bd_tbl) {
+ dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
+ bdt_info->bd_tbl,
+ bdt_info->bd_tbl_dma);
+ bdt_info->bd_tbl = NULL;
+ }
+ }
+
+ /* Destroy io_bdt pool */
+ for (i = 0; i < num_ios; i++) {
+ kfree(cmgr->io_bdt_pool[i]);
+ cmgr->io_bdt_pool[i] = NULL;
+ }
+
+ kfree(cmgr->io_bdt_pool);
+ cmgr->io_bdt_pool = NULL;
+
+free_cmd_pool:
+ kfree(cmgr->free_list_lock);
+
+ /* Destroy cmd pool */
+ if (!cmgr->free_list)
+ goto free_cmgr;
+
+ for (i = 0; i < num_possible_cpus(); i++) {
+ struct list_head *list;
+ struct list_head *tmp;
+
+ list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
+ struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+ list_del(&io_req->link);
+ kfree(io_req);
+ }
+ }
+ kfree(cmgr->free_list);
+free_cmgr:
+ /* Free command manager itself */
+ kfree(cmgr);
+}
+
+struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = tgt->max_sqes;
+ switch (type) {
+ case BNX2FC_TASK_MGMT_CMD:
+ max_sqes = BNX2FC_TM_MAX_SQES;
+ break;
+ case BNX2FC_ELS:
+ max_sqes = BNX2FC_ELS_MAX_SQES;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
+ "ios(%d):sqes(%d)\n",
+ tgt->num_active_ios.counter, tgt->max_sqes);
+ if (list_empty(&(cmd_mgr->free_list[smp_processor_id()])))
+ printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+ io_req->cmd_type = type;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+
+static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
+{
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr;
+ struct bnx2fc_cmd *io_req;
+ struct list_head *listp;
+ struct io_bdt *bd_tbl;
+ u32 max_sqes;
+ u16 xid;
+
+ max_sqes = BNX2FC_SCSI_MAX_SQES;
+ /*
+ * NOTE: Free list insertions and deletions are protected with
+ * cmgr lock
+ */
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) ||
+ (tgt->num_active_ios.counter >= max_sqes)) {
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ return NULL;
+ }
+
+ listp = (struct list_head *)
+ cmd_mgr->free_list[smp_processor_id()].next;
+ list_del_init(listp);
+ io_req = (struct bnx2fc_cmd *) listp;
+ xid = io_req->xid;
+ cmd_mgr->cmds[xid] = io_req;
+ atomic_inc(&tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+
+ INIT_LIST_HEAD(&io_req->link);
+
+ io_req->port = port;
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->req_flags = 0;
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ bd_tbl->io_req = io_req;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+ return io_req;
+}
+
+void bnx2fc_cmd_release(struct kref *ref)
+{
+ struct bnx2fc_cmd *io_req = container_of(ref,
+ struct bnx2fc_cmd, refcount);
+ struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+
+ spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+ if (io_req->cmd_type != BNX2FC_SCSI_CMD)
+ bnx2fc_free_mp_resc(io_req);
+ cmd_mgr->cmds[io_req->xid] = NULL;
+ /* Delete IO from retire queue */
+ list_del_init(&io_req->link);
+ /* Add it to the free list */
+ list_add(&io_req->link,
+ &cmd_mgr->free_list[smp_processor_id()]);
+ atomic_dec(&io_req->tgt->num_active_ios);
+ spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]);
+}
+
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ size_t sz = sizeof(struct fcoe_bd_ctx);
+
+ /* clear tm flags */
+ mp_req->tm_flags = 0;
+ if (mp_req->mp_req_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_req_bd,
+ mp_req->mp_req_bd_dma);
+ mp_req->mp_req_bd = NULL;
+ }
+ if (mp_req->mp_resp_bd) {
+ dma_free_coherent(&hba->pcidev->dev, sz,
+ mp_req->mp_resp_bd,
+ mp_req->mp_resp_bd_dma);
+ mp_req->mp_resp_bd = NULL;
+ }
+ if (mp_req->req_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf,
+ mp_req->req_buf_dma);
+ mp_req->req_buf = NULL;
+ }
+ if (mp_req->resp_buf) {
+ dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf,
+ mp_req->resp_buf_dma);
+ mp_req->resp_buf = NULL;
+ }
+}
+
+int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_mp_req *mp_req;
+ struct fcoe_bd_ctx *mp_req_bd;
+ struct fcoe_bd_ctx *mp_resp_bd;
+ struct bnx2fc_hba *hba = io_req->port->priv;
+ dma_addr_t addr;
+ size_t sz;
+
+ mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+ memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ mp_req->req_len = sizeof(struct fcp_cmnd);
+ io_req->data_xfer_len = mp_req->req_len;
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->req_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->req_buf) {
+ printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ &mp_req->resp_buf_dma,
+ GFP_ATOMIC);
+ if (!mp_req->resp_buf) {
+ printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ memset(mp_req->req_buf, 0, PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, PAGE_SIZE);
+
+ /* Allocate and map mp_req_bd and mp_resp_bd */
+ sz = sizeof(struct fcoe_bd_ctx);
+ mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_req_bd_dma,
+ GFP_ATOMIC);
+ if (!mp_req->mp_req_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+ &mp_req->mp_resp_bd_dma,
+ GFP_ATOMIC);
+	if (!mp_req->mp_resp_bd) {
+ printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+ bnx2fc_free_mp_resc(io_req);
+ return FAILED;
+ }
+ /* Fill bd table */
+ addr = mp_req->req_buf_dma;
+ mp_req_bd = mp_req->mp_req_bd;
+ mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->flags = 0;
+
+ /*
+ * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd
+ * entry in the bd table
+ */
+ mp_resp_bd = mp_req->mp_resp_bd;
+ addr = mp_req->resp_buf_dma;
+ mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+ mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+ mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->flags = 0;
+
+ return SUCCESS;
+}
+
+static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+{
+ struct fc_lport *lport;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_rport_libfc_priv *rp;
+ struct fcoe_port *port;
+ struct bnx2fc_hba *hba;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ struct bnx2fc_mp_req *tm_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct Scsi_Host *host = sc_cmd->device->host;
+ struct fc_frame_header *fc_hdr;
+ struct fcp_cmnd *fcp_cmnd;
+ int task_idx, index;
+ int rc = SUCCESS;
+ u16 xid;
+ u32 sid, did;
+ unsigned long start = jiffies;
+
+ lport = shost_priv(host);
+ port = lport_priv(lport);
+ hba = port->priv;
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "device_reset: rport is NULL\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "device_reset: link is not ready\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+	/* rport and tgt are allocated together, so tgt should be non-NULL */
+	rp = rport->dd_data;
+	tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
+ printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+retry_tmf:
+ io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
+ if (!io_req) {
+ if (time_after(jiffies, start + HZ)) {
+			printk(KERN_ERR PFX "tmf: Failed TMF\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+ msleep(20);
+ goto retry_tmf;
+ }
+ /* Initialize rest of io_req fields */
+ io_req->sc_cmd = sc_cmd;
+ io_req->port = port;
+ io_req->tgt = tgt;
+
+ tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+
+ rc = bnx2fc_init_mp_req(io_req);
+ if (rc == FAILED) {
+ printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ goto tmf_err;
+ }
+
+ /* Set TM flags */
+ io_req->io_req_flags = 0;
+ tm_req->tm_flags = tm_flags;
+
+ /* Fill FCP_CMND */
+ bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
+ fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
+ memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
+ fcp_cmnd->fc_dl = 0;
+
+ /* Fill FC header */
+ fc_hdr = &(tm_req->req_fc_hdr);
+ sid = tgt->sid;
+ did = rport->port_id;
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ /* Obtain exchange id */
+ xid = io_req->xid;
+
+ BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(io_req, task);
+
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ /* Obtain free SQ entry */
+ spin_lock_bh(&tgt->tgt_lock);
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_tm_queue */
+ io_req->on_tmf_queue = 1;
+ list_add_tail(&io_req->link, &tgt->active_tm_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_TM_TIMEOUT * HZ);
+ spin_lock_bh(&tgt->tgt_lock);
+
+ io_req->wait_for_comp = 0;
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+ set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ if (!rc) {
+ printk(KERN_ERR PFX "task mgmt command failed...\n");
+ rc = FAILED;
+ } else {
+		printk(KERN_INFO PFX "task mgmt command success...\n");
+ rc = SUCCESS;
+ }
+tmf_err:
+ return rc;
+}
+
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport *rport = tgt->rport;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *abts_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fc_frame_header *fc_hdr;
+ struct bnx2fc_mp_req *abts_req;
+ int task_idx, index;
+ u32 sid, did;
+ u16 xid;
+ int rc = SUCCESS;
+ u32 r_a_tov = rdata->r_a_tov;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (rport == NULL) {
+ printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+ if (!abts_io_req) {
+		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
+ rc = FAILED;
+ goto abts_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ abts_io_req->sc_cmd = NULL;
+ abts_io_req->port = port;
+ abts_io_req->tgt = tgt;
+ abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+ abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+ memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+ /* Fill FC header */
+ fc_hdr = &(abts_req->req_fc_hdr);
+
+ /* Obtain oxid and rxid for the original exchange to be aborted */
+ fc_hdr->fh_ox_id = htons(io_req->xid);
+ fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+
+ sid = tgt->sid;
+ did = rport->port_id;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+ FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ xid = abts_io_req->xid;
+ BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_mp_task(abts_io_req, task);
+
+ /*
+ * ABTS task is a temporary task that will be cleaned up
+ * irrespective of ABTS response. We need to start the timer
+ * for the original exchange, as the CQE is posted for the original
+ * IO request.
+ *
+ * Timer for ABTS is started only when it is originated by a
+ * TM request. For the ABTS issued as part of ULP timeout,
+ * scsi-ml maintains the timers.
+ */
+
+ /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
+ bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+abts_err:
+ return rc;
+}
+
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
+{
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct bnx2fc_hba *hba;
+ struct fcoe_port *port;
+ struct bnx2fc_cmd *cleanup_io_req;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ int task_idx, index;
+ u16 xid, orig_xid;
+ int rc = 0;
+
+ /* ASSUMPTION: called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
+
+ port = io_req->port;
+ hba = port->priv;
+ lport = port->lport;
+
+ cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
+ if (!cleanup_io_req) {
+		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
+ rc = -1;
+ goto cleanup_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ cleanup_io_req->sc_cmd = NULL;
+ cleanup_io_req->port = port;
+ cleanup_io_req->tgt = tgt;
+ cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+ xid = cleanup_io_req->xid;
+
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ orig_xid = io_req->xid;
+
+ BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
+
+ bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
+
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+
+cleanup_err:
+ return rc;
+}
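+
+/*
+ * Illustrative sketch (not part of the driver): task contexts are
+ * located by splitting the xid into a page index and an offset within
+ * the page, as done above and in the TMF/ABTS paths. With a
+ * hypothetical BNX2FC_TASKS_PER_PAGE of 32:
+ *
+ *	task_idx = xid / 32;	// which hba->task_ctx[] page
+ *	index    = xid % 32;	// entry within that page
+ *	task = &((struct fcoe_task_ctx_entry *)
+ *		 hba->task_ctx[task_idx])[index];
+ */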
+
+/**
+ * bnx2fc_eh_target_reset - Reset a target
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+/**
+ * bnx2fc_eh_device_reset - Reset a single LUN
+ *
+ * @sc_cmd: SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+/**
+ * bnx2fc_eh_abort - eh_abort_handler API to abort an outstanding
+ * SCSI command
+ *
+ * @sc_cmd: SCSI_ML command pointer
+ *
+ * SCSI abort request handler
+ */
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_cmd *io_req;
+ struct fc_lport *lport;
+ struct bnx2fc_rport *tgt;
+ int rc = FAILED;
+
+ rc = fc_block_scsi_eh(sc_cmd);
+ if (rc)
+ return rc;
+
+ lport = shost_priv(sc_cmd->device->host);
+	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+		printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+		return FAILED;
+	}
+
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+ if (!io_req) {
+ /* Command might have just completed */
+ printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
+ io_req->refcount.refcount.counter);
+
+ /* Hold IO request across abort processing */
+ kref_get(&io_req->refcount);
+
+ BUG_ON(tgt != io_req->tgt);
+
+ /* Remove the io_req from the active_q. */
+ /*
+ * Task Mgmt functions (LUN RESET & TGT RESET) will not
+ * issue an ABTS on this particular IO req, as the
+ * io_req is no longer in the active_q.
+ */
+ if (tgt->flush_in_prog) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "flush in progress\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ if (io_req->on_active_queue == 0) {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "not on active_q\n", io_req->xid);
+ /*
+ * This condition can happen only due to the FW bug,
+ * where we do not receive cleanup response from
+ * the FW. Handle this case gracefully by erroring
+ * back the IO request to SCSI-ml
+ */
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+
+ /*
+ * Only eh_abort processing will remove the IO from
+ * active_cmd_q before processing the request. this is
+ * done to avoid race conditions between IOs aborted
+ * as part of task management completion and eh_abort
+ * processing
+ */
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+
+ init_completion(&io_req->tm_done);
+ io_req->wait_for_comp = 1;
+
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ rc = bnx2fc_initiate_abts(io_req);
+ } else {
+ printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) "
+ "already in abts processing\n", io_req->xid);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return SUCCESS;
+ }
+ if (rc == FAILED) {
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+ }
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags))) {
+ /* Let the scsi-ml try to recover this command */
+ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
+ io_req->xid);
+ rc = FAILED;
+ } else {
+ /*
+ * We come here even when there was a race condition
+ * between timeout and abts completion, and abts
+ * completion happens just in time.
+ */
+ BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+ rc = SUCCESS;
+ bnx2fc_scsi_done(io_req, DID_ABORT);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+
+ /* release the reference taken in eh_abort */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return rc;
+}
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+ "refcnt = %d, cmd_type = %d\n",
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ u32 r_ctl;
+ u32 r_a_tov = FC_DEF_R_A_TOV;
+ u8 issue_rrq = 0;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+
+	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
+		      "refcnt = %d, cmd_type = %d\n",
+ io_req->xid,
+ io_req->refcount.refcount.counter, io_req->cmd_type);
+
+ if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+ " this io\n");
+ return;
+ }
+
+	/* Do not issue RRQ as this IO is already cleaned up */
+ if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags))
+ goto io_compl;
+
+ /*
+ * For ABTS issued due to SCSI eh_abort_handler, timeout
+ * values are maintained by scsi-ml itself. Cancel timeout
+ * in case ABTS issued as part of task management function
+ * or due to FW error.
+ */
+ if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+
+ switch (r_ctl) {
+ case FC_RCTL_BA_ACC:
+		/*
+		 * Don't release this cmd yet. It will be released
+		 * after we get the RRQ response.
+		 */
+ BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+ issue_rrq = 1;
+ break;
+
+ case FC_RCTL_BA_RJT:
+ BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+ break;
+ default:
+ printk(KERN_ERR PFX "Unknown ABTS response\n");
+ break;
+ }
+
+ if (issue_rrq) {
+ BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+ set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+ set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+ bnx2fc_cmd_timer_set(io_req, r_a_tov);
+
+io_compl:
+ if (io_req->wait_for_comp) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ } else {
+ /*
+ * We end up here when ABTS is issued as
+ * in asynchronous context, i.e., as part
+ * of task management completion, or
+ * when FW error is received or when the
+ * ABTS is issued when the IO is timed
+ * out.
+ */
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ }
+ bnx2fc_scsi_done(io_req, DID_ERROR);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
+}
+
+static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int tm_lun = sc_cmd->device->lun;
+ int rc = 0;
+ int lun;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
+ /*
+ * Walk thru the active_ios queue and ABORT the IO
+ * that matches with the LUN that was reset
+ */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ lun = cmd->sc_cmd->device->lun;
+ if (lun == tm_lun) {
+ /* Initiate ABTS on this cmd */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+				/* cancel the IO timeout */
+				if (cancel_delayed_work(&cmd->timeout_work))
+					/* drop the timer hold */
+					kref_put(&cmd->refcount,
+						 bnx2fc_cmd_release);
+				rc = bnx2fc_initiate_abts(cmd);
+				/* abts shouldn't fail in this context */
+				WARN_ON(rc != SUCCESS);
+ } else
+ printk(KERN_ERR PFX "lun_rst: abts already in"
+ " progress for this IO 0x%x\n",
+ cmd->xid);
+ }
+ }
+}
+
+static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct list_head *list;
+ struct list_head *tmp;
+ struct bnx2fc_cmd *cmd;
+ int rc = 0;
+
+ /* called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
+ /*
+	 * Walk thru the active_ios queue and ABORT every IO
+	 * pending on the target that was just reset
+ */
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
+ cmd = (struct bnx2fc_cmd *)list;
+ /* Initiate ABTS */
+ if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+ &cmd->req_flags)) {
+ /* cancel the IO timeout */
+			if (cancel_delayed_work(&cmd->timeout_work))
+				kref_put(&cmd->refcount,
+					 bnx2fc_cmd_release); /* timer hold */
+			rc = bnx2fc_initiate_abts(cmd);
+			/* abts shouldn't fail in this context */
+ WARN_ON(rc != SUCCESS);
+
+ } else
+ printk(KERN_ERR PFX "tgt_rst: abts already in progress"
+ " for this IO 0x%x\n", cmd->xid);
+ }
+}
+
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+ struct bnx2fc_mp_req *tm_req;
+ struct fc_frame_header *fc_hdr;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ u64 *hdr;
+ u64 *temp_hdr;
+ void *rsp_buf;
+
+ /* Called with tgt_lock held */
+ BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
+
+	if (!test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)) {
+		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
+	} else {
+		/*
+		 * TM has already timed out and we got
+		 * delayed completion. Ignore completion
+		 * processing.
+		 */
+		return;
+	}
+
+ tm_req = &(io_req->mp_req);
+ fc_hdr = &(tm_req->resp_fc_hdr);
+ hdr = (u64 *)fc_hdr;
+ temp_hdr = (u64 *)
+ &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
+ hdr[0] = cpu_to_be64(temp_hdr[0]);
+ hdr[1] = cpu_to_be64(temp_hdr[1]);
+ hdr[2] = cpu_to_be64(temp_hdr[2]);
+
+ tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off;
+
+ rsp_buf = tm_req->resp_buf;
+
+ if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ bnx2fc_parse_fcp_rsp(io_req,
+ (struct fcoe_fcp_rsp_payload *)
+ rsp_buf, num_rq);
+ if (io_req->fcp_rsp_code == 0) {
+ /* TM successful */
+ if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
+ bnx2fc_lun_reset_cmpl(io_req);
+ else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
+ bnx2fc_tgt_reset_cmpl(io_req);
+ }
+ } else {
+ printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
+ fc_hdr->fh_r_ctl);
+ }
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n");
+ return;
+ }
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+
+ default:
+ BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+
+ sc_cmd = io_req->sc_cmd;
+ io_req->sc_cmd = NULL;
+
+ /* check if the io_req exists in tgt's tmf_q */
+ if (io_req->on_tmf_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ } else {
+ printk(KERN_ALERT PFX "Command not on tmf_queue!\n");
+ return;
+ }
+
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ if (io_req->wait_for_comp) {
+ BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
+ complete(&io_req->tm_done);
+ }
+}
+
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
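+ /*
+ * Carve one DMA-mapped scatterlist entry into multiple
+ * fixed-size firmware BDs of BNX2FC_BD_SPLIT_SZ bytes (plus a
+ * final remainder), returning the number of BDs written so
+ * the caller can advance its BD index.
+ */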
+ while (sg_len) {
+ if (sg_len >= BNX2FC_BD_SPLIT_SZ)
+ frag_size = BNX2FC_BD_SPLIT_SZ;
+ else
+ frag_size = sg_len;
+ bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
+ bd[bd_index + sg_frags].buf_len = (u16)frag_size;
+ bd[bd_index + sg_frags].flags = 0;
+
+ addr += (u64) frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+}
+
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int sg_count = 0;
+ int bd_count = 0;
+ int sg_frags;
+ unsigned int sg_len;
+ u64 addr;
+ int i;
+
+ sg_count = scsi_dma_map(sc);
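+ /*
+ * Note: scsi_dma_map() may return 0 (no data) or a negative
+ * errno; in either case the loop below simply produces an
+ * empty BD list.
+ */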
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = sg_dma_address(sg);
+ if (sg_len > BNX2FC_MAX_BD_LEN) {
+ sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
+ bd_count);
+ } else {
+ sg_frags = 1;
+ bd[bd_count].buf_addr_lo = addr & 0xffffffff;
+ bd[bd_count].buf_addr_hi = addr >> 32;
+ bd[bd_count].buf_len = (u16)sg_len;
+ bd[bd_count].flags = 0;
+ }
+ bd_count += sg_frags;
+ byte_count += sg_len;
+ }
+ if (byte_count != scsi_bufflen(sc))
+ printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
+ "task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
+ io_req->xid);
+ return bd_count;
+}
+
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+ int bd_count;
+
+ if (scsi_sg_count(sc))
+ bd_count = bnx2fc_map_sg(io_req);
+ else {
+ bd_count = 0;
+ bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
+ bd[0].buf_len = bd[0].flags = 0;
+ }
+ io_req->bd_tbl->bd_valid = bd_count;
+}
+
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+
+ if (io_req->bd_tbl->bd_valid && sc) {
+ scsi_dma_unmap(sc);
+ io_req->bd_tbl->bd_valid = 0;
+ }
+}
+
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+ struct fcp_cmnd *fcp_cmnd)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ char tag[2];
+
+ memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+
+ int_to_scsilun(sc_cmd->device->lun,
+ (struct scsi_lun *) fcp_cmnd->fc_lun);
+
+ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_flags = io_req->io_req_flags;
+
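+ /*
+ * Map the SCSI queue-tag message, if any, onto the FCP task
+ * attribute field: head-of-queue and ordered tags translate
+ * directly, anything else defaults to a simple tag, and
+ * untagged commands leave the field zero.
+ */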
+ if (scsi_populate_tag_msg(sc_cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
+ break;
+ default:
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+ break;
+ }
+ } else {
+ fcp_cmnd->fc_pri_ta = 0;
+ }
+}
+
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+ struct fcoe_fcp_rsp_payload *fcp_rsp,
+ u8 num_rq)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ u8 rsp_flags = fcp_rsp->fcp_flags.flags;
+ u32 rq_buff_len = 0;
+ int i;
+ unsigned char *rq_data;
+ unsigned char *dummy;
+ int fcp_sns_len = 0;
+ int fcp_rsp_len = 0;
+
+ io_req->fcp_status = FC_GOOD;
+ io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+ io_req->scsi_comp_flags = rsp_flags;
+ CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+ fcp_rsp->scsi_status_code;
+
+ /* Fetch fcp_rsp_info and fcp_sns_info if available */
+ if (num_rq) {
+
+ /*
+ * We do not anticipate num_rq > 1, as the Linux-defined
+ * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
+ * a single 256-byte RQ buffer is large enough to hold this.
+ */
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
+ fcp_rsp_len = rq_buff_len
+ = fcp_rsp->fcp_rsp_len;
+ }
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
+ fcp_sns_len = fcp_rsp->fcp_sns_len;
+ rq_buff_len += fcp_rsp->fcp_sns_len;
+ }
+
+ io_req->fcp_rsp_len = fcp_rsp_len;
+ io_req->fcp_sns_len = fcp_sns_len;
+
+ if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
+ /* Invalid sense length. */
+ printk(KERN_ALERT PFX "invalid sns length %d\n",
+ rq_buff_len);
+ /* reset rq_buff_len */
+ rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
+ }
+
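+ /*
+ * The RQ buffer carries the FCP_RSP_INFO bytes (if any)
+ * followed immediately by the sense data; rq_data is advanced
+ * past the rsp info before the sense copy below.
+ */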
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ dummy = bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ /* fetch fcp_rsp_code */
+ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+ /* Only for task management function */
+ io_req->fcp_rsp_code = rq_data[3];
+ printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+ io_req->fcp_rsp_code);
+ }
+
+ /* fetch sense data */
+ rq_data += fcp_rsp_len;
+
+ if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+ printk(KERN_ERR PFX "Truncating sense buffer\n");
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+ }
+}
+
+/**
+ * bnx2fc_queuecommand - Queuecommand function of the scsi template
+ *
+ * @host: The Scsi_Host the command was issued to
+ * @sc_cmd: struct scsi_cmnd to be executed
+ *
+ * This is the IO strategy routine, called by SCSI-ML
+ **/
+int bnx2fc_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(host);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct bnx2fc_rport *tgt;
+ struct bnx2fc_cmd *io_req;
+ int rc = 0;
+ int rval;
+
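+ /*
+ * Returning 0 with sc_cmd->result set completes the command
+ * immediately via scsi_done(); the SCSI_MLQUEUE_* return
+ * codes below ask the midlayer to requeue and retry instead.
+ */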
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+ /* rport and tgt are allocated together, so tgt should be non-NULL */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ /*
+ * Session is not offloaded yet. Let SCSI-ml retry
+ * the command.
+ */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+
+ io_req = bnx2fc_cmd_alloc(tgt);
+ if (!io_req) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+ io_req->sc_cmd = sc_cmd;
+
+ if (bnx2fc_post_io_req(tgt, io_req)) {
+ printk(KERN_ERR PFX "Unable to post io_req\n");
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+exit_qcmd:
+ return rc;
+}
+
+void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
+ struct fcoe_task_ctx_entry *task,
+ u8 num_rq)
+{
+ struct fcoe_fcp_rsp_payload *fcp_rsp;
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct scsi_cmnd *sc_cmd;
+ struct Scsi_Host *host;
+
+ /* scsi_cmd_cmpl is called with tgt lock held */
+
+ if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
+ /* we will not receive ABTS response for this IO */
+ BNX2FC_IO_DBG(io_req, "Timer context finished processing "
+ "this scsi cmd\n");
+ }
+
+ /* Cancel the timeout_work, as we received IO completion */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ sc_cmd = io_req->sc_cmd;
+ if (sc_cmd == NULL) {
+ printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
+ return;
+ }
+
+ /* Fetch fcp_rsp from task context and perform cmd completion */
+ fcp_rsp = (struct fcoe_fcp_rsp_payload *)
+ &(task->cmn.general.rsp_info.fcp_rsp.payload);
+
+ /* parse fcp_rsp and obtain sense data from RQ if available */
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+
+ host = sc_cmd->device->host;
+ if (!sc_cmd->SCp.ptr) {
+ printk(KERN_ERR PFX "SCp.ptr is NULL\n");
+ return;
+ }
+ io_req->sc_cmd = NULL;
+
+ if (io_req->on_active_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ /* Move IO req to retire queue */
+ list_add_tail(&io_req->link, &tgt->io_retire_queue);
+ } else {
+ /* This should not happen, but could have been pulled
+ * by bnx2fc_flush_active_ios(), or during a race
+ * between command abort and (late) completion.
+ */
+ BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
+ if (io_req->wait_for_comp)
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags))
+ complete(&io_req->tm_done);
+ }
+
+ bnx2fc_unmap_sg_list(io_req);
+
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good IO completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ /* Transport status is good, SCSI status not good */
+ BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
+ " fcp_resid = 0x%x\n",
+ io_req->cdb_status, io_req->fcp_resid);
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+ default:
+ printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n",
+ io_req->fcp_status);
+ break;
+ }
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+ struct bnx2fc_cmd *io_req)
+{
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_lport *lport = port->lport;
+ struct fcoe_dev_stats *stats;
+ int task_idx, index;
+ u16 xid;
+
+ /* Initialize rest of io_req fields */
+ io_req->cmd_type = BNX2FC_SCSI_CMD;
+ io_req->port = port;
+ io_req->tgt = tgt;
+ io_req->data_xfer_len = scsi_bufflen(sc_cmd);
+ sc_cmd->SCp.ptr = (char *)io_req;
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ io_req->io_req_flags = BNX2FC_READ;
+ stats->InputRequests++;
+ stats->InputBytes += io_req->data_xfer_len;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ io_req->io_req_flags = BNX2FC_WRITE;
+ stats->OutputRequests++;
+ stats->OutputBytes += io_req->data_xfer_len;
+ } else {
+ io_req->io_req_flags = 0;
+ stats->ControlRequests++;
+ }
+ put_cpu();
+
+ xid = io_req->xid;
+
+ /* Build buffer descriptor list for firmware from sg list */
+ bnx2fc_build_bd_list_from_sg(io_req);
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
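+ /*
+ * The task context table is an array of pages, each holding
+ * BNX2FC_TASKS_PER_PAGE entries; the xid selects the page and
+ * the slot within it.
+ */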
+
+ /* Initialize task context for this IO request */
+ task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+ task = &(task_page[index]);
+ bnx2fc_init_task(io_req, task);
+
+ spin_lock_bh(&tgt->tgt_lock);
+
+ if (tgt->flush_in_prog) {
+ printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+ printk(KERN_ERR PFX "Session not ready...post_io\n");
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return -EAGAIN;
+ }
+
+ /* Time IO req */
+ bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
+ /* Obtain free SQ entry */
+ bnx2fc_add_2_sq(tgt, xid);
+
+ /* Enqueue the io_req to active_cmd_queue */
+ io_req->on_active_queue = 1;
+ /* move io_req from pending_queue to active_queue */
+ list_add_tail(&io_req->link, &tgt->active_cmd_queue);
+
+ /* Ring doorbell */
+ bnx2fc_ring_doorbell(tgt);
+ spin_unlock_bh(&tgt->tgt_lock);
+ return 0;
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
new file mode 100644
index 000000000000..7ea93af60260
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -0,0 +1,844 @@
+/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * Handles operations such as session offload/upload etc, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static void bnx2fc_upld_timer(unsigned long data);
+static void bnx2fc_ofld_timer(unsigned long data);
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata);
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt);
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
+
+static void bnx2fc_upld_timer(unsigned long data)
+{
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
+ /* fake upload completion */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
+}
+
+static void bnx2fc_ofld_timer(unsigned long data)
+{
+ struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+ BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
+ /* NOTE: This function should never be called, as
+ * offload should never timeout
+ */
+ /*
+ * If the timer has expired, this session is dead:
+ * clear the offloaded flag and log out of this device.
+ * Since the OFFLOADED flag is cleared, this case is
+ * treated as an offload error; the port is logged off
+ * and the conn_id and session resources are freed up
+ * in bnx2fc_offload_session().
+ */
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_offload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+ int rval;
+ int i = 0;
+
+ /* Initialize bnx2fc_rport */
+ /* NOTE: tgt is already bzero'd */
+ rval = bnx2fc_init_tgt(tgt, port, rdata);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate conn id for "
+ "port_id (%6x)\n", rport->port_id);
+ goto ofld_err;
+ }
+
+ /* Allocate session resources */
+ rval = bnx2fc_alloc_session_resc(hba, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "Failed to allocate resources\n");
+ goto ofld_err;
+ }
+
+ /*
+ * Initialize FCoE session offload process.
+ * Upon completion of offload process add
+ * rport to list of rports
+ */
+retry_ofld:
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_ofld_req(port, tgt);
+ if (rval) {
+ printk(KERN_ERR PFX "ofld_req failed\n");
+ goto ofld_err;
+ }
+
+ /*
+ * Wait for the session to be offloaded and enabled. 3 seconds
+ * should be ample time for this process to complete.
+ */
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->ofld_timer);
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
+ &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
+ "retry ofld..%d\n", i++);
+ msleep_interruptible(1000);
+ if (i > 3) {
+ i = 0;
+ goto ofld_err;
+ }
+ goto retry_ofld;
+ }
+ goto ofld_err;
+ }
+ if (bnx2fc_map_doorbell(tgt)) {
+ printk(KERN_ERR PFX "map doorbell failed - no mem\n");
+ /* upload will take care of cleaning up sess resc */
+ lport->tt.rport_logoff(rdata);
+ }
+ return;
+
+ofld_err:
+ /* couldn't offload the session. log off from this rport */
+ BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ lport->tt.rport_logoff(rdata);
+ /* Free session resources */
+ bnx2fc_free_session_resc(hba, tgt);
+ if (tgt->fcoe_conn_id != -1)
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+}
+
+void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_cmd *io_req;
+ struct list_head *list;
+ struct list_head *tmp;
+ int rc;
+ int i = 0;
+ BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
+ tgt->num_active_ios.counter);
+
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 1;
+
+ list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+ BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "cleaned up\n");
+ complete(&io_req->tm_done);
+ }
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ }
+
+ set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
+ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->els_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_active_queue = 0;
+
+ BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+
+ if ((io_req->cb_func) && (io_req->cb_arg)) {
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
+
+ list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+
+ BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
+
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+ }
+
+ BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
+ i = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+ /* wait for active_ios to go to 0 */
+ while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
+ msleep(25);
+ if (tgt->num_active_ios.counter != 0)
+ printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
+ " active_ios = %d\n",
+ tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
+ spin_lock_bh(&tgt->tgt_lock);
+ tgt->flush_in_prog = 0;
+ spin_unlock_bh(&tgt->tgt_lock);
+}
+
+static void bnx2fc_upload_session(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt)
+{
+ struct bnx2fc_hba *hba = port->priv;
+
+ BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
+ tgt->num_active_ios.counter);
+
+ /*
+ * Called with hba->hba_mutex held.
+ * This is a blocking call
+ */
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_disable_req(port, tgt);
+
+ /*
+ * Wait for the upload to complete. 3 seconds should be
+ * sufficient time for this process to complete.
+ */
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+ /*
+ * Traverse through the active_q and tmf_q and clean up
+ * the IOs in these lists
+ */
+ BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
+ tgt->flags);
+ bnx2fc_flush_active_ios(tgt);
+
+ /* Issue destroy KWQE */
+ if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "send destroy req\n");
+ clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ bnx2fc_send_session_destroy_req(hba, tgt);
+
+ /* wait for destroy to complete */
+ setup_timer(&tgt->upld_timer,
+ bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+
+ if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
+ printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
+
+ BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
+ tgt->flags);
+ if (signal_pending(current))
+ flush_signals(current);
+
+ del_timer_sync(&tgt->upld_timer);
+
+ } else
+ printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
+ " not sent to FW\n");
+
+ /* Free session resources */
+ spin_lock_bh(&tgt->cq_lock);
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ spin_unlock_bh(&tgt->cq_lock);
+}
+
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+ struct fcoe_port *port,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_rport *rport = rdata->rport;
+ struct bnx2fc_hba *hba = port->priv;
+
+ tgt->rport = rport;
+ tgt->rdata = rdata;
+ tgt->port = port;
+
+ if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
+ BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
+ tgt->fcoe_conn_id = -1;
+ return -1;
+ }
+
+ tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
+ if (tgt->fcoe_conn_id == -1)
+ return -1;
+
+ BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
+
+ tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
+ tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
+ tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+
+ /* Initialize the toggle bit */
+ tgt->sq_curr_toggle_bit = 1;
+ tgt->cq_curr_toggle_bit = 1;
+ tgt->sq_prod_idx = 0;
+ tgt->cq_cons_idx = 0;
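+ /*
+ * Note (assumption): 0x8000 appears to seed the toggle bit in
+ * the MSB of the RQ producer index, matching the initial
+ * conn_db->rq_prod programmed during session resource setup.
+ */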
+ tgt->rq_prod_idx = 0x8000;
+ tgt->rq_cons_idx = 0;
+ atomic_set(&tgt->num_active_ios, 0);
+
+ tgt->work_time_slice = 2;
+
+ spin_lock_init(&tgt->tgt_lock);
+ spin_lock_init(&tgt->cq_lock);
+
+ /* Initialize active_cmd_queue list */
+ INIT_LIST_HEAD(&tgt->active_cmd_queue);
+
+ /* Initialize IO retire queue */
+ INIT_LIST_HEAD(&tgt->io_retire_queue);
+
+ INIT_LIST_HEAD(&tgt->els_queue);
+
+ /* Initialize active_tm_queue list */
+ INIT_LIST_HEAD(&tgt->active_tm_queue);
+
+ init_waitqueue_head(&tgt->ofld_wait);
+ init_waitqueue_head(&tgt->upld_wait);
+
+ return 0;
+}
+
+/**
+ * bnx2fc_rport_event_handler - handler for rport events
+ * @lport: local port on which the event occurred
+ * @rdata: remote port private data
+ * @event: the rport event
+ *
+ * This event callback is called after successful completion of a libfc-
+ * initiated target login. bnx2fc can then proceed with initiating the
+ * session establishment.
+ */
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct bnx2fc_hba *hba = port->priv;
+ struct fc_rport *rport = rdata->rport;
+ struct fc_rport_libfc_priv *rp;
+ struct bnx2fc_rport *tgt;
+ u32 port_id;
+
+ BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
+ event, rdata->ids.port_id);
+ switch (event) {
+ case RPORT_EV_READY:
+ if (!rport) {
+ printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+ break;
+ }
+
+ rp = rport->dd_data;
+ if (rport->port_id == FC_FID_DIR_SERV) {
+ /*
+ * bnx2fc_rport structure doesn't exist for the
+ * directory server.
+ * We should not come here, as lport will
+ * take care of fabric login
+ */
+ printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+ rdata->ids.port_id);
+ break;
+ }
+
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ BNX2FC_HBA_DBG(lport, "not FCP type target."
+ " not offloading\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
+ " not offloading\n");
+ break;
+ }
+
+ /*
+ * The offload process is protected with the hba mutex.
+ * Use the same mutex for the upload process too.
+ */
+ mutex_lock(&hba->hba_mutex);
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ /* This can happen when ADISC finds the same target */
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ BNX2FC_TGT_DBG(tgt, "already offloaded\n");
+ mutex_unlock(&hba->hba_mutex);
+ return;
+ }
+
+ /*
+ * Offload the session. This is a blocking call, and will
+ * wait until the session is offloaded.
+ */
+ bnx2fc_offload_session(port, tgt, rdata);
+
+ BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+
+ if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ /*
+ * Session is offloaded and enabled. Map
+ * doorbell register for this target
+ */
+ BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
+ /* This counter is protected with hba mutex */
+ hba->num_ofld_sess++;
+
+ set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ } else {
+ /*
+ * Offload or enable would have failed.
+ * In offload/enable completion path, the
+ * rport would have already been removed
+ */
+ BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
+ "offloaded flag not set\n");
+ }
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ port_id = rdata->ids.port_id;
+ if (port_id == FC_FID_DIR_SERV)
+ break;
+
+ if (!rport) {
+ printk(KERN_ALERT PFX "%x - rport not created Yet!!\n",
+ port_id);
+ break;
+ }
+ rp = rport->dd_data;
+ mutex_lock(&hba->hba_mutex);
+ /*
+ * Perform session upload. Note that rdata->peers is already
+ * removed from disc->rports list before we get this event.
+ */
+ tgt = (struct bnx2fc_rport *)&rp[1];
+
+ if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ mutex_unlock(&hba->hba_mutex);
+ break;
+ }
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+
+ bnx2fc_upload_session(port, tgt);
+ hba->num_ofld_sess--;
+ BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
+ hba->num_ofld_sess);
+ /*
+ * Try to wake up the linkdown wait thread. If num_ofld_sess
+ * is 0, the waiting thread wakes up
+ */
+ if ((hba->wait_for_link_down) &&
+ (hba->num_ofld_sess == 0)) {
+ wake_up_interruptible(&hba->shutdown_wait);
+ }
+ if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
+ printk(KERN_ERR PFX "Relogin to the tgt\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_login(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ }
+ mutex_unlock(&hba->hba_mutex);
+
+ break;
+
+ case RPORT_EV_NONE:
+ break;
+ }
+}
+
+/**
+ * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
+ *
+ * @port: fcoe_port struct to lookup the target port on
+ * @port_id: The remote port ID to look up
+ */
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+ u32 port_id)
+{
+ struct bnx2fc_hba *hba = port->priv;
+ struct bnx2fc_rport *tgt;
+ struct fc_rport_priv *rdata;
+ int i;
+
+ for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+ tgt = hba->tgt_ofld_list[i];
+ if ((tgt) && (tgt->port == port)) {
+ rdata = tgt->rdata;
+ if (rdata->ids.port_id == port_id) {
+ if (rdata->rp_state != RPORT_ST_DELETE) {
+ BNX2FC_TGT_DBG(tgt, "rport "
+ "obtained\n");
+ return tgt;
+ } else {
+ printk(KERN_ERR PFX "rport 0x%x "
+ "is in DELETED state\n",
+ rdata->ids.port_id);
+ return NULL;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+
+/**
+ * bnx2fc_alloc_conn_id - allocates FCOE Connection id
+ *
+ * @hba: pointer to adapter structure
+ * @tgt: pointer to bnx2fc_rport structure
+ */
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ u32 conn_id, next;
+
+ /* called with hba mutex held */
+
+ /*
+ * tgt_ofld_list access is synchronized using
+ * both the hba mutex and the hba lock. At least one of
+ * them needs to be held for read access.
+ */
+
+ spin_lock_bh(&hba->hba_lock);
+ next = hba->next_conn_id;
+ conn_id = hba->next_conn_id++;
+ if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
+ hba->next_conn_id = 0;
+
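+ /*
+ * Scan the offload table circularly from the saved starting
+ * point; wrapping all the way back to 'next' means every
+ * conn_id is in use.
+ */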
+ while (hba->tgt_ofld_list[conn_id] != NULL) {
+ conn_id++;
+ if (conn_id == BNX2FC_NUM_MAX_SESS)
+ conn_id = 0;
+
+ if (conn_id == next) {
+ /* No free conn_ids are available */
+ spin_unlock_bh(&hba->hba_lock);
+ return -1;
+ }
+ }
+ hba->tgt_ofld_list[conn_id] = tgt;
+ tgt->fcoe_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+ return conn_id;
+}
+
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
+{
+ /* called with hba mutex held */
+ spin_lock_bh(&hba->hba_lock);
+ hba->tgt_ofld_list[conn_id] = NULL;
+ hba->next_conn_id = conn_id;
+ spin_unlock_bh(&hba->hba_lock);
+}
+
+/**
+ * bnx2fc_alloc_session_resc - Allocate qp resources for the session
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ */
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ dma_addr_t page;
+ int num_pages;
+ u32 *pbl;
+
+ /* Allocate and map SQ */
+ tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ &tgt->sq_dma, GFP_KERNEL);
+ if (!tgt->sq) {
+ printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n",
+ tgt->sq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->sq, 0, tgt->sq_mem_size);
+
+ /* Allocate and map CQ */
+ tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ &tgt->cq_dma, GFP_KERNEL);
+ if (!tgt->cq) {
+ printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n",
+ tgt->cq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->cq, 0, tgt->cq_mem_size);
+
+ /* Allocate and map RQ and RQ PBL */
+ tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ &tgt->rq_dma, GFP_KERNEL);
+ if (!tgt->rq) {
+ printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n",
+ tgt->rq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->rq, 0, tgt->rq_mem_size);
+
+ tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ &tgt->rq_pbl_dma, GFP_KERNEL);
+ if (!tgt->rq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n",
+ tgt->rq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
+ num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ page = tgt->rq_dma;
+ pbl = (u32 *)tgt->rq_pbl;
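+ /*
+ * Fill the page base list: one 64-bit DMA address per RQ page,
+ * stored as two 32-bit words, low word first. The CONFQ PBL
+ * below is built the same way.
+ */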
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map XFERQ */
+ tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ &tgt->xferq_dma, GFP_KERNEL);
+ if (!tgt->xferq) {
+ printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n",
+ tgt->xferq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->xferq, 0, tgt->xferq_mem_size);
+
+ /* Allocate and map CONFQ & CONFQ PBL */
+ tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ &tgt->confq_dma, GFP_KERNEL);
+ if (!tgt->confq) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+ tgt->confq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->confq, 0, tgt->confq_mem_size);
+
+ tgt->confq_pbl_size =
+ (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ tgt->confq_pbl_size =
+ (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+ tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->confq_pbl_size,
+ &tgt->confq_pbl_dma, GFP_KERNEL);
+ if (!tgt->confq_pbl) {
+ printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+ tgt->confq_pbl_size);
+ goto mem_alloc_failure;
+ }
+
+ memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
+ num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ page = tgt->confq_dma;
+ pbl = (u32 *)tgt->confq_pbl;
+
+ while (num_pages--) {
+ *pbl = (u32)page;
+ pbl++;
+ *pbl = (u32)((u64)page >> 32);
+ pbl++;
+ page += PAGE_SIZE;
+ }
+
+ /* Allocate and map ConnDB */
+ tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
+
+ tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+ tgt->conn_db_mem_size,
+ &tgt->conn_db_dma, GFP_KERNEL);
+ if (!tgt->conn_db) {
+ printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+ tgt->conn_db_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
+
+
+ tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
+ PAGE_MASK;
+
+ tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ &tgt->lcq_dma, GFP_KERNEL);
+
+ if (!tgt->lcq) {
+ printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+ tgt->lcq_mem_size);
+ goto mem_alloc_failure;
+ }
+ memset(tgt->lcq, 0, tgt->lcq_mem_size);
+
+ /* Arm CQ */
+ tgt->conn_db->cq_arm.lo = -1;
+ tgt->conn_db->rq_prod = 0x8000;
+
+ return 0;
+
+mem_alloc_failure:
+ bnx2fc_free_session_resc(hba, tgt);
+ bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+ return -ENOMEM;
+}
+
+/**
+ * bnx2fc_free_session_resc - free qp resources for the session
+ *
+ * @hba: adapter structure pointer
+ * @tgt: bnx2fc_rport structure pointer
+ *
+ * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
+ */
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+ struct bnx2fc_rport *tgt)
+{
+ BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+ if (tgt->ctx_base) {
+ iounmap(tgt->ctx_base);
+ tgt->ctx_base = NULL;
+ }
+ /* Free LCQ */
+ if (tgt->lcq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+ tgt->lcq, tgt->lcq_dma);
+ tgt->lcq = NULL;
+ }
+ /* Free connDB */
+ if (tgt->conn_db) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
+ tgt->conn_db, tgt->conn_db_dma);
+ tgt->conn_db = NULL;
+ }
+ /* Free confq and confq pbl */
+ if (tgt->confq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
+ tgt->confq_pbl, tgt->confq_pbl_dma);
+ tgt->confq_pbl = NULL;
+ }
+ if (tgt->confq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+ tgt->confq, tgt->confq_dma);
+ tgt->confq = NULL;
+ }
+ /* Free XFERQ */
+ if (tgt->xferq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+ tgt->xferq, tgt->xferq_dma);
+ tgt->xferq = NULL;
+ }
+ /* Free RQ PBL and RQ */
+ if (tgt->rq_pbl) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+ tgt->rq_pbl, tgt->rq_pbl_dma);
+ tgt->rq_pbl = NULL;
+ }
+ if (tgt->rq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
+ tgt->rq, tgt->rq_dma);
+ tgt->rq = NULL;
+ }
+ /* Free CQ */
+ if (tgt->cq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
+ tgt->cq, tgt->cq_dma);
+ tgt->cq = NULL;
+ }
+ /* Free SQ */
+ if (tgt->sq) {
+ dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
+ tgt->sq, tgt->sq_dma);
+ tgt->sq = NULL;
+ }
+}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index e1ca5fe7e6bb..cfd59023227b 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -360,7 +360,7 @@ struct bnx2i_hba {
#define ADAPTER_STATE_LINK_DOWN 2
#define ADAPTER_STATE_INIT_FAILED 31
unsigned int mtu_supported;
- #define BNX2I_MAX_MTU_SUPPORTED 1500
+ #define BNX2I_MAX_MTU_SUPPORTED 9000
struct Scsi_Host *shost;
@@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn,
+ struct iscsi_task *mtask);
extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
struct bnx2i_cmd *cmnd);
extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 96505e3ab986..1da34c019b8a 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -445,6 +445,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
}
/**
+ * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware
+ * @bnx2i_conn: iscsi connection
+ * @mtask: driver command structure which is requesting
+ * a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Text request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn,
+ struct iscsi_task *mtask)
+{
+ struct bnx2i_cmd *bnx2i_cmd;
+ struct bnx2i_text_request *text_wqe;
+ struct iscsi_text *text_hdr;
+ u32 dword;
+
+ bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+ text_hdr = (struct iscsi_text *)mtask->hdr;
+ text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe;
+
+ memset(text_wqe, 0, sizeof(struct bnx2i_text_request));
+
+ text_wqe->op_code = text_hdr->opcode;
+ text_wqe->op_attr = text_hdr->flags;
+ text_wqe->data_length = ntoh24(text_hdr->dlength);
+ text_wqe->itt = mtask->itt |
+ (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT);
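+ /*
+ * The low bits carry the iSCSI ITT; the task type encoded in
+ * the upper bits presumably lets the firmware route the
+ * completion back to the management path.
+ */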
+ text_wqe->ttt = be32_to_cpu(text_hdr->ttt);
+
+ text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+
+ text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+ text_wqe->resp_bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+ dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) |
+ (bnx2i_conn->gen_pdu.resp_buf_size <<
+ ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+ text_wqe->resp_buffer = dword;
+ text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+ text_wqe->bd_list_addr_hi =
+ (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+ text_wqe->num_bds = 1;
+ text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+ bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+ return 0;
+}
+
+
+/**
* bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
* @conn: iscsi connection
* @cmd: driver command structure which is requesting
@@ -490,15 +540,18 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
nopout_hdr = (struct iscsi_nopout *)task->hdr;
nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+
+ memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request));
+
nopout_wqe->op_code = nopout_hdr->opcode;
nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
- u32 tmp = nopout_hdr->lun[0];
+ u32 tmp = nopout_wqe->lun[0];
/* 57710 requires LUN field to be swapped */
- nopout_hdr->lun[0] = nopout_hdr->lun[1];
- nopout_hdr->lun[1] = tmp;
+ nopout_wqe->lun[0] = nopout_wqe->lun[1];
+ nopout_wqe->lun[1] = tmp;
}
nopout_wqe->itt = ((u16)task->itt |
@@ -1425,6 +1478,68 @@ done:
return 0;
}
+
+/**
+ * bnx2i_process_text_resp - this function handles iscsi text response
+ * @session: iscsi session pointer
+ * @bnx2i_conn: iscsi connection pointer
+ * @cqe: pointer to newly DMA'ed CQE entry for processing
+ *
+ * process an iSCSI Text Response CQE and complete it to the open-iscsi
+ * user daemon
+ */
+static int bnx2i_process_text_resp(struct iscsi_session *session,
+ struct bnx2i_conn *bnx2i_conn,
+ struct cqe *cqe)
+{
+ struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct iscsi_task *task;
+ struct bnx2i_text_response *text;
+ struct iscsi_text_rsp *resp_hdr;
+ int pld_len;
+ int pad_len;
+
+ text = (struct bnx2i_text_response *) cqe;
+ spin_lock(&session->lock);
+ task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+ if (!task)
+ goto done;
+
+ resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr;
+ memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+ resp_hdr->opcode = text->op_code;
+ resp_hdr->flags = text->response_flags;
+ resp_hdr->hlength = 0;
+
+ hton24(resp_hdr->dlength, text->data_length);
+ resp_hdr->itt = task->hdr->itt;
+ resp_hdr->ttt = cpu_to_be32(text->ttt);
+ resp_hdr->statsn = task->hdr->exp_statsn;
+ resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn);
+ resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn);
+ pld_len = text->data_length;
+ bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf +
+ pld_len;
+ pad_len = 0;
+ if (pld_len & 0x3)
+ pad_len = 4 - (pld_len % 4);
+
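+ /*
+ * iSCSI data segments are padded to a 4-byte boundary; zero
+ * the pad bytes and advance resp_wr_ptr so the full padded
+ * length is handed to __iscsi_complete_pdu() below.
+ */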
+ if (pad_len) {
+ int i = 0;
+ for (i = 0; i < pad_len; i++) {
+ bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0;
+ bnx2i_conn->gen_pdu.resp_wr_ptr++;
+ }
+ }
+ __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+ bnx2i_conn->gen_pdu.resp_buf,
+ bnx2i_conn->gen_pdu.resp_wr_ptr -
+ bnx2i_conn->gen_pdu.resp_buf);
+done:
+ spin_unlock(&session->lock);
+ return 0;
+}
+
+
/**
* bnx2i_process_tmf_resp - this function handles iscsi TMF response
* @session: iscsi session pointer
@@ -1766,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
bnx2i_process_tmf_resp(session, bnx2i_conn,
qp->cq_cons_qe);
break;
+ case ISCSI_OP_TEXT_RSP:
+ bnx2i_process_text_resp(session, bnx2i_conn,
+ qp->cq_cons_qe);
+ break;
case ISCSI_OP_LOGOUT_RSP:
bnx2i_process_logout_resp(session, bnx2i_conn,
qp->cq_cons_qe);
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 72a7b2d4a439..1d24a2819736 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.6.2.2"
-#define DRV_MODULE_RELDATE "Nov 23, 2010"
+#define DRV_MODULE_VERSION "2.6.2.3"
+#define DRV_MODULE_RELDATE "Dec 31, 2010"
static char version[] __devinitdata =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -29,7 +29,7 @@ static char version[] __devinitdata =
MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com> and "
"Eddie Wai <eddie.wai@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711"
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712"
" iSCSI Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
@@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba)
(hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E)
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_57712E)
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
else
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
@@ -161,6 +163,21 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
+ if (!hba->cnic->max_iscsi_conn) {
+ printk(KERN_ALERT "bnx2i: dev %s does not support "
+ "iSCSI\n", hba->netdev->name);
+
+ if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+ mutex_lock(&bnx2i_dev_lock);
+ list_del_init(&hba->link);
+ adapter_count--;
+ hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+ clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+ mutex_unlock(&bnx2i_dev_lock);
+ bnx2i_free_hba(hba);
+ }
+ return;
+ }
bnx2i_send_fw_iscsi_init_msg(hba);
while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f0dce26593eb..1809f9ccc4ce 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
case ISCSI_OP_SCSI_TMFUNC:
rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
break;
+ case ISCSI_OP_TEXT:
+ rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
+ break;
default:
iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
"send_gen: unsupported op 0x%x\n",
@@ -1455,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
/**
- * bnx2i_conn_get_param - return iscsi connection parameter to caller
- * @cls_conn: pointer to iscsi cls conn
+ * bnx2i_ep_get_param - return iscsi ep parameter to caller
+ * @ep: pointer to iscsi endpoint
* @param: parameter type identifier
* @buf: buffer pointer
*
- * returns iSCSI connection parameters
+ * returns iSCSI ep parameters
*/
-static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
+ enum iscsi_param param, char *buf)
{
- struct iscsi_conn *conn = cls_conn->dd_data;
- struct bnx2i_conn *bnx2i_conn = conn->dd_data;
- int len = 0;
+ struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+ struct bnx2i_hba *hba = bnx2i_ep->hba;
+ int len = -ENOTCONN;
- if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba))
- goto out;
+ if (!hba)
+ return -ENOTCONN;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%hu\n",
- bnx2i_conn->ep->cm_sk->dst_port);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
+ mutex_unlock(&hba->net_dev_lock);
break;
case ISCSI_PARAM_CONN_ADDRESS:
- mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock);
- if (bnx2i_conn->ep->cm_sk)
- len = sprintf(buf, "%pI4\n",
- &bnx2i_conn->ep->cm_sk->dst_ip);
- mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock);
+ mutex_lock(&hba->net_dev_lock);
+ if (bnx2i_ep->cm_sk)
+ len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
+ mutex_unlock(&hba->net_dev_lock);
break;
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
-out:
+
return len;
}
@@ -1935,13 +1936,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
cnic_dev_10g = 1;
switch (bnx2i_ep->state) {
- case EP_STATE_CONNECT_FAILED:
case EP_STATE_CLEANUP_FAILED:
case EP_STATE_OFLD_FAILED:
case EP_STATE_DISCONN_TIMEDOUT:
ret = 0;
break;
case EP_STATE_CONNECT_START:
+ case EP_STATE_CONNECT_FAILED:
case EP_STATE_CONNECT_COMPL:
case EP_STATE_ULP_UPDATE_START:
case EP_STATE_ULP_UPDATE_COMPL:
@@ -2167,7 +2168,8 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.name = "bnx2i",
.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
CAP_MULTI_R2T | CAP_DATADGST |
- CAP_DATA_PATH_OFFLOAD,
+ CAP_DATA_PATH_OFFLOAD |
+ CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH |
ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN |
@@ -2200,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.bind_conn = bnx2i_conn_bind,
.destroy_conn = bnx2i_conn_destroy,
.set_param = iscsi_set_param,
- .get_conn_param = bnx2i_conn_get_param,
+ .get_conn_param = iscsi_conn_get_param,
.get_session_param = iscsi_session_get_param,
.get_host_param = bnx2i_host_get_param,
.start_conn = bnx2i_conn_start,
@@ -2209,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.xmit_task = bnx2i_task_xmit,
.get_stats = bnx2i_conn_get_stats,
/* TCP connect - disconnect - option-2 interface calls */
+ .get_ep_param = bnx2i_ep_get_param,
.ep_connect = bnx2i_ep_connect,
.ep_poll = bnx2i_ep_poll,
.ep_disconnect = bnx2i_ep_disconnect,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index a129a170b47b..fc2cdb62f53b 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* owner and name should be set already */
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
| CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1108,10 +1109,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
csk, idx, npods, gl);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
- /* hold on to the skb until we clear the ddp mapping */
- skb_get(skb);
+ if (!skb)
+ return -ENOMEM;
ulp_mem_io_set_hdr(skb, pm_addr);
cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head +
@@ -1136,56 +1138,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
cdev, idx, npods, tag);
for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
- struct sk_buff *skb = ddp->gl_skb[idx];
+ struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
+ PPOD_SIZE, 0, GFP_ATOMIC);
if (!skb) {
- pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n",
+ pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n",
tag, idx, i, npods);
continue;
}
- ddp->gl_skb[idx] = NULL;
- memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE);
ulp_mem_io_set_hdr(skb, pm_addr);
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(cdev->lldev, skb);
}
}
-static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++, idx++)
- if (ddp->gl_skb[idx]) {
- kfree_skb(ddp->gl_skb[idx]);
- ddp->gl_skb[idx] = NULL;
- }
-}
-
-static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx,
- int cnt, gfp_t gfp)
-{
- int i;
-
- log_debug(1 << CXGBI_DBG_DDP,
- "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt);
-
- for (i = 0; i < cnt; i++) {
- struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) +
- PPOD_SIZE, 0, gfp);
- if (skb)
- ddp->gl_skb[idx + i] = skb;
- else {
- ddp_free_gl_skb(ddp, idx, i);
- return -ENOMEM;
- }
- }
- return 0;
-}
-
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
unsigned int tid, int pg_idx, bool reply)
{
@@ -1316,8 +1282,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev)
}
tdev->ulp_iscsi = ddp;
- cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb;
- cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
index 5f5e3394b594..20593fd69d8f 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h
@@ -24,10 +24,21 @@
extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
-#define cxgb3i_get_private_ipv4addr(ndev) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr)
-#define cxgb3i_set_private_ipv4addr(ndev, addr) \
- (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr
+static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev)
+{
+ return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr;
+}
+
+static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev,
+ unsigned int addr)
+{
+ struct port_info *pi = (struct port_info *)netdev_priv(ndev);
+
+ pi->iscsic.flags = addr ? 1 : 0;
+ pi->iscsi_ipv4addr = addr;
+ if (addr)
+ memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN);
+}
struct cpl_iscsi_hdr_norss {
union opcode_tid ot;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 8c04fada710b..f3a4cd7cf782 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.name = DRV_MODULE_NAME,
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
CAP_DATADGST | CAP_DIGEST_OFFLOAD |
- CAP_PADDING_OFFLOAD,
+ CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
.param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH |
ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
@@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
- .get_conn_param = cxgbi_get_conn_param,
+ .get_conn_param = iscsi_conn_get_param,
.set_param = cxgbi_set_conn_param,
.get_stats = cxgbi_get_conn_stats,
/* pdu xmit req from user space */
@@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.xmit_pdu = cxgbi_conn_xmit_pdu,
.parse_pdu_itt = cxgbi_parse_pdu_itt,
/* TCP connect/disconnect */
+ .get_ep_param = cxgbi_get_ep_param,
.ep_connect = cxgbi_ep_connect,
.ep_poll = cxgbi_ep_poll,
.ep_disconnect = cxgbi_ep_disconnect,
@@ -1425,8 +1426,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
cxgbi_ddp_page_size_factor(pgsz_factor);
cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
- cdev->csk_ddp_free_gl_skb = NULL;
- cdev->csk_ddp_alloc_gl_skb = NULL;
cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
cdev->csk_ddp_set = ddp_set_map;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index a24dff9f9163..de764ea7419d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -530,6 +530,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk->dst = dst;
csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
csk->daddr.sin_port = daddr->sin_port;
+ csk->daddr.sin_family = daddr->sin_family;
csk->saddr.sin_addr.s_addr = rt->rt_src;
return csk;
@@ -1264,12 +1265,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
return idx;
}
- if (cdev->csk_ddp_alloc_gl_skb) {
- err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp);
- if (err < 0)
- goto unmark_entries;
- }
-
tag = cxgbi_ddp_tag_base(tformat, sw_tag);
tag |= idx << PPOD_IDX_SHIFT;
@@ -1280,11 +1275,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
hdr.page_offset = htonl(gl->offset);
err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
- if (err < 0) {
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, idx, npods);
+ if (err < 0)
goto unmark_entries;
- }
ddp->idx_last = idx;
log_debug(1 << CXGBI_DBG_DDP,
@@ -1350,8 +1342,6 @@ static void ddp_destroy(struct kref *kref)
>> PPOD_PAGES_SHIFT;
pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
kfree(gl);
- if (cdev->csk_ddp_free_gl_skb)
- cdev->csk_ddp_free_gl_skb(ddp, i, npods);
i += npods;
} else
i++;
@@ -1394,8 +1384,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev,
return -ENOMEM;
}
ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
- ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
- ppmax * sizeof(struct cxgbi_gather_list *));
cdev->ddp = ddp;
spin_lock_init(&ddp->map_lock);
@@ -1895,13 +1883,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
- u8 submode = 0;
+ if (hcrc || dcrc) {
+ u8 submode = 0;
- if (hcrc)
- submode |= 1;
- if (dcrc)
- submode |= 2;
- cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ if (hcrc)
+ submode |= 1;
+ if (dcrc)
+ submode |= 2;
+ cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
+ } else
+ cxgbi_skcb_ulp_mode(skb) = 0;
}
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
@@ -2197,32 +2188,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf)
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
+ char *buf)
{
- struct iscsi_conn *iconn = cls_conn->dd_data;
+ struct cxgbi_endpoint *cep = ep->dd_data;
+ struct cxgbi_sock *csk;
int len;
log_debug(1 << CXGBI_DBG_ISCSI,
- "cls_conn 0x%p, param %d.\n", cls_conn, param);
+ "cls_conn 0x%p, param %d.\n", ep, param);
switch (param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%hu\n", iconn->portal_port);
- spin_unlock_bh(&iconn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
- spin_lock_bh(&iconn->session->lock);
- len = sprintf(buf, "%s\n", iconn->portal_address);
- spin_unlock_bh(&iconn->session->lock);
- break;
+ if (!cep)
+ return -ENOTCONN;
+
+ csk = cep->csk;
+ if (!csk)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &csk->daddr, param, buf);
default:
- return iscsi_conn_get_param(cls_conn, param, buf);
+ return -ENOSYS;
}
return len;
}
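
The shape of the new contract, as a hedged sketch: an endpoint-based get_param answers only the address and port queries, resolving them from the endpoint's own sockaddr rather than from fields cached on the connection. struct example_endpoint and its fields are hypothetical; iscsi_conn_get_addr_param() is the libiscsi helper used above:

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

struct example_endpoint {		/* hypothetical dd_data layout */
	struct sockaddr_storage peer;	/* filled in at ep_connect time */
};

static int example_get_ep_param(struct iscsi_endpoint *ep,
				enum iscsi_param param, char *buf)
{
	struct example_endpoint *eep = ep->dd_data;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!eep)
			return -ENOTCONN;
		/* libiscsi formats the port or address from the sockaddr */
		return iscsi_conn_get_addr_param(&eep->peer, param, buf);
	default:
		return -ENOSYS;	/* everything else stays with get_conn_param */
	}
}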
-EXPORT_SYMBOL_GPL(cxgbi_get_conn_param);
+EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);
struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
@@ -2289,11 +2282,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
cxgbi_conn_max_xmit_dlength(conn);
cxgbi_conn_max_recv_dlength(conn);
- spin_lock_bh(&conn->session->lock);
- sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr);
- conn->portal_port = ntohs(csk->daddr.sin_port);
- spin_unlock_bh(&conn->session->lock);
-
log_debug(1 << CXGBI_DBG_ISCSI,
"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
cls_session, cls_conn, ep, cconn, csk);
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index c57d59db000c..0a20fd5f7102 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -131,7 +131,6 @@ struct cxgbi_ddp_info {
unsigned int rsvd_tag_mask;
spinlock_t map_lock;
struct cxgbi_gather_list **gl_map;
- struct sk_buff **gl_skb;
};
#define DDP_PGIDX_MAX 4
@@ -536,8 +535,6 @@ struct cxgbi_device {
struct cxgbi_ddp_info *ddp;
void (*dev_ddp_cleanup)(struct cxgbi_device *);
- void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int);
- int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t);
int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *,
unsigned int, unsigned int,
struct cxgbi_gather_list *);
@@ -715,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
-int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *);
+int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *);
struct iscsi_cls_conn *cxgbi_create_conn(struct iscsi_cls_session *, u32);
int cxgbi_bind_conn(struct iscsi_cls_session *,
struct iscsi_cls_conn *, u64, int);
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index b837c5b3c8f9..564e6ecd17c2 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -25,16 +25,9 @@
#include <scsi/scsi_dh.h>
#include "../scsi_priv.h"
-struct scsi_dh_devinfo_list {
- struct list_head node;
- char vendor[9];
- char model[17];
- struct scsi_device_handler *handler;
-};
-
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
-static LIST_HEAD(scsi_dh_dev_list);
+static int scsi_dh_list_idx = 1;
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
-
-static struct scsi_device_handler *
-scsi_dh_cache_lookup(struct scsi_device *sdev)
+static struct scsi_device_handler *get_device_handler_by_idx(int idx)
{
- struct scsi_dh_devinfo_list *tmp;
- struct scsi_device_handler *found_dh = NULL;
+ struct scsi_device_handler *tmp, *found = NULL;
spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_dev_list, node) {
- if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) &&
- !strncmp(sdev->model, tmp->model, strlen(tmp->model))) {
- found_dh = tmp->handler;
+ list_for_each_entry(tmp, &scsi_dh_list, list) {
+ if (tmp->idx == idx) {
+ found = tmp;
break;
}
}
spin_unlock(&list_lock);
-
- return found_dh;
-}
-
-static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh,
- struct scsi_device *sdev)
-{
- int i, found = 0;
-
- for(i = 0; scsi_dh->devlist[i].vendor; i++) {
- if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor,
- strlen(scsi_dh->devlist[i].vendor)) &&
- !strncmp(sdev->model, scsi_dh->devlist[i].model,
- strlen(scsi_dh->devlist[i].model))) {
- found = 1;
- break;
- }
- }
return found;
}
@@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device *sdev)
{
struct scsi_device_handler *found_dh = NULL;
- struct scsi_dh_devinfo_list *tmp;
+ int idx;
- found_dh = scsi_dh_cache_lookup(sdev);
- if (found_dh)
- return found_dh;
+ idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
+ SCSI_DEVINFO_DH);
+ found_dh = get_device_handler_by_idx(idx);
- if (scsi_dh) {
- if (scsi_dh_handler_lookup(scsi_dh, sdev))
- found_dh = scsi_dh;
- } else {
- struct scsi_device_handler *tmp_dh;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
- if (scsi_dh_handler_lookup(tmp_dh, sdev))
- found_dh = tmp_dh;
- }
- spin_unlock(&list_lock);
- }
-
- if (found_dh) { /* If device is found, add it to the cache */
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (tmp) {
- strncpy(tmp->vendor, sdev->vendor, 8);
- strncpy(tmp->model, sdev->model, 16);
- tmp->vendor[8] = '\0';
- tmp->model[16] = '\0';
- tmp->handler = found_dh;
- spin_lock(&list_lock);
- list_add(&tmp->node, &scsi_dh_dev_list);
- spin_unlock(&list_lock);
- } else {
- found_dh = NULL;
- }
- }
+ if (scsi_dh && found_dh != scsi_dh)
+ found_dh = NULL;
return found_dh;
}
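
Condensed, the two-step lookup this patch introduces (both calls appear above; the wrapper name is illustrative): registration seeds the shared devinfo table with (vendor, model) -> handler-index entries under the SCSI_DEVINFO_DH key, and matching reverses the mapping:

/* Illustrative condensation of device_handler_match() above. */
static struct scsi_device_handler *match_handler(struct scsi_device *sdev)
{
	int idx;

	/* step 1: (vendor, model) -> handler index via the devinfo table */
	idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
					  SCSI_DEVINFO_DH);
	/* step 2: handler index -> handler via the scsi_dh_list walk */
	return get_device_handler_by_idx(idx);
}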
@@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
+ int i;
+
if (get_device_handler(scsi_dh->name))
return -EBUSY;
spin_lock(&list_lock);
+ scsi_dh->idx = scsi_dh_list_idx++;
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
+
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_add_keyed(0,
+ scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ NULL,
+ scsi_dh->idx,
+ SCSI_DEVINFO_DH);
+ }
+
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
@@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- struct scsi_dh_devinfo_list *tmp, *pos;
+ int i;
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
@@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
+ for (i = 0; scsi_dh->devlist[i].vendor; i++) {
+ scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
+ scsi_dh->devlist[i].model,
+ SCSI_DEVINFO_DH);
+ }
+
spin_lock(&list_lock);
list_del(&scsi_dh->list);
- list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) {
- if (pos->handler == scsi_dh) {
- list_del(&pos->node);
- kfree(pos);
- }
- }
spin_unlock(&list_lock);
printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name);
@@ -576,6 +533,10 @@ static int __init scsi_dh_init(void)
{
int r;
+ r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
+ if (r)
+ return r;
+
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
@@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
+ scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}
module_init(scsi_dh_init);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6b729324b8d3..7cae0bc85390 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error)
{
struct alua_dh_data *h = req->end_io_data;
struct scsi_sense_hdr sense_hdr;
- unsigned err = SCSI_DH_IO;
+ unsigned err = SCSI_DH_OK;
if (error || host_byte(req->errors) != DID_OK ||
- msg_byte(req->errors) != COMMAND_COMPLETE)
+ msg_byte(req->errors) != COMMAND_COMPLETE) {
+ err = SCSI_DH_IO;
goto done;
+ }
- if (err == SCSI_DH_IO && h->senselen > 0) {
+ if (h->senselen > 0) {
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
&sense_hdr);
if (!err) {
@@ -285,7 +287,8 @@ static void stpg_endio(struct request *req, int error)
print_alua_state(h->state));
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -303,7 +306,6 @@ done:
static unsigned submit_stpg(struct alua_dh_data *h)
{
struct request *rq;
- int err = SCSI_DH_RES_TEMP_UNAVAIL;
int stpg_len = 8;
struct scsi_device *sdev = h->sdev;
@@ -332,7 +334,7 @@ static unsigned submit_stpg(struct alua_dh_data *h)
rq->end_io_data = h;
blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
- return err;
+ return SCSI_DH_OK;
}
/*
@@ -730,7 +732,9 @@ static const struct scsi_dh_devlist alua_dev_list[] = {
{"Pillar", "Axiom" },
{"Intel", "Multi-Flex"},
{"NETAPP", "LUN"},
+ {"NETAPP", "LUN C-Mode"},
{"AIX", "NVDISK"},
+ {"Promise", "VTrak"},
{NULL, NULL}
};
@@ -759,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err = SCSI_DH_OK;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 6faf472f7537..48441f6908a4 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int err;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e3916641e627..b479f1eef968 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error)
}
}
done:
- blk_put_request(req);
+ req->end_io_data = NULL;
+ __blk_put_request(req->q, req);
if (h->callback_fn) {
h->callback_fn(h->callback_data, err);
h->callback_fn = h->callback_data = NULL;
@@ -338,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev)
unsigned long flags;
int ret;
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
- + sizeof(struct hp_sw_dh_data) , GFP_KERNEL);
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ + sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n",
HP_SW_NAME);
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 5be3ae15cb71..293c183dfe6d 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev,
}
static struct request *rdac_failover_get(struct scsi_device *sdev,
- struct rdac_dh_data *h)
+ struct rdac_dh_data *h, struct list_head *list)
{
struct request *rq;
struct rdac_mode_common *common;
unsigned data_size;
+ struct rdac_queue_data *qdata;
+ u8 *lun_table;
if (h->ctlr->use_ms10) {
struct rdac_pg_expanded *rdac_pg;
@@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
rdac_pg->subpage_code = 0x1;
rdac_pg->page_len[0] = 0x01;
rdac_pg->page_len[1] = 0x28;
+ lun_table = rdac_pg->lun_table;
} else {
struct rdac_pg_legacy *rdac_pg;
@@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev,
common = &rdac_pg->common;
rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
rdac_pg->page_len = 0x68;
+ lun_table = rdac_pg->lun_table;
}
common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
common->rdac_options = RDAC_FORCED_QUIESENCE;
+ list_for_each_entry(qdata, list, entry) {
+ lun_table[qdata->h->lun] = 0x81;
+ }
+
/* get request for block layer packet command */
rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
if (!rq)
@@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work)
int err, retry_cnt = RDAC_RETRY_COUNT;
struct rdac_queue_data *tmp, *qdata;
LIST_HEAD(list);
- u8 *lun_table;
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work)
ctlr->ms_sdev = NULL;
spin_unlock(&ctlr->ms_lock);
- if (ctlr->use_ms10)
- lun_table = ctlr->mode_select.expanded.lun_table;
- else
- lun_table = ctlr->mode_select.legacy.lun_table;
-
retry:
err = SCSI_DH_RES_TEMP_UNAVAIL;
- rq = rdac_failover_get(sdev, h);
+ rq = rdac_failover_get(sdev, h, &list);
if (!rq)
goto done;
- list_for_each_entry(qdata, &list, entry) {
- lun_table[qdata->h->lun] = 0x81;
- }
-
RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
"%s MODE_SELECT command",
(char *) h->ctlr->array_name, h->ctlr->index,
@@ -769,6 +767,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{"DELL", "MD32xx"},
{"DELL", "MD32xxi"},
{"DELL", "MD36xxi"},
+ {"DELL", "MD36xxf"},
{"LSI", "INF-01-00"},
{"ENGENIO", "INF-01-00"},
{"STK", "FLEXLINE 380"},
@@ -800,7 +799,7 @@ static int rdac_bus_attach(struct scsi_device *sdev)
int err;
char array_name[ARRAY_LABEL_LEN];
- scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
+ scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
+ sizeof(*h) , GFP_KERNEL);
if (!scsi_dh_data) {
sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
@@ -906,4 +905,5 @@ module_exit(rdac_exit);
MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
+MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index 950f27615c76..f6d37d0271f7 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,2 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
+
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 3becc6a20a4f..bde6ee5333eb 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -31,6 +31,7 @@
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
+#include <linux/workqueue.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -58,6 +59,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
DEFINE_MUTEX(fcoe_config_mutex);
+static struct workqueue_struct *fcoe_wq;
+
/* fcoe_percpu_clean completion. Waiter protected by fcoe_create_mutex */
static DECLARE_COMPLETION(fcoe_flush_completion);
@@ -72,7 +75,6 @@ static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
-static void fcoe_clean_pending_queue(struct fc_lport *);
static void fcoe_percpu_clean(struct fc_lport *);
static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
@@ -80,7 +82,6 @@ static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
-static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
static void fcoe_dev_cleanup(void);
@@ -101,10 +102,11 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
-static int fcoe_create(const char *, struct kernel_param *);
-static int fcoe_destroy(const char *, struct kernel_param *);
-static int fcoe_enable(const char *, struct kernel_param *);
-static int fcoe_disable(const char *, struct kernel_param *);
+static bool fcoe_match(struct net_device *netdev);
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
+static int fcoe_destroy(struct net_device *netdev);
+static int fcoe_enable(struct net_device *netdev);
+static int fcoe_disable(struct net_device *netdev);
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
@@ -117,24 +119,6 @@ static void fcoe_recv_frame(struct sk_buff *skb);
static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
-__MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
-module_param_call(create_vn2vn, fcoe_create, NULL,
- (void *)FIP_MODE_VN2VN, S_IWUSR);
-__MODULE_PARM_TYPE(create_vn2vn, "string");
-MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
- "on an Ethernet interface");
-module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface");
-module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(enable, "string");
-MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface.");
-module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR);
-__MODULE_PARM_TYPE(disable, "string");
-MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface.");
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
@@ -145,8 +129,8 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};
-static struct scsi_transport_template *fcoe_transport_template;
-static struct scsi_transport_template *fcoe_vport_transport_template;
+static struct scsi_transport_template *fcoe_nport_scsi_transport;
+static struct scsi_transport_template *fcoe_vport_scsi_transport;
static int fcoe_vport_destroy(struct fc_vport *);
static int fcoe_vport_create(struct fc_vport *, bool disabled);
@@ -163,7 +147,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_transport_function = {
+struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -203,7 +187,7 @@ struct fc_function_template fcoe_transport_function = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_transport_function = {
+struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -354,10 +338,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
struct fcoe_interface *fcoe;
int err;
+ if (!try_module_get(THIS_MODULE)) {
+ FCOE_NETDEV_DBG(netdev,
+ "Could not get a reference to the module\n");
+ fcoe = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
if (!fcoe) {
FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
- return NULL;
+ fcoe = ERR_PTR(-ENOMEM);
+ goto out_nomod;
}
dev_hold(netdev);
@@ -376,9 +368,15 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
- return NULL;
+ fcoe = ERR_PTR(err);
+ goto out_nomod;
}
+ goto out;
+
+out_nomod:
+ module_put(THIS_MODULE);
+out:
return fcoe;
}
@@ -440,6 +438,7 @@ static void fcoe_interface_release(struct kref *kref)
fcoe_ctlr_destroy(&fcoe->ctlr);
kfree(fcoe);
dev_put(netdev);
+ module_put(THIS_MODULE);
}
/**
@@ -503,7 +502,7 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
rtnl_lock();
if (!is_zero_ether_addr(port->data_src_addr))
@@ -559,17 +558,6 @@ static int fcoe_lport_config(struct fc_lport *lport)
}
/**
- * fcoe_queue_timer() - The fcoe queue timer
- * @lport: The local port
- *
- * Calls fcoe_check_wait_queue on timeout
- */
-static void fcoe_queue_timer(ulong lport)
-{
- fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
-}
-
-/**
* fcoe_get_wwn() - Get the world wide name from LLD if it supports it
* @netdev: the associated net device
* @wwn: the output WWN
@@ -648,7 +636,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
/*
* Determine max frame size based on underlying device and optional
@@ -706,9 +694,9 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
lport->host->max_cmd_len = FCOE_MAX_CMD_LEN;
if (lport->vport)
- lport->host->transportt = fcoe_vport_transport_template;
+ lport->host->transportt = fcoe_vport_scsi_transport;
else
- lport->host->transportt = fcoe_transport_template;
+ lport->host->transportt = fcoe_nport_scsi_transport;
/* add the new host to the SCSI-ml */
rc = scsi_add_host(lport->host, dev);
@@ -758,7 +746,7 @@ bool fcoe_oem_match(struct fc_frame *fp)
static inline int fcoe_em_config(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_interface *oldfcoe = NULL;
struct net_device *old_real_dev, *cur_real_dev;
u16 min_xid = FCOE_MIN_XID;
@@ -842,7 +830,7 @@ skip_oem:
static void fcoe_if_destroy(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
@@ -884,7 +872,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
/* Release the Scsi_Host */
scsi_host_put(lport->host);
- module_put(THIS_MODULE);
}
/**
@@ -939,8 +926,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
struct net_device *netdev = fcoe->netdev;
- struct fc_lport *lport = NULL;
+ struct fc_lport *lport, *n_port;
struct fcoe_port *port;
+ struct Scsi_Host *shost;
int rc;
/*
* parent is only a vport if npiv is 1,
@@ -950,13 +938,11 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
FCOE_NETDEV_DBG(netdev, "Create Interface\n");
- if (!npiv) {
- lport = libfc_host_alloc(&fcoe_shost_template,
- sizeof(struct fcoe_port));
- } else {
- lport = libfc_vport_create(vport,
- sizeof(struct fcoe_port));
- }
+ if (!npiv)
+ lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port));
+ else
+ lport = libfc_vport_create(vport, sizeof(*port));
+
if (!lport) {
FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
rc = -ENOMEM;
@@ -964,7 +950,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
port = lport_priv(lport);
port->lport = lport;
- port->fcoe = fcoe;
+ port->priv = fcoe;
+ port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
+ port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
/* configure a fc_lport including the exchange manager */
@@ -1007,24 +995,27 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
goto out_lp_destroy;
}
- if (!npiv) {
- /*
- * fcoe_em_alloc() and fcoe_hostlist_add() both
- * need to be atomic with respect to other changes to the
- * hostlist since fcoe_em_alloc() looks for an existing EM
- * instance on host list updated by fcoe_hostlist_add().
- *
- * This is currently handled through the fcoe_config_mutex
- * begin held.
- */
-
+ /*
+ * fcoe_em_alloc() and fcoe_hostlist_add() both
+ * need to be atomic with respect to other changes to the
+ * hostlist since fcoe_em_alloc() looks for an existing EM
+ * instance on host list updated by fcoe_hostlist_add().
+ *
+ * This is currently handled through the fcoe_config_mutex
+ * being held.
+ */
+ if (!npiv)
/* lport exch manager allocation */
rc = fcoe_em_config(lport);
- if (rc) {
- FCOE_NETDEV_DBG(netdev, "Could not configure the EM "
- "for the interface\n");
- goto out_lp_destroy;
- }
+ else {
+ shost = vport_to_shost(vport);
+ n_port = shost_priv(shost);
+ rc = fc_exch_mgr_list_clone(n_port, lport);
+ }
+
+ if (rc) {
+ FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n");
+ goto out_lp_destroy;
}
fcoe_interface_get(fcoe);
@@ -1048,11 +1039,12 @@ out:
static int __init fcoe_if_init(void)
{
/* attach to scsi transport */
- fcoe_transport_template = fc_attach_transport(&fcoe_transport_function);
- fcoe_vport_transport_template =
- fc_attach_transport(&fcoe_vport_transport_function);
+ fcoe_nport_scsi_transport =
+ fc_attach_transport(&fcoe_nport_fc_functions);
+ fcoe_vport_scsi_transport =
+ fc_attach_transport(&fcoe_vport_fc_functions);
- if (!fcoe_transport_template) {
+ if (!fcoe_nport_scsi_transport) {
printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
return -ENODEV;
}
@@ -1069,10 +1061,10 @@ static int __init fcoe_if_init(void)
*/
int __exit fcoe_if_exit(void)
{
- fc_release_transport(fcoe_transport_template);
- fc_release_transport(fcoe_vport_transport_template);
- fcoe_transport_template = NULL;
- fcoe_vport_transport_template = NULL;
+ fc_release_transport(fcoe_nport_scsi_transport);
+ fc_release_transport(fcoe_vport_scsi_transport);
+ fcoe_nport_scsi_transport = NULL;
+ fcoe_vport_scsi_transport = NULL;
return 0;
}
@@ -1359,108 +1351,22 @@ err2:
}
/**
- * fcoe_start_io() - Start FCoE I/O
- * @skb: The packet to be transmitted
- *
- * This routine is called from the net device to start transmitting
- * FCoE packets.
- *
- * Returns: 0 for success
- */
-static inline int fcoe_start_io(struct sk_buff *skb)
-{
- struct sk_buff *nskb;
- int rc;
-
- nskb = skb_clone(skb, GFP_ATOMIC);
- rc = dev_queue_xmit(nskb);
- if (rc != 0)
- return rc;
- kfree_skb(skb);
- return 0;
-}
-
-/**
- * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC
* @skb: The packet to be transmitted
* @tlen: The total length of the trailer
*
- * This routine allocates a page for frame trailers. The page is re-used if
- * there is enough room left on it for the current trailer. If there isn't
- * enough buffer left a new page is allocated for the trailer. Reference to
- * the page from this function as well as the skbs using the page fragments
- * ensure that the page is freed at the appropriate time.
- *
* Returns: 0 for success
*/
-static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
- struct page *page;
+ int rc;
fps = &get_cpu_var(fcoe_percpu);
- page = fps->crc_eof_page;
- if (!page) {
- page = alloc_page(GFP_ATOMIC);
- if (!page) {
- put_cpu_var(fcoe_percpu);
- return -ENOMEM;
- }
- fps->crc_eof_page = page;
- fps->crc_eof_offset = 0;
- }
-
- get_page(page);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
- fps->crc_eof_offset, tlen);
- skb->len += tlen;
- skb->data_len += tlen;
- skb->truesize += tlen;
- fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
-
- if (fps->crc_eof_offset >= PAGE_SIZE) {
- fps->crc_eof_page = NULL;
- fps->crc_eof_offset = 0;
- put_page(page);
- }
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
put_cpu_var(fcoe_percpu);
- return 0;
-}
-/**
- * fcoe_fc_crc() - Calculates the CRC for a given frame
- * @fp: The frame to be checksumed
- *
- * This uses crc32() routine to calculate the CRC for a frame
- *
- * Return: The 32 bit CRC value
- */
-u32 fcoe_fc_crc(struct fc_frame *fp)
-{
- struct sk_buff *skb = fp_skb(fp);
- struct skb_frag_struct *frag;
- unsigned char *data;
- unsigned long off, len, clen;
- u32 crc;
- unsigned i;
-
- crc = crc32(~0, skb->data, skb_headlen(skb));
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- off = frag->page_offset;
- len = frag->size;
- while (len > 0) {
- clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
- data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
- KM_SKB_DATA_SOFTIRQ);
- crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
- kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
- off += clen;
- len -= clen;
- }
- }
- return crc;
+ return rc;
}
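
The get_cpu_var()/put_cpu_var() bracket above is what lets the shared crc_eof page live without a lock: preemption is disabled while this CPU's fcoe_percpu slot is in use, so no other task on the same CPU can race with it. The same pattern in isolation (the payload struct and names are illustrative):

#include <linux/percpu.h>
#include <linux/mm_types.h>

struct scratch {			/* illustrative per-CPU state */
	struct page *page;
	int offset;
};
static DEFINE_PER_CPU(struct scratch, scratch_area);

static void touch_scratch(void)
{
	struct scratch *s = &get_cpu_var(scratch_area);	/* disables preemption */

	s->offset = 0;		/* safe: nothing else runs on this CPU now */
	put_cpu_var(scratch_area);	/* re-enables preemption */
}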
/**
@@ -1483,7 +1389,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int tlen; /* trailer length */
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
u8 sof, eof;
struct fcoe_hdr *hp;
@@ -1524,7 +1430,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* copy port crc and eof to the skb buff */
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag;
- if (fcoe_get_paged_crc_eof(skb, tlen)) {
+ if (fcoe_alloc_paged_crc_eof(skb, tlen)) {
kfree_skb(skb);
return -ENOMEM;
}
@@ -1604,6 +1510,56 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
}
/**
+ * fcoe_filter_frames() - filter out bad fcoe frames, e.g. bad CRC
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ *
+ * Return: 0 on passing filtering checks
+ */
+static inline int fcoe_filter_frames(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fcoe_interface *fcoe;
+ struct fc_frame_header *fh;
+ struct sk_buff *skb = (struct sk_buff *)fp;
+ struct fcoe_dev_stats *stats;
+
+ /*
+ * We only check the CRC here when no offload has validated it;
+ * solicited FCP data is passed through unchecked because the FCP
+ * layer verifies it during the copy.
+ */
+ if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ else
+ fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP)
+ return 0;
+
+ fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
+ if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
+ return -EINVAL;
+ }
+
+ if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) ||
+ le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) {
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ return 0;
+ }
+
+ stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+ return -EINVAL;
+}
+
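
The acceptance test in fcoe_filter_frames() follows the FC-FS convention used throughout this file: the trailer carries the one's complement of the running CRC-32, so a frame is valid when the stored value equals ~crc32(~0, data, len). As a minimal standalone check (function name illustrative):

#include <linux/crc32.h>
#include <linux/types.h>

/* True when the received trailer CRC matches the frame contents. */
static bool fcoe_crc_ok(const void *data, size_t len, __le32 stored)
{
	return le32_to_cpu(stored) == ~crc32(~0, data, len);
}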
+/**
* fcoe_recv_frame() - process a single received frame
* @skb: frame to process
*/
@@ -1613,7 +1569,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fcoe_dev_stats *stats;
- struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_port *port;
@@ -1644,7 +1599,6 @@ static void fcoe_recv_frame(struct sk_buff *skb)
* was done in fcoe_rcv already.
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- fh = (struct fc_frame_header *) skb_transport_header(skb);
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
@@ -1677,35 +1631,11 @@ static void fcoe_recv_frame(struct sk_buff *skb)
if (pskb_trim(skb, fr_len))
goto drop;
- /*
- * We only check CRC if no offload is available and if it is
- * it's solicited data, in which case, the FCP layer would
- * check it during the copy.
- */
- if (lport->crc_offload &&
- skb->ip_summed == CHECKSUM_UNNECESSARY)
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
- else
- fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
-
- fh = fc_frame_header_get(fp);
- if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA ||
- fh->fh_type != FC_TYPE_FCP) &&
- (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
- printk(KERN_WARNING "fcoe: dropping "
- "frame with CRC error\n");
- stats->InvalidCRCCount++;
- goto drop;
- }
- fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ if (!fcoe_filter_frames(lport, fp)) {
+ put_cpu();
+ fc_exch_recv(lport, fp);
+ return;
}
- put_cpu();
- fc_exch_recv(lport, fp);
- return;
-
drop:
stats->ErrorFrames++;
put_cpu();
@@ -1744,64 +1674,6 @@ int fcoe_percpu_receive_thread(void *arg)
}
/**
- * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
- * @lport: The local port whose backlog is to be cleared
- *
- * This empties the wait_queue, dequeues the head of the wait_queue queue
- * and calls fcoe_start_io() for each packet. If all skb have been
- * transmitted it returns the qlen. If an error occurs it restores
- * wait_queue (to try again later) and returns -1.
- *
- * The wait_queue is used when the skb transmit fails. The failed skb
- * will go in the wait_queue which will be emptied by the timer function or
- * by the next skb transmit.
- */
-static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
-{
- struct fcoe_port *port = lport_priv(lport);
- int rc;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (skb)
- __skb_queue_tail(&port->fcoe_pending_queue, skb);
-
- if (port->fcoe_pending_queue_active)
- goto out;
- port->fcoe_pending_queue_active = 1;
-
- while (port->fcoe_pending_queue.qlen) {
- /* keep qlen > 0 until fcoe_start_io succeeds */
- port->fcoe_pending_queue.qlen++;
- skb = __skb_dequeue(&port->fcoe_pending_queue);
-
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- rc = fcoe_start_io(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
-
- if (rc) {
- __skb_queue_head(&port->fcoe_pending_queue, skb);
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- break;
- }
- /* undo temporary increment above */
- port->fcoe_pending_queue.qlen--;
- }
-
- if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
- lport->qfull = 0;
- if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
- mod_timer(&port->timer, jiffies + 2);
- port->fcoe_pending_queue_active = 0;
-out:
- if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- lport->qfull = 1;
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- return;
-}
-
-/**
* fcoe_dev_setup() - Setup the link change notification interface
*/
static void fcoe_dev_setup(void)
@@ -1872,7 +1744,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
case NETDEV_FEAT_CHANGE:
@@ -1898,39 +1770,16 @@ out:
}
/**
- * fcoe_if_to_netdev() - Parse a name buffer to get a net device
- * @buffer: The name of the net device
- *
- * Returns: NULL or a ptr to net_device
- */
-static struct net_device *fcoe_if_to_netdev(const char *buffer)
-{
- char *cp;
- char ifname[IFNAMSIZ + 2];
-
- if (buffer) {
- strlcpy(ifname, buffer, IFNAMSIZ);
- cp = ifname + strlen(ifname);
- while (--cp >= ifname && *cp == '\n')
- *cp = '\0';
- return dev_get_by_name(&init_net, ifname);
- }
- return NULL;
-}
-
-/**
* fcoe_disable() - Disables a FCoE interface
- * @buffer: The name of the Ethernet interface to be disabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device of the Ethernet interface to be disabled
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_disable(const char *buffer, struct kernel_param *kp)
+static int fcoe_disable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -1946,16 +1795,9 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
}
#endif
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -1967,7 +1809,6 @@ static int fcoe_disable(const char *buffer, struct kernel_param *kp)
} else
rc = -ENODEV;
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -1975,17 +1816,15 @@ out_nodev:
/**
* fcoe_enable() - Enables a FCoE interface
- * @buffer: The name of the Ethernet interface to be enabled
- * @kp: The associated kernel parameter
+ * @netdev: The net_device of the Ethernet interface to be enabled
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_enable(const char *buffer, struct kernel_param *kp)
+static int fcoe_enable(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2000,17 +1839,9 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
@@ -2021,7 +1852,6 @@ static int fcoe_enable(const char *buffer, struct kernel_param *kp)
else if (!fcoe_link_ok(fcoe->ctlr.lp))
fcoe_ctlr_link_up(&fcoe->ctlr);
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2029,17 +1859,15 @@ out_nodev:
/**
* fcoe_destroy() - Destroy a FCoE interface
- * @buffer: The name of the Ethernet interface to be destroyed
- * @kp: The associated kernel parameter
+ * @netdev: The net_device of the Ethernet interface to be destroyed
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
+static int fcoe_destroy(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
- struct net_device *netdev;
int rc = 0;
mutex_lock(&fcoe_config_mutex);
@@ -2054,32 +1882,21 @@ static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
goto out_nodev;
}
#endif
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
- goto out_nodev;
- }
-
if (!rtnl_trylock()) {
- dev_put(netdev);
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
fcoe = fcoe_hostlist_lookup_port(netdev);
if (!fcoe) {
rtnl_unlock();
rc = -ENODEV;
- goto out_putdev;
+ goto out_nodev;
}
fcoe_interface_cleanup(fcoe);
list_del(&fcoe->list);
/* RTNL mutex is dropped by fcoe_if_destroy */
fcoe_if_destroy(fcoe->ctlr.lp);
-
-out_putdev:
- dev_put(netdev);
out_nodev:
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2102,27 +1919,39 @@ static void fcoe_destroy_work(struct work_struct *work)
}
/**
+ * fcoe_match() - Check if the FCoE is supported on the given netdevice
+ * @netdev: The net_device to be checked for FCoE support
+ *
+ * Called from fcoe transport.
+ *
+ * Returns: true always, since this is the default FCoE transport
+ * and therefore supports all netdevs.
+ */
+static bool fcoe_match(struct net_device *netdev)
+{
+ return true;
+}
+
+/**
* fcoe_create() - Create a fcoe interface
- * @buffer: The name of the Ethernet interface to create on
- * @kp: The associated kernel param
+ * @netdev: The net_device of the Ethernet interface to create the instance on
+ * @fip_mode: The FIP mode for this creation
*
- * Called from sysfs.
+ * Called from fcoe transport.
*
* Returns: 0 for success
*/
-static int fcoe_create(const char *buffer, struct kernel_param *kp)
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
- enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
int rc;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
- struct net_device *netdev;
mutex_lock(&fcoe_config_mutex);
if (!rtnl_trylock()) {
mutex_unlock(&fcoe_config_mutex);
- return restart_syscall();
+ return -ERESTARTSYS;
}
#ifdef CONFIG_FCOE_MODULE
@@ -2133,31 +1962,20 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
*/
if (THIS_MODULE->state != MODULE_STATE_LIVE) {
rc = -ENODEV;
- goto out_nomod;
- }
-#endif
-
- if (!try_module_get(THIS_MODULE)) {
- rc = -EINVAL;
- goto out_nomod;
- }
-
- netdev = fcoe_if_to_netdev(buffer);
- if (!netdev) {
- rc = -ENODEV;
goto out_nodev;
}
+#endif
/* look for existing lport */
if (fcoe_hostlist_lookup(netdev)) {
rc = -EEXIST;
- goto out_putdev;
+ goto out_nodev;
}
fcoe = fcoe_interface_create(netdev, fip_mode);
- if (!fcoe) {
- rc = -ENOMEM;
- goto out_putdev;
+ if (IS_ERR(fcoe)) {
+ rc = PTR_ERR(fcoe);
+ goto out_nodev;
}
lport = fcoe_if_create(fcoe, &netdev->dev, 0);
@@ -2186,18 +2004,13 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp)
* should be holding a reference taken in fcoe_if_create().
*/
fcoe_interface_put(fcoe);
- dev_put(netdev);
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return 0;
out_free:
fcoe_interface_put(fcoe);
-out_putdev:
- dev_put(netdev);
out_nodev:
- module_put(THIS_MODULE);
-out_nomod:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
return rc;
@@ -2212,8 +2025,7 @@ out_nomod:
*/
int fcoe_link_speed_update(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
if (!dev_ethtool_get_settings(netdev, &ecmd)) {
@@ -2244,8 +2056,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
*/
int fcoe_link_ok(struct fc_lport *lport)
{
- struct fcoe_port *port = lport_priv(lport);
- struct net_device *netdev = port->fcoe->netdev;
+ struct net_device *netdev = fcoe_netdev(lport);
if (netif_oper_up(netdev))
return 0;
@@ -2309,24 +2120,6 @@ void fcoe_percpu_clean(struct fc_lport *lport)
}
/**
- * fcoe_clean_pending_queue() - Dequeue a skb and free it
- * @lport: The local port to dequeue a skb on
- */
-void fcoe_clean_pending_queue(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct sk_buff *skb;
-
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
- kfree_skb(skb);
- spin_lock_bh(&port->fcoe_pending_queue.lock);
- }
- spin_unlock_bh(&port->fcoe_pending_queue.lock);
-}
-
-/**
* fcoe_reset() - Reset a local port
* @shost: The SCSI host associated with the local port to be reset
*
@@ -2335,7 +2128,13 @@ void fcoe_clean_pending_queue(struct fc_lport *lport)
int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
- fc_lport_reset(lport);
+ struct fcoe_port *port = lport_priv(lport);
+ struct fcoe_interface *fcoe = port->priv;
+
+ fcoe_ctlr_link_down(&fcoe->ctlr);
+ fcoe_clean_pending_queue(fcoe->ctlr.lp);
+ if (!fcoe_link_ok(fcoe->ctlr.lp))
+ fcoe_ctlr_link_up(&fcoe->ctlr);
return 0;
}
@@ -2393,12 +2192,24 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
if (!fcoe) {
port = lport_priv(lport);
- fcoe = port->fcoe;
+ fcoe = port->priv;
list_add_tail(&fcoe->list, &fcoe_hostlist);
}
return 0;
}
+
+static struct fcoe_transport fcoe_sw_transport = {
+ .name = {FCOE_TRANSPORT_DEFAULT},
+ .attached = false,
+ .list = LIST_HEAD_INIT(fcoe_sw_transport.list),
+ .match = fcoe_match,
+ .create = fcoe_create,
+ .destroy = fcoe_destroy,
+ .enable = fcoe_enable,
+ .disable = fcoe_disable,
+};
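
With the ops table above, fcoe.ko becomes just the first client of the new libfcoe transport interface; another provider would attach the same way. A hedged sketch of a hypothetical second transport (all my_* names are illustrative; only the struct layout, the ops prototypes, and fcoe_transport_attach() come from this patch):

#include <linux/list.h>
#include <linux/netdevice.h>
#include <scsi/libfcoe.h>

/* Stubs for illustration; real providers implement these. */
static int my_create(struct net_device *netdev, enum fip_state fip_mode)
{ return -ENODEV; }
static int my_destroy(struct net_device *netdev) { return -ENODEV; }
static int my_enable(struct net_device *netdev) { return -ENODEV; }
static int my_disable(struct net_device *netdev) { return -ENODEV; }

static bool my_match(struct net_device *netdev)
{
	return false;	/* claim only netdevs this provider can drive */
}

static struct fcoe_transport my_transport = {
	.name	  = { "myfcoe" },	/* hypothetical name */
	.attached = false,
	.list	  = LIST_HEAD_INIT(my_transport.list),
	.match	  = my_match,
	.create	  = my_create,
	.destroy  = my_destroy,
	.enable	  = my_enable,
	.disable  = my_disable,
};

static int __init my_init(void)
{
	return fcoe_transport_attach(&my_transport);	/* register with libfcoe */
}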
+
/**
* fcoe_init() - Initialize fcoe.ko
*
@@ -2410,6 +2221,18 @@ static int __init fcoe_init(void)
unsigned int cpu;
int rc = 0;
+ fcoe_wq = alloc_workqueue("fcoe", 0, 0);
+ if (!fcoe_wq)
+ return -ENOMEM;
+
+ /* register as a fcoe transport */
+ rc = fcoe_transport_attach(&fcoe_sw_transport);
+ if (rc) {
+ printk(KERN_ERR "failed to register an fcoe transport, check "
+ "if libfcoe is loaded\n");
+ destroy_workqueue(fcoe_wq);
+ return rc;
+ }
+
mutex_lock(&fcoe_config_mutex);
for_each_possible_cpu(cpu) {
@@ -2440,6 +2263,7 @@ out_free:
fcoe_percpu_thread_destroy(cpu);
}
mutex_unlock(&fcoe_config_mutex);
+ destroy_workqueue(fcoe_wq);
return rc;
}
module_init(fcoe_init);
@@ -2465,7 +2289,7 @@ static void __exit fcoe_exit(void)
list_del(&fcoe->list);
port = lport_priv(fcoe->ctlr.lp);
fcoe_interface_cleanup(fcoe);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2476,16 +2300,21 @@ static void __exit fcoe_exit(void)
mutex_unlock(&fcoe_config_mutex);
- /* flush any asyncronous interface destroys,
- * this should happen after the netdev notifier is unregistered */
- flush_scheduled_work();
- /* That will flush out all the N_Ports on the hostlist, but now we
- * may have NPIV VN_Ports scheduled for destruction */
- flush_scheduled_work();
+ /*
+ * destroy_work items may be chained, but destroy_workqueue()
+ * can take care of them. Just kill the fcoe_wq.
+ */
+ destroy_workqueue(fcoe_wq);
- /* detach from scsi transport
- * must happen after all destroys are done, therefor after the flush */
+ /*
+ * Detaching from the scsi transport must happen after all
+ * destroys are done on the fcoe_wq. destroy_workqueue will
+ * ensure the fcoe_wq is flushed.
+ */
fcoe_if_exit();
+
+ /* detach from fcoe transport */
+ fcoe_transport_detach(&fcoe_sw_transport);
}
module_exit(fcoe_exit);
@@ -2557,7 +2386,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *fip = &fcoe->ctlr;
struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -2590,7 +2419,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fcoe_port *port = lport_priv(n_port);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
struct net_device *netdev = fcoe->netdev;
struct fc_lport *vn_port;
@@ -2630,7 +2459,7 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
mutex_lock(&n_port->lp_mutex);
list_del(&vn_port->list);
mutex_unlock(&n_port->lp_mutex);
- schedule_work(&port->destroy_work);
+ queue_work(fcoe_wq, &port->destroy_work);
return 0;
}
@@ -2734,7 +2563,7 @@ static void fcoe_set_port_id(struct fc_lport *lport,
u32 port_id, struct fc_frame *fp)
{
struct fcoe_port *port = lport_priv(lport);
- struct fcoe_interface *fcoe = port->fcoe;
+ struct fcoe_interface *fcoe = port->priv;
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index c69b2c56c2d1..408a6fd78fb4 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -24,7 +24,7 @@
#include <linux/kthread.h>
#define FCOE_MAX_QUEUE_DEPTH 256
-#define FCOE_LOW_QUEUE_DEPTH 32
+#define FCOE_MIN_QUEUE_DEPTH 32
#define FCOE_WORD_TO_BYTE 4
@@ -40,12 +40,6 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-/*
- * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload)
- * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes
- */
-#define FCOE_MTU 2158
-
unsigned int fcoe_debug_logging;
module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
@@ -71,21 +65,6 @@ do { \
netdev->name, ##args);)
/**
- * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads
- * @thread: The thread context
- * @fcoe_rx_list: The queue of pending packets to process
- * @page: The memory page for calculating frame trailer CRCs
- * @crc_eof_offset: The offset into the CRC page pointing to available
- * memory for a new trailer
- */
-struct fcoe_percpu_s {
- struct task_struct *thread;
- struct sk_buff_head fcoe_rx_list;
- struct page *crc_eof_page;
- int crc_eof_offset;
-};
-
-/**
* struct fcoe_interface - A FCoE interface
* @list: Handle for a list of FCoE interfaces
* @netdev: The associated net device
@@ -108,30 +87,6 @@ struct fcoe_interface {
struct kref kref;
};
-/**
- * struct fcoe_port - The FCoE private structure
- * @fcoe: The associated fcoe interface
- * @lport: The associated local port
- * @fcoe_pending_queue: The pending Rx queue of skbs
- * @fcoe_pending_queue_active: Indicates if the pending queue is active
- * @timer: The queue timer
- * @destroy_work: Handle for work context
- * (to prevent RTNL deadlocks)
- * @data_srt_addr: Source address for data
- *
- * An instance of this structure is to be allocated along with the
- * Scsi_Host and libfc fc_lport structures.
- */
-struct fcoe_port {
- struct fcoe_interface *fcoe;
- struct fc_lport *lport;
- struct sk_buff_head fcoe_pending_queue;
- u8 fcoe_pending_queue_active;
- struct timer_list timer;
- struct work_struct destroy_work;
- u8 data_src_addr[ETH_ALEN];
-};
-
#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
/**
@@ -140,7 +95,8 @@ struct fcoe_port {
*/
static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
{
- return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev;
+ return ((struct fcoe_interface *)
+ ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
#endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 625c6be25396..c93f007e702f 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -44,9 +44,7 @@
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>
-MODULE_AUTHOR("Open-FCoE.org");
-MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs");
-MODULE_LICENSE("GPL v2");
+#include "libfcoe.h"
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
@@ -66,31 +64,7 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS;
static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS;
static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS;
-unsigned int libfcoe_debug_logging;
-module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
-
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-
-#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
-do { \
- if (unlikely(libfcoe_debug_logging & LEVEL)) \
- do { \
- CMD; \
- } while (0); \
-} while (0)
-
-#define LIBFCOE_DBG(fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
-
-#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
- LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
-
-static const char *fcoe_ctlr_states[] = {
+static const char * const fcoe_ctlr_states[] = {
[FIP_ST_DISABLED] = "DISABLED",
[FIP_ST_LINK_WAIT] = "LINK_WAIT",
[FIP_ST_AUTO] = "AUTO",
@@ -308,8 +282,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_size_desc size;
- } __attribute__((packed)) desc;
- } __attribute__((packed)) *sol;
+ } __packed desc;
+ } __packed * sol;
u32 fcoe_size;
skb = dev_alloc_skb(sizeof(*sol));
@@ -456,7 +430,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
struct ethhdr eth;
struct fip_header fip;
struct fip_mac_desc mac;
- } __attribute__((packed)) *kal;
+ } __packed * kal;
struct fip_vn_desc *vn;
u32 len;
struct fc_lport *lp;
@@ -527,7 +501,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
struct ethhdr eth;
struct fip_header fip;
struct fip_encaps encaps;
- } __attribute__((packed)) *cap;
+ } __packed * cap;
struct fc_frame_header *fh;
struct fip_mac_desc *mac;
struct fcoe_fcf *fcf;
@@ -1819,7 +1793,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
struct fip_mac_desc mac;
struct fip_wwn_desc wwnn;
struct fip_vn_desc vn;
- } __attribute__((packed)) *frame;
+ } __packed * frame;
struct fip_fc4_feat *ff;
struct fip_size_desc *size;
u32 fcp_feat;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
new file mode 100644
index 000000000000..258684101bfd
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -0,0 +1,770 @@
+/*
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+#include <linux/crc32.h>
+#include <scsi/libfcoe.h>
+
+#include "libfcoe.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs");
+MODULE_LICENSE("GPL v2");
+
+static int fcoe_transport_create(const char *, struct kernel_param *);
+static int fcoe_transport_destroy(const char *, struct kernel_param *);
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp);
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device);
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device);
+static int fcoe_transport_enable(const char *, struct kernel_param *);
+static int fcoe_transport_disable(const char *, struct kernel_param *);
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr);
+
+static LIST_HEAD(fcoe_transports);
+static DEFINE_MUTEX(ft_mutex);
+static LIST_HEAD(fcoe_netdevs);
+static DEFINE_MUTEX(fn_mutex);
+
+unsigned int libfcoe_debug_logging;
+module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR);
+__MODULE_PARM_TYPE(show, "string");
+MODULE_PARM_DESC(show, " Show attached FCoE transports");
+
+module_param_call(create, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_FABRIC, S_IWUSR);
+__MODULE_PARM_TYPE(create, "string");
+MODULE_PARM_DESC(create, " Creates fcoe instance on an Ethernet interface");
+
+module_param_call(create_vn2vn, fcoe_transport_create, NULL,
+ (void *)FIP_MODE_VN2VN, S_IWUSR);
+__MODULE_PARM_TYPE(create_vn2vn, "string");
+MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance "
+ "on an Ethernet interface");
+
+module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(destroy, "string");
+MODULE_PARM_DESC(destroy, " Destroys fcoe instance on an Ethernet interface");
+
+module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(enable, "string");
+MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface.");
+
+module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR);
+__MODULE_PARM_TYPE(disable, "string");
+MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface.");
+
+/* notification function for packets from net device */
+static struct notifier_block libfcoe_notifier = {
+ .notifier_call = libfcoe_device_notification,
+};
+
+/**
+ * fcoe_fc_crc() - Calculates the CRC for a given frame
+ * @fp: The frame to be checksummed
+ *
+ * This uses crc32() routine to calculate the CRC for a frame
+ *
+ * Return: The 32 bit CRC value
+ */
+u32 fcoe_fc_crc(struct fc_frame *fp)
+{
+ struct sk_buff *skb = fp_skb(fp);
+ struct skb_frag_struct *frag;
+ unsigned char *data;
+ unsigned long off, len, clen;
+ u32 crc;
+ unsigned i;
+
+ crc = crc32(~0, skb->data, skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ off = frag->page_offset;
+ len = frag->size;
+ while (len > 0) {
+ clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+ data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+ KM_SKB_DATA_SOFTIRQ);
+ crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
+ kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+ off += clen;
+ len -= clen;
+ }
+ }
+ return crc;
+}
+EXPORT_SYMBOL_GPL(fcoe_fc_crc);
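A minimal sketch of how a transmit path might consume this value, modeled on FCoE drivers of this era (assumes a linear skb and the struct fcoe_crc_eof trailer from <scsi/fc/fc_fcoe.h>; the FC CRC is inverted before it goes on the wire):

	struct fcoe_crc_eof *cp;

	cp = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*cp));
	memset(cp, 0, sizeof(*cp));
	cp->fcoe_eof = eof;				/* e.g. FC_EOF_T */
	cp->fcoe_crc32 = cpu_to_le32(~fcoe_fc_crc(fp));	/* ones-complemented CRC */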
+
+/**
+ * fcoe_start_io() - Start FCoE I/O
+ * @skb: The packet to be transmitted
+ *
+ * This routine hands an FCoE packet to the net device for
+ * transmission.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_start_io(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ int rc;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
+ rc = dev_queue_xmit(nskb);
+ if (rc != 0)
+ return rc;
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_start_io);
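Because the frame is cloned before dev_queue_xmit(), a failed transmit leaves the caller still owning the original skb, which can simply be requeued. A sketch of the resulting calling convention, mirroring how fcoe_check_wait_queue() below is typically used by transmit paths:

	if (fcoe_start_io(skb))
		fcoe_check_wait_queue(lport, skb);	/* park skb; the timer retries it */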
+
+
+/**
+ * fcoe_clean_pending_queue() - Dequeue and free all skbs on the pending queue
+ * @lport: The local port whose pending queue is to be emptied
+ */
+void fcoe_clean_pending_queue(struct fc_lport *lport)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ struct sk_buff *skb;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ kfree_skb(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+ }
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue);
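A hedged usage sketch: a transport might flush the backlog like this when the link goes down, so the queue timer has nothing stale left to transmit (link_ok is an illustrative flag, not from this patch):

	if (!link_ok)
		fcoe_clean_pending_queue(lport);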
+
+/**
+ * fcoe_check_wait_queue() - Attempt to clear the transmit backlog
+ * @lport: The local port whose backlog is to be cleared
+ * @skb: An skb to append to the backlog before draining, or NULL
+ *
+ * This drains the wait_queue, dequeuing skbs from its head and calling
+ * fcoe_start_io() for each one. If a transmit fails, the skb is put back
+ * at the head of the wait_queue to be retried later and the loop stops.
+ * The lport's qfull flag is updated against min/max_queue_depth as the
+ * queue shrinks or grows.
+ *
+ * The wait_queue is used when an skb transmit fails. The failed skb
+ * goes on the wait_queue, which is emptied by the timer function or
+ * by the next skb transmit.
+ */
+void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb)
+{
+ struct fcoe_port *port = lport_priv(lport);
+ int rc;
+
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (skb)
+ __skb_queue_tail(&port->fcoe_pending_queue, skb);
+
+ if (port->fcoe_pending_queue_active)
+ goto out;
+ port->fcoe_pending_queue_active = 1;
+
+ while (port->fcoe_pending_queue.qlen) {
+ /* keep qlen > 0 until fcoe_start_io succeeds */
+ port->fcoe_pending_queue.qlen++;
+ skb = __skb_dequeue(&port->fcoe_pending_queue);
+
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+ rc = fcoe_start_io(skb);
+ spin_lock_bh(&port->fcoe_pending_queue.lock);
+
+ if (rc) {
+ __skb_queue_head(&port->fcoe_pending_queue, skb);
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ break;
+ }
+ /* undo temporary increment above */
+ port->fcoe_pending_queue.qlen--;
+ }
+
+ if (port->fcoe_pending_queue.qlen < port->min_queue_depth)
+ lport->qfull = 0;
+ if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+ mod_timer(&port->timer, jiffies + 2);
+ port->fcoe_pending_queue_active = 0;
+out:
+ if (port->fcoe_pending_queue.qlen > port->max_queue_depth)
+ lport->qfull = 1;
+ spin_unlock_bh(&port->fcoe_pending_queue.lock);
+}
+EXPORT_SYMBOL_GPL(fcoe_check_wait_queue);
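The qfull flag maintained above is the back-pressure signal for the upper layers; a hedged sketch of how a transmit entry point might honor it (libfc's FCP path consults lport->qfull in much this way):

	if (lport->qfull)
		return -EBUSY;	/* let the queue timer drain the backlog first */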
+
+/**
+ * fcoe_queue_timer() - The fcoe queue timer
+ * @lport: The local port
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ */
+void fcoe_queue_timer(ulong lport)
+{
+ fcoe_check_wait_queue((struct fc_lport *)lport, NULL);
+}
+EXPORT_SYMBOL_GPL(fcoe_queue_timer);
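This callback is meant to be armed as the per-port backlog timer; a minimal sketch of the setup a transport would do at port initialization, assuming the setup_timer() interface of this kernel generation:

	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport);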
+
+/**
+ * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC
+ * @skb: The packet to be transmitted
+ * @tlen: The total length of the trailer
+ * @fps: The per-CPU FCoE context
+ *
+ * This routine allocates a page for frame trailers. The page is re-used if
+ * there is enough room left on it for the current trailer. If there isn't
+ * enough room left, a new page is allocated for the trailer. References to
+ * the page from this function, as well as from the skbs using the page
+ * fragments, ensure that the page is freed at the appropriate time.
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
+ struct fcoe_percpu_s *fps)
+{
+ struct page *page;
+
+ page = fps->crc_eof_page;
+ if (!page) {
+ page = alloc_page(GFP_ATOMIC);
+ if (!page)
+ return -ENOMEM;
+
+ fps->crc_eof_page = page;
+ fps->crc_eof_offset = 0;
+ }
+
+ get_page(page);
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+ fps->crc_eof_offset, tlen);
+ skb->len += tlen;
+ skb->data_len += tlen;
+ skb->truesize += tlen;
+ fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+ if (fps->crc_eof_offset >= PAGE_SIZE) {
+ fps->crc_eof_page = NULL;
+ fps->crc_eof_offset = 0;
+ put_page(page);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof);
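A hedged sketch of the caller's side, modeled on FCoE transmit paths of this kernel generation: for a nonlinear skb the trailer lands in the shared CRC/EOF page reserved above and is written through a temporary atomic mapping (fps would be this CPU's fcoe_percpu_s; the trailer fill itself is elided):

	int tlen = sizeof(struct fcoe_crc_eof);
	struct fcoe_crc_eof *cp;

	if (skb_is_nonlinear(skb)) {
		skb_frag_t *frag;

		if (fcoe_get_paged_crc_eof(skb, tlen, fps))
			return -ENOMEM;
		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
			+ frag->page_offset;
	} else {
		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
	}
	/* ... fill in cp->fcoe_eof and cp->fcoe_crc32 here ... */
	if (skb_is_nonlinear(skb))
		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);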
+
+/**
+ * fcoe_transport_lookup() - Find an fcoe transport that matches a netdev
+ * @netdev: The netdev to look for from all attached transports
+ *
+ * Returns: pointer to the fcoe transport that supports this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+
+ list_for_each_entry(ft, &fcoe_transports, list)
+ if (ft->match && ft->match(netdev))
+ return ft;
+ return NULL;
+}
+
+/**
+ * fcoe_transport_attach() - Attaches an FCoE transport
+ * @ft: The fcoe transport to be attached
+ *
+ * Returns: 0 for success
+ */
+int fcoe_transport_attach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already attached\n",
+ ft->name);
+ rc = -EEXIST;
+ goto out_attach;
+ }
+
+ /* Add the default transport to the tail so it is matched last */
+ if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT))
+ list_add(&ft->list, &fcoe_transports);
+ else
+ list_add_tail(&ft->list, &fcoe_transports);
+
+ ft->attached = true;
+ LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_transport_attach);
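For context, a minimal sketch of a consumer registering itself through this interface. The fields follow struct fcoe_transport as declared in <scsi/libfcoe.h>; the example_* callbacks are purely illustrative names:

	static struct fcoe_transport example_transport = {
		.name = "example",
		.attached = false,
		.list = LIST_HEAD_INIT(example_transport.list),
		.match = example_match,		/* "does this netdev belong to me?" */
		.create = example_create,
		.destroy = example_destroy,
		.enable = example_enable,
		.disable = example_disable,
	};

	static int __init example_init(void)
	{
		return fcoe_transport_attach(&example_transport);
	}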
+
+/**
+ * fcoe_transport_detach() - Detaches an FCoE transport
+ * @ft: The fcoe transport to be detached
+ *
+ * Returns: 0 for success
+ */
+int fcoe_transport_detach(struct fcoe_transport *ft)
+{
+ int rc = 0;
+
+ mutex_lock(&ft_mutex);
+ if (!ft->attached) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already detached\n",
+ ft->name);
+ rc = -ENODEV;
+ goto out_attach;
+ }
+
+ list_del(&ft->list);
+ ft->attached = false;
+ LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name);
+
+out_attach:
+ mutex_unlock(&ft_mutex);
+ return rc;
+
+}
+EXPORT_SYMBOL(fcoe_transport_detach);
+
+static int fcoe_transport_show(char *buffer, const struct kernel_param *kp)
+{
+ int i, j;
+ struct fcoe_transport *ft = NULL;
+
+ i = j = sprintf(buffer, "Attached FCoE transports:");
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list) {
+ i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name);
+ if (i >= PAGE_SIZE)
+ break;
+ }
+ mutex_unlock(&ft_mutex);
+ if (i == j)
+ i += snprintf(&buffer[i], IFNAMSIZ, "none");
+ return i;
+}
+
+static int __init fcoe_transport_init(void)
+{
+ register_netdevice_notifier(&libfcoe_notifier);
+ return 0;
+}
+
+static int __exit fcoe_transport_exit(void)
+{
+ struct fcoe_transport *ft;
+
+ unregister_netdevice_notifier(&libfcoe_notifier);
+ mutex_lock(&ft_mutex);
+ list_for_each_entry(ft, &fcoe_transports, list)
+ printk(KERN_ERR "FCoE transport %s is still attached!\n",
+ ft->name);
+ mutex_unlock(&ft_mutex);
+ return 0;
+}
+
+
+static int fcoe_add_netdev_mapping(struct net_device *netdev,
+ struct fcoe_transport *ft)
+{
+ struct fcoe_netdev_mapping *nm;
+
+ nm = kmalloc(sizeof(*nm), GFP_KERNEL);
+ if (!nm) {
+ printk(KERN_ERR "Unable to allocate netdev_mapping");
+ return -ENOMEM;
+ }
+
+ nm->netdev = netdev;
+ nm->ft = ft;
+
+ mutex_lock(&fn_mutex);
+ list_add(&nm->list, &fcoe_netdevs);
+ mutex_unlock(&fn_mutex);
+ return 0;
+}
+
+
+static void fcoe_del_netdev_mapping(struct net_device *netdev)
+{
+ struct fcoe_netdev_mapping *nm = NULL, *tmp;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) {
+ if (nm->netdev == netdev) {
+ list_del(&nm->list);
+ kfree(nm);
+ mutex_unlock(&fn_mutex);
+ return;
+ }
+ }
+ mutex_unlock(&fn_mutex);
+}
+
+
+/**
+ * fcoe_netdev_map_lookup() - Find the fcoe transport on which a netdev's
+ * FCoE instance was created
+ * @netdev: The netdev whose mapping is to be looked up
+ *
+ * Returns: pointer to the fcoe transport that owns this netdev or NULL
+ * if not found.
+ *
+ * The ft_mutex should be held when this is called
+ */
+static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev)
+{
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_netdev_mapping *nm;
+
+ mutex_lock(&fn_mutex);
+ list_for_each_entry(nm, &fcoe_netdevs, list) {
+ if (netdev == nm->netdev) {
+ ft = nm->ft;
+ mutex_unlock(&fn_mutex);
+ return ft;
+ }
+ }
+
+ mutex_unlock(&fn_mutex);
+ return NULL;
+}
+
+/**
+ * fcoe_if_to_netdev() - Parse a name buffer to get a net device
+ * @buffer: The name of the net device
+ *
+ * Returns: NULL or a pointer to the net_device with its reference
+ * count raised; the caller must balance it with dev_put()
+ */
+static struct net_device *fcoe_if_to_netdev(const char *buffer)
+{
+ char *cp;
+ char ifname[IFNAMSIZ + 2];
+
+ if (buffer) {
+ strlcpy(ifname, buffer, IFNAMSIZ);
+ cp = ifname + strlen(ifname);
+ while (--cp >= ifname && *cp == '\n')
+ *cp = '\0';
+ return dev_get_by_name(&init_net, ifname);
+ }
+ return NULL;
+}
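A short usage sketch: because the lookup goes through dev_get_by_name(), every successful call must be balanced with dev_put(), exactly as the sysfs handlers below do on their out_putdev paths:

	struct net_device *netdev;

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev)
		return -ENODEV;
	/* ... use netdev ... */
	dev_put(netdev);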
+
+/**
+ * libfcoe_device_notification() - Handler for net device events
+ * @notifier: The context of the notification
+ * @event: The type of event
+ * @ptr: The net device that the event was on
+ *
+ * This function is called from the netdevice notifier chain on net device
+ * events; it drops the netdev-to-transport mapping when an interface is
+ * unregistered.
+ *
+ * Returns: NOTIFY_OK
+ */
+static int libfcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct net_device *netdev = ptr;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n",
+ netdev->name);
+ fcoe_del_netdev_mapping(netdev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+
+/**
+ * fcoe_transport_create() - Create an FCoE interface
+ * @buffer: The name of the Ethernet interface to create on
+ * @kp: The associated kernel param
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's create function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_create(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ rc = fcoe_add_netdev_mapping(netdev, ft);
+ if (rc) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV;
+ if (rc)
+ fcoe_del_netdev_mapping(netdev);
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_destroy() - Destroy an FCoE interface
+ * @buffer: The name of the Ethernet interface to be destroyed
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs. This holds the ft_mutex while calling the
+ * registered fcoe transport's destroy function.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy ? ft->destroy(netdev) : -ENODEV;
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_disable() - Disables an FCoE interface
+ * @buffer: The name of the Ethernet interface to be disabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->disable ? ft->disable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * fcoe_transport_enable() - Enables an FCoE interface
+ * @buffer: The name of the Ethernet interface to be enabled
+ * @kp: The associated kernel parameter
+ *
+ * Called from sysfs.
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ if (!mutex_trylock(&ft_mutex))
+ return restart_syscall();
+
+#ifdef CONFIG_LIBFCOE_MODULE
+ /*
+ * Make sure the module has been initialized, and is not about to be
+ * removed. Module parameter sysfs files are writable before the
+ * module_init function is called and after module_exit.
+ */
+ if (THIS_MODULE->state != MODULE_STATE_LIVE)
+ goto out_nodev;
+#endif
+
+ netdev = fcoe_if_to_netdev(buffer);
+ if (!netdev)
+ goto out_nodev;
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft)
+ goto out_putdev;
+
+ rc = ft->enable ? ft->enable(netdev) : -ENODEV;
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc == -ERESTARTSYS)
+ return restart_syscall();
+ else
+ return rc;
+}
+
+/**
+ * libfcoe_init() - Initialization routine for libfcoe.ko
+ */
+static int __init libfcoe_init(void)
+{
+ fcoe_transport_init();
+
+ return 0;
+}
+module_init(libfcoe_init);
+
+/**
+ * libfcoe_exit() - Tear down libfcoe.ko
+ */
+static void __exit libfcoe_exit(void)
+{
+ fcoe_transport_exit();
+}
+module_exit(libfcoe_exit);
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
new file mode 100644
index 000000000000..6af5fc3a17d8
--- /dev/null
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -0,0 +1,31 @@
+#ifndef _FCOE_LIBFCOE_H_
+#define _FCOE_LIBFCOE_H_
+
+extern unsigned int libfcoe_debug_logging;
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+
+#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
+do { \
+ if (unlikely(libfcoe_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+} while (0)
+
+#define LIBFCOE_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
+ printk(KERN_INFO "libfcoe: " fmt, ##args);)
+
+#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
+ printk(KERN_INFO "host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
+
+#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
+ printk(KERN_INFO "%s: " fmt, \
+ __func__, ##args);)
+
+#endif /* _FCOE_LIBFCOE_H_ */
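A hedged usage sketch for these macros: the three bits gate independent log classes, so transport-level tracing can be enabled by loading the module with debug_logging=0x4 (or writing the sysfs parameter) and instrumenting call sites like this; the message text is illustrative:

	LIBFCOE_TRANSPORT_DBG("lookup for %s matched transport %s\n",
			      netdev->name, ft ? ft->name : "(none)");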
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 92f185081e62..671cde9d4060 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -37,7 +37,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.4.0.145"
+#define DRV_VERSION "1.5.0.1"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index db710148d156..b576be734e2e 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
vdev->linkstatus_pa);
if (vdev->stats)
pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_dev),
+ sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
pci_free_consistent(vdev->pdev,
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 12deffccb8da..415ad4fb50d4 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -74,6 +74,10 @@ static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
"Allow hpsa driver to access unknown HP Smart Array hardware");
+static int hpsa_simple_mode;
+module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_simple_mode,
+ "Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
@@ -85,11 +89,13 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
+ {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
@@ -109,11 +115,13 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324a103C, "Smart Array P712m", &SA5_access},
{0x324b103C, "Smart Array P711m", &SA5_access},
- {0x3250103C, "Smart Array", &SA5_access},
- {0x3250113C, "Smart Array", &SA5_access},
- {0x3250123C, "Smart Array", &SA5_access},
- {0x3250133C, "Smart Array", &SA5_access},
- {0x3250143C, "Smart Array", &SA5_access},
+ {0x3350103C, "Smart Array", &SA5_access},
+ {0x3351103C, "Smart Array", &SA5_access},
+ {0x3352103C, "Smart Array", &SA5_access},
+ {0x3353103C, "Smart Array", &SA5_access},
+ {0x3354103C, "Smart Array", &SA5_access},
+ {0x3355103C, "Smart Array", &SA5_access},
+ {0x3356103C, "Smart Array", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
@@ -147,17 +155,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);
-static ssize_t raid_level_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t lunid_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t unique_id_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-static ssize_t host_show_firmware_revision(struct device *dev,
- struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
-static ssize_t host_store_rescan(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count);
static int check_for_unit_attention(struct ctlr_info *h,
struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
@@ -173,47 +171,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
-
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(firmware_revision, S_IRUGO,
- host_show_firmware_revision, NULL);
-
-static struct device_attribute *hpsa_sdev_attrs[] = {
- &dev_attr_raid_level,
- &dev_attr_lunid,
- &dev_attr_unique_id,
- NULL,
-};
-
-static struct device_attribute *hpsa_shost_attrs[] = {
- &dev_attr_rescan,
- &dev_attr_firmware_revision,
- NULL,
-};
-
-static struct scsi_host_template hpsa_driver_template = {
- .module = THIS_MODULE,
- .name = "hpsa",
- .proc_name = "hpsa",
- .queuecommand = hpsa_scsi_queue_command,
- .scan_start = hpsa_scan_start,
- .scan_finished = hpsa_scan_finished,
- .change_queue_depth = hpsa_change_queue_depth,
- .this_id = -1,
- .use_clustering = ENABLE_CLUSTERING,
- .eh_device_reset_handler = hpsa_eh_device_reset_handler,
- .ioctl = hpsa_ioctl,
- .slave_alloc = hpsa_slave_alloc,
- .slave_destroy = hpsa_slave_destroy,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = hpsa_compat_ioctl,
-#endif
- .sdev_attrs = hpsa_sdev_attrs,
- .shost_attrs = hpsa_shost_attrs,
-};
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready);
+#define BOARD_NOT_READY 0
+#define BOARD_READY 1
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
@@ -291,67 +252,63 @@ static ssize_t host_show_firmware_revision(struct device *dev,
fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
-/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct hlist_head *list, struct CommandList *c)
+static ssize_t host_show_commands_outstanding(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- hlist_add_head(&c->list, list);
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ctlr_info *h = shost_to_hba(shost);
+
+ return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}
-static inline u32 next_command(struct ctlr_info *h)
+static ssize_t host_show_transport_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- u32 a;
-
- if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
- return h->access.command_completed(h);
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
- (h->reply_pool_head)++;
- h->commands_outstanding--;
- } else {
- a = FIFO_EMPTY;
- }
- /* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
- }
- return a;
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%s\n",
+ h->transMethod & CFGTBL_Trans_Performant ?
+ "performant" : "simple");
}
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
-{
- if (likely(h->transMethod == CFGTBL_Trans_Performant))
- c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+ 0x324a103C, /* Smart Array P712m */
+ 0x324b103C, /* SmartArray P711m */
+ 0x3223103C, /* Smart Array P800 */
+ 0x3234103C, /* Smart Array P400 */
+ 0x3235103C, /* Smart Array P400i */
+ 0x3211103C, /* Smart Array E200i */
+ 0x3212103C, /* Smart Array E200 */
+ 0x3213103C, /* Smart Array E200i */
+ 0x3214103C, /* Smart Array E200i */
+ 0x3215103C, /* Smart Array E200i */
+ 0x3237103C, /* Smart Array E500 */
+ 0x323D103C, /* Smart Array P700m */
+ 0x409C0E11, /* Smart Array 6400 */
+ 0x409D0E11, /* Smart Array 6400 EM */
+};
-static void enqueue_cmd_and_start_io(struct ctlr_info *h,
- struct CommandList *c)
+static int ctlr_is_resettable(struct ctlr_info *h)
{
- unsigned long flags;
+ int i;
- set_performant_mode(h, c);
- spin_lock_irqsave(&h->lock, flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- start_io(h);
- spin_unlock_irqrestore(&h->lock, flags);
+ for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+ if (unresettable_controller[i] == h->board_id)
+ return 0;
+ return 1;
}
-static inline void removeQ(struct CommandList *c)
+static ssize_t host_show_resettable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- if (WARN_ON(hlist_unhashed(&c->list)))
- return;
- hlist_del_init(&c->list);
-}
+ struct ctlr_info *h;
+ struct Scsi_Host *shost = class_to_shost(dev);
-static inline int is_hba_lunid(unsigned char scsi3addr[])
-{
- return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+ h = shost_to_hba(shost);
+ return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
@@ -359,15 +316,6 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
return (scsi3addr[3] & 0xC0) == 0x40;
}
-static inline int is_scsi_rev_5(struct ctlr_info *h)
-{
- if (!h->hba_inquiry_data)
- return 0;
- if ((h->hba_inquiry_data[2] & 0x07) == 5)
- return 1;
- return 0;
-}
-
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"UNKNOWN"
};
@@ -459,6 +407,129 @@ static ssize_t unique_id_show(struct device *dev,
sn[12], sn[13], sn[14], sn[15]);
}
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(firmware_revision, S_IRUGO,
+ host_show_firmware_revision, NULL);
+static DEVICE_ATTR(commands_outstanding, S_IRUGO,
+ host_show_commands_outstanding, NULL);
+static DEVICE_ATTR(transport_mode, S_IRUGO,
+ host_show_transport_mode, NULL);
+static DEVICE_ATTR(resettable, S_IRUGO,
+ host_show_resettable, NULL);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+ &dev_attr_raid_level,
+ &dev_attr_lunid,
+ &dev_attr_unique_id,
+ NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+ &dev_attr_rescan,
+ &dev_attr_firmware_revision,
+ &dev_attr_commands_outstanding,
+ &dev_attr_transport_mode,
+ &dev_attr_resettable,
+ NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+ .module = THIS_MODULE,
+ .name = "hpsa",
+ .proc_name = "hpsa",
+ .queuecommand = hpsa_scsi_queue_command,
+ .scan_start = hpsa_scan_start,
+ .scan_finished = hpsa_scan_finished,
+ .change_queue_depth = hpsa_change_queue_depth,
+ .this_id = -1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = hpsa_eh_device_reset_handler,
+ .ioctl = hpsa_ioctl,
+ .slave_alloc = hpsa_slave_alloc,
+ .slave_destroy = hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = hpsa_compat_ioctl,
+#endif
+ .sdev_attrs = hpsa_sdev_attrs,
+ .shost_attrs = hpsa_shost_attrs,
+};
+
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct list_head *list, struct CommandList *c)
+{
+ list_add_tail(&c->list, list);
+}
+
+static inline u32 next_command(struct ctlr_info *h)
+{
+ u32 a;
+
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return h->access.command_completed(h);
+
+ if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+ a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+ (h->reply_pool_head)++;
+ h->commands_outstanding--;
+ } else {
+ a = FIFO_EMPTY;
+ }
+ /* Check for wraparound */
+ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+ h->reply_pool_head = h->reply_pool;
+ h->reply_pool_wraparound ^= 1;
+ }
+ return a;
+}
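A brief restatement of the ring convention above, since it is easy to misread: the controller flips the low bit of every tag it writes on each pass through the reply ring, so a slot is fresh exactly when that bit equals reply_pool_wraparound, and the consumer toggles its expectation each time it wraps. In sketch form:

	/* slot is new when its producer-pass parity matches ours */
	bool fresh = (*h->reply_pool_head & 1) == h->reply_pool_wraparound;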
+
+/* set_performant_mode: Modify the tag for cciss performant mode.
+ * Set bit 0 for the pull model, bits 3-1 for the block fetch
+ * register number.
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+ if (likely(h->transMethod & CFGTBL_Trans_Performant))
+ c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+ struct CommandList *c)
+{
+ unsigned long flags;
+
+ set_performant_mode(h, c);
+ spin_lock_irqsave(&h->lock, flags);
+ addQ(&h->reqQ, c);
+ h->Qdepth++;
+ start_io(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+ if (WARN_ON(list_empty(&c->list)))
+ return;
+ list_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+ return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+ if (!h->hba_inquiry_data)
+ return 0;
+ if ((h->hba_inquiry_data[2] & 0x07) == 5)
+ return 1;
+ return 0;
+}
+
static int hpsa_find_target_lun(struct ctlr_info *h,
unsigned char scsi3addr[], int bus, int *target, int *lun)
{
@@ -1130,6 +1201,10 @@ static void complete_scsi_command(struct CommandList *cp,
cmd->result = DID_TIME_OUT << 16;
dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
break;
+ case CMD_UNABORTABLE:
+ cmd->result = DID_ERROR << 16;
+ dev_warn(&h->pdev->dev, "Command unabortable\n");
+ break;
default:
cmd->result = DID_ERROR << 16;
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1160,7 +1235,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
sh->sg_tablesize = h->maxsgentries;
h->scsi_host = sh;
sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[PERF_MODE_INT];
+ sh->irq = h->intr[h->intr_mode];
sh->unique_id = sh->irq;
error = scsi_add_host(sh, &h->pdev->dev);
if (error)
@@ -1295,6 +1370,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
case CMD_TIMEOUT:
dev_warn(d, "cp %p timed out\n", cp);
break;
+ case CMD_UNABORTABLE:
+ dev_warn(d, "Command unabortable\n");
+ break;
default:
dev_warn(d, "cp %p returned unknown status %x\n", cp,
ei->CommandStatus);
@@ -1595,6 +1673,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
return 0;
+ memset(scsi3addr, 0, 8);
+ scsi3addr[3] = target;
if (is_hba_lunid(scsi3addr))
return 0; /* Don't add the RAID controller here. */
@@ -1609,8 +1689,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
return 0;
}
- memset(scsi3addr, 0, 8);
- scsi3addr[3] = target;
if (hpsa_update_device_info(h, scsi3addr, this_device))
return 0;
(*nmsa2xxx_enclosures)++;
@@ -2199,7 +2277,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
c->cmdindex = i;
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2237,7 +2315,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
}
memset(c->err_info, 0, sizeof(*c->err_info));
- INIT_HLIST_NODE(&c->list);
+ INIT_LIST_HEAD(&c->list);
c->busaddr = (u32) cmd_dma_handle;
temp64.val = (u64) err_dma_handle;
c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2267,7 +2345,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
pci_free_consistent(h->pdev, sizeof(*c->err_info),
c->err_info, (dma_addr_t) temp64.val);
pci_free_consistent(h->pdev, sizeof(*c),
- c, (dma_addr_t) c->busaddr);
+ c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}
#ifdef CONFIG_COMPAT
@@ -2281,6 +2359,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2317,6 +2396,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
int err;
u32 cp;
+ memset(&arg64, 0, sizeof(arg64));
err = 0;
err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
sizeof(arg64.LUN_info));
@@ -2433,15 +2513,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (buff == NULL)
return -EFAULT;
- }
- if (iocommand.Request.Type.Direction == XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ if (iocommand.Request.Type.Direction == XFER_WRITE) {
+ /* Copy the data into the buffer we created */
+ if (copy_from_user(buff, iocommand.buf,
+ iocommand.buf_size)) {
+ kfree(buff);
+ return -EFAULT;
+ }
+ } else {
+ memset(buff, 0, iocommand.buf_size);
}
- } else
- memset(buff, 0, iocommand.buf_size);
+ }
c = cmd_special_alloc(h);
if (c == NULL) {
kfree(buff);
@@ -2487,8 +2569,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
cmd_special_free(h, c);
return -EFAULT;
}
-
- if (iocommand.Request.Type.Direction == XFER_READ) {
+ if (iocommand.Request.Type.Direction == XFER_READ &&
+ iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
kfree(buff);
@@ -2581,14 +2663,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c->cmd_type = CMD_IOCTL_PEND;
c->Header.ReplyQueue = 0;
-
- if (ioc->buf_size > 0) {
- c->Header.SGList = sg_used;
- c->Header.SGTotal = sg_used;
- } else {
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
+ c->Header.SGList = c->Header.SGTotal = sg_used;
memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
c->Header.Tag.lower = c->busaddr;
memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
@@ -2605,7 +2680,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
}
hpsa_scsi_do_simple_cmd_core(h, c);
- hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ if (sg_used)
+ hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
@@ -2614,7 +2690,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = -EFAULT;
goto cleanup1;
}
- if (ioc->Request.Type.Direction == XFER_READ) {
+ if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
/* Copy the data out of the buffer we created */
BYTE __user *ptr = ioc->buf;
for (i = 0; i < sg_used; i++) {
@@ -2810,8 +2886,8 @@ static void start_io(struct ctlr_info *h)
{
struct CommandList *c;
- while (!hlist_empty(&h->reqQ)) {
- c = hlist_entry(h->reqQ.first, struct CommandList, list);
+ while (!list_empty(&h->reqQ)) {
+ c = list_entry(h->reqQ.next, struct CommandList, list);
/* can't do anything if fifo is full */
if ((h->access.fifo_full(h))) {
dev_warn(&h->pdev->dev, "fifo full\n");
@@ -2867,20 +2943,22 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
static inline u32 hpsa_tag_contains_index(u32 tag)
{
-#define DIRECT_LOOKUP_BIT 0x10
return tag & DIRECT_LOOKUP_BIT;
}
static inline u32 hpsa_tag_to_index(u32 tag)
{
-#define DIRECT_LOOKUP_SHIFT 5
return tag >> DIRECT_LOOKUP_SHIFT;
}
-static inline u32 hpsa_tag_discard_error_bits(u32 tag)
+
+static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
-#define HPSA_ERROR_BITS 0x03
- return tag & ~HPSA_ERROR_BITS;
+#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define HPSA_SIMPLE_ERROR_BITS 0x03
+ if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+ return tag & ~HPSA_SIMPLE_ERROR_BITS;
+ return tag & ~HPSA_PERF_ERROR_BITS;
}
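Taken together with DIRECT_LOOKUP_SHIFT (5) and DIRECT_LOOKUP_BIT (0x10) from hpsa_cmd.h later in this patch, these helpers give the completion path its two cases; a condensed sketch following the process_*_cmd functions in this file:

	if (hpsa_tag_contains_index(raw_tag)) {
		/* indexed completion: bits 31:5 name the cmd_pool slot */
		c = h->cmd_pool + hpsa_tag_to_index(raw_tag);
	} else {
		/* otherwise match the error-bit-stripped tag against busaddr */
		tag = hpsa_tag_discard_error_bits(h, raw_tag);
	}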
/* process completion of an indexed ("direct lookup") command */
@@ -2904,10 +2982,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
{
u32 tag;
struct CommandList *c = NULL;
- struct hlist_node *tmp;
- tag = hpsa_tag_discard_error_bits(raw_tag);
- hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+ tag = hpsa_tag_discard_error_bits(h, raw_tag);
+ list_for_each_entry(c, &h->cmpQ, list) {
if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
finish_cmd(c, raw_tag);
return next_command(h);
@@ -2957,7 +3034,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Send a message CDB to the firmware. */
+/* Send a message CDB to the firmware. Careful, this only works
+ * in simple mode, not performant mode due to the tag lookup.
+ * We only ever use this immediately after a controller reset.
+ */
static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
unsigned char type)
{
@@ -3023,7 +3103,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if (hpsa_tag_discard_error_bits(tag) == paddr32)
+ if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
break;
msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
}
@@ -3055,38 +3135,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
#define hpsa_noop(p) hpsa_message(p, 3, 0)
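These wrappers are how the kdump path later in this patch verifies the controller is alive again after a hard reset; a hedged sketch of that retry loop, using the constants defined in hpsa.h:

	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;			/* firmware is answering again */
		msleep(HPSA_POST_RESET_PAUSE_MSECS);
	}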
-static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
- int pos;
- u16 control = 0;
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSI_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSI_FLAGS_ENABLE);
- }
- }
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, msi_control_reg(pos), &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev, msi_control_reg(pos),
- control & ~PCI_MSIX_FLAGS_ENABLE);
- }
- }
-
- return 0;
-}
-
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
void * __iomem vaddr, bool use_doorbell)
{
@@ -3142,17 +3190,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
*/
static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
- u16 saved_config_space[32];
u64 cfg_offset;
u32 cfg_base_addr;
u64 cfg_base_addr_index;
void __iomem *vaddr;
unsigned long paddr;
u32 misc_fw_support, active_transport;
- int rc, i;
+ int rc;
struct CfgTable __iomem *cfgtable;
bool use_doorbell;
u32 board_id;
+ u16 command_register;
/* For controllers as old as the P600, this is very nearly
* the same thing as
@@ -3162,14 +3210,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* pci_set_power_state(pci_dev, PCI_D0);
* pci_restore_state(pci_dev);
*
- * but we can't use these nice canned kernel routines on
- * kexec, because they also check the MSI/MSI-X state in PCI
- * configuration space and do the wrong thing when it is
- * set/cleared. Also, the pci_save/restore_state functions
- * violate the ordering requirements for restoring the
- * configuration space from the CCISS document (see the
- * comment below). So we roll our own ....
- *
* For controllers newer than the P600, the pci power state
* method of resetting doesn't work so we have another way
* using the doorbell register.
@@ -3182,13 +3222,21 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
* likely not be happy. Just forbid resetting this conjoined mess.
* The 640x isn't really supported by hpsa anyway.
*/
- hpsa_lookup_board_id(pdev, &board_id);
+ rc = hpsa_lookup_board_id(pdev, &board_id);
+ if (rc < 0) {
+ dev_warn(&pdev->dev, "Not resetting device.\n");
+ return -ENODEV;
+ }
if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
return -ENOTSUPP;
- for (i = 0; i < 32; i++)
- pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
-
+ /* Save the PCI command register */
+ pci_read_config_word(pdev, 4, &command_register);
+ /* Turn the board off. This is so that later pci_restore_state()
+ * won't turn the board on before the rest of config space is ready.
+ */
+ pci_disable_device(pdev);
+ pci_save_state(pdev);
/* find the first memory BAR, so we can find the cfg table */
rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
@@ -3214,46 +3262,47 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
misc_fw_support = readl(&cfgtable->misc_fw_support);
use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- /* The doorbell reset seems to cause lockups on some Smart
- * Arrays (e.g. P410, P410i, maybe others). Until this is
- * fixed or at least isolated, avoid the doorbell reset.
- */
- use_doorbell = 0;
-
rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
if (rc)
goto unmap_cfgtable;
- /* Restore the PCI configuration space. The Open CISS
- * Specification says, "Restore the PCI Configuration
- * Registers, offsets 00h through 60h. It is important to
- * restore the command register, 16-bits at offset 04h,
- * last. Do not restore the configuration status register,
- * 16-bits at offset 06h." Note that the offset is 2*i.
- */
- for (i = 0; i < 32; i++) {
- if (i == 2 || i == 3)
- continue;
- pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+ pci_restore_state(pdev);
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_warn(&pdev->dev, "failed to enable device.\n");
+ goto unmap_cfgtable;
}
- wmb();
- pci_write_config_word(pdev, 4, saved_config_space[2]);
+ pci_write_config_word(pdev, 4, command_register);
/* Some devices (notably the HP Smart Array 5i Controller)
need a little pause here */
msleep(HPSA_POST_RESET_PAUSE_MSECS);
+ /* Wait for board to become not ready, then ready. */
+ dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become not ready\n");
+ rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
+ if (rc) {
+ dev_warn(&pdev->dev,
+ "failed waiting for board to become ready\n");
+ goto unmap_cfgtable;
+ }
+ dev_info(&pdev->dev, "board ready.\n");
+
/* Controller should be in simple mode at this point. If it's not,
* It means we're on one of those controllers which doesn't support
* the doorbell reset method and on which the PCI power management reset
* method doesn't work (P800, for example.)
- * In those cases, pretend the reset worked and hope for the best.
+ * In those cases, don't try to proceed, as it generally doesn't work.
*/
active_transport = readl(&cfgtable->TransportActive);
if (active_transport & PERFORMANT_MODE) {
dev_warn(&pdev->dev, "Unable to successfully reset controller,"
- " proceeding anyway.\n");
- rc = -ENOTSUPP;
+ " Ignoring controller.\n");
+ rc = -ENODEV;
}
unmap_cfgtable:
@@ -3386,7 +3435,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
default_int_mode:
#endif /* CONFIG_PCI_MSI */
/* if we get here we're going to use the default interrupt mode */
- h->intr[PERF_MODE_INT] = h->pdev->irq;
+ h->intr[h->intr_mode] = h->pdev->irq;
}
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -3438,18 +3487,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+ void __iomem *vaddr, int wait_for_ready)
{
- int i;
+ int i, iterations;
u32 scratchpad;
+ if (wait_for_ready)
+ iterations = HPSA_BOARD_READY_ITERATIONS;
+ else
+ iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
- for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
- scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- if (scratchpad == HPSA_FIRMWARE_READY)
- return 0;
+ for (i = 0; i < iterations; i++) {
+ scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+ if (wait_for_ready) {
+ if (scratchpad == HPSA_FIRMWARE_READY)
+ return 0;
+ } else {
+ if (scratchpad != HPSA_FIRMWARE_READY)
+ return 0;
+ }
msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
}
- dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+ dev_warn(&pdev->dev, "board not ready, timed out.\n");
return -ENODEV;
}
@@ -3497,6 +3556,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+ /* Limit commands in memory limited kdump scenario. */
+ if (reset_devices && h->max_commands > 32)
+ h->max_commands = 32;
+
if (h->max_commands < 16) {
dev_warn(&h->pdev->dev, "Controller reports "
"max supported commands of %d, an obvious lie. "
@@ -3571,16 +3635,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
+ u32 doorbell_value;
+ unsigned long flags;
/* under certain very rare conditions, this can take awhile.
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
* as we enter this code.)
*/
for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+ spin_lock_irqsave(&h->lock, flags);
+ doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (!(doorbell_value & CFGTBL_ChangeReq))
break;
/* delay and try again */
- msleep(10);
+ usleep_range(10000, 20000);
}
}
@@ -3603,6 +3672,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
"unable to get board into simple mode\n");
return -ENODEV;
}
+ h->transMethod = CFGTBL_Trans_Simple;
return 0;
}
@@ -3641,7 +3711,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
err = -ENOMEM;
goto err_out_free_res;
}
- err = hpsa_wait_for_board_ready(h);
+ err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
if (err)
goto err_out_free_res;
err = hpsa_find_cfgtables(h);
@@ -3710,8 +3780,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
return 0; /* just try to do the kdump anyhow. */
if (rc)
return -ENODEV;
- if (hpsa_reset_msi(pdev))
- return -ENODEV;
/* Now try to get the controller to respond to a no-op */
for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
@@ -3749,8 +3817,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->pdev = pdev;
h->busy_initializing = 1;
- INIT_HLIST_HEAD(&h->cmpQ);
- INIT_HLIST_HEAD(&h->reqQ);
+ h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
+ INIT_LIST_HEAD(&h->cmpQ);
+ INIT_LIST_HEAD(&h->reqQ);
+ spin_lock_init(&h->lock);
+ spin_lock_init(&h->scan_lock);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
@@ -3777,20 +3848,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
h->access.set_intr_mask(h, HPSA_INTR_OFF);
if (h->msix_vector || h->msi_vector)
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
IRQF_DISABLED, h->devname, h);
else
- rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
+ rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx,
IRQF_DISABLED, h->devname, h);
if (rc) {
dev_err(&pdev->dev, "unable to get irq %d for %s\n",
- h->intr[PERF_MODE_INT], h->devname);
+ h->intr[h->intr_mode], h->devname);
goto clean2;
}
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
- h->intr[PERF_MODE_INT], dac ? "" : " not");
+ h->intr[h->intr_mode], dac ? "" : " not");
h->cmd_pool_bits =
kmalloc(((h->nr_cmds + BITS_PER_LONG -
@@ -3810,8 +3881,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
}
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
- spin_lock_init(&h->lock);
- spin_lock_init(&h->scan_lock);
init_waitqueue_head(&h->scan_wait_queue);
h->scan_finished = 1; /* no scan currently in progress */
@@ -3843,7 +3912,7 @@ clean4:
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
clean2:
clean1:
h->busy_initializing = 0;
@@ -3887,7 +3956,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
- free_irq(h->intr[PERF_MODE_INT], h);
+ free_irq(h->intr[h->intr_mode], h);
#ifdef CONFIG_PCI_MSI
if (h->msix_vector)
pci_disable_msix(h->pdev);
@@ -3989,7 +4058,8 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+ u32 use_short_tags)
{
int i;
unsigned long register_value;
@@ -4037,7 +4107,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
writel(0, &h->transtable->RepQCtrAddrHigh32);
writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant,
+ writel(CFGTBL_Trans_Performant | use_short_tags,
&(h->cfgtable->HostWrite.TransportRequest));
writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
hpsa_wait_for_mode_change_ack(h);
@@ -4047,12 +4117,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
" performant mode\n");
return;
}
+ /* Change the access methods to the performant access methods */
+ h->access = SA5_performant_access;
+ h->transMethod = CFGTBL_Trans_Performant;
}
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
+ if (hpsa_simple_mode)
+ return;
+
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
@@ -4072,11 +4148,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
|| (h->blockFetchTable == NULL))
goto clean_up;
- hpsa_enter_performant_mode(h);
-
- /* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
+ hpsa_enter_performant_mode(h,
+ trans_support & CFGTBL_Trans_use_short_tags);
return;
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 19586e189f0f..621a1530054a 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -72,11 +72,12 @@ struct ctlr_info {
unsigned int intr[4];
unsigned int msix_vector;
unsigned int msi_vector;
+ int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
struct access_method access;
/* queue and queue Info */
- struct hlist_head reqQ;
- struct hlist_head cmpQ;
+ struct list_head reqQ;
+ struct list_head cmpQ;
unsigned int Qdepth;
unsigned int maxQsinceinit;
unsigned int maxSG;
@@ -154,12 +155,16 @@ struct ctlr_info {
* HPSA_BOARD_READY_ITERATIONS are derived from those.
*/
#define HPSA_BOARD_READY_WAIT_SECS (120)
+#define HPSA_BOARD_NOT_READY_WAIT_SECS (10)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
+#define HPSA_BOARD_NOT_READY_ITERATIONS \
+ ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+ HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index f5c4c3cc0530..18464900e761 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -104,6 +104,7 @@
#define CFGTBL_Trans_Simple 0x00000002l
#define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
#define CFGTBL_BusType_Ultra2 0x00000001l
#define CFGTBL_BusType_Ultra3 0x00000002l
@@ -265,6 +266,7 @@ struct ErrorInfo {
#define DIRECT_LOOKUP_SHIFT 5
#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
#define HPSA_ERROR_BIT 0x02
struct ctlr_info; /* defined in hpsa.h */
@@ -291,7 +293,7 @@ struct CommandList {
struct ctlr_info *h;
int cmd_type;
long cmdindex;
- struct hlist_node list;
+ struct list_head list;
struct request *rq;
struct completion *waiting;
void *scsi_cmd;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d841e98a8bd5..0621238fac4a 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
ipr_clear_res_target(res);
list_move_tail(&res->queue, &ioa_cfg->free_res_q);
}
- } else if (!res->sdev) {
+ } else if (!res->sdev || res->del_from_ml) {
res->add_to_ml = 1;
if (ioa_cfg->allow_ml_add_del)
schedule_work(&ioa_cfg->work_q);
@@ -3104,7 +3104,10 @@ restart:
did_work = 1;
sdev = res->sdev;
if (!scsi_device_get(sdev)) {
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
scsi_remove_device(sdev);
scsi_device_put(sdev);
@@ -8864,7 +8867,7 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- flush_scheduled_work();
+ flush_work_sync(&ioa_cfg->work_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index fec47de72535..a860452a8f71 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
iscsi_sw_tcp_release_conn(conn);
}
-static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock,
- char *buf, int *port,
- int (*getname)(struct socket *,
- struct sockaddr *,
- int *addrlen))
-{
- struct sockaddr_storage *addr;
- struct sockaddr_in6 *sin6;
- struct sockaddr_in *sin;
- int rc = 0, len;
-
- addr = kmalloc(sizeof(*addr), GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- if (getname(sock, (struct sockaddr *) addr, &len)) {
- rc = -ENODEV;
- goto free_addr;
- }
-
- switch (addr->ss_family) {
- case AF_INET:
- sin = (struct sockaddr_in *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI4", &sin->sin_addr.s_addr);
- *port = be16_to_cpu(sin->sin_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- case AF_INET6:
- sin6 = (struct sockaddr_in6 *)addr;
- spin_lock_bh(&conn->session->lock);
- sprintf(buf, "%pI6", &sin6->sin6_addr);
- *port = be16_to_cpu(sin6->sin6_port);
- spin_unlock_bh(&conn->session->lock);
- break;
- }
-free_addr:
- kfree(addr);
- return rc;
-}
-
static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
int is_leading)
{
- struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
- struct iscsi_host *ihost = shost_priv(shost);
+ struct iscsi_session *session = cls_session->dd_data;
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
@@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
"sockfd_lookup failed %d\n", err);
return -EEXIST;
}
- /*
- * copy these values now because if we drop the session
- * userspace may still want to query the values since we will
- * be using them for the reconnect
- */
- err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address,
- &conn->portal_port, kernel_getpeername);
- if (err)
- goto free_socket;
-
- err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address,
- &ihost->local_port, kernel_getsockname);
- if (err)
- goto free_socket;
err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
goto free_socket;
+ spin_lock_bh(&session->lock);
/* bind iSCSI connection and socket */
tcp_sw_conn->sock = sock;
+ spin_unlock_bh(&session->lock);
/* setup Socket parameters */
sk = sock->sk;
@@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
struct iscsi_conn *conn = cls_conn->dd_data;
- int len;
+ struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+ struct sockaddr_in6 addr;
+ int rc, len;
switch(param) {
case ISCSI_PARAM_CONN_PORT:
- spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%hu\n", conn->portal_port);
- spin_unlock_bh(&conn->session->lock);
- break;
case ISCSI_PARAM_CONN_ADDRESS:
spin_lock_bh(&conn->session->lock);
- len = sprintf(buf, "%s\n", conn->portal_address);
+ if (!tcp_sw_conn || !tcp_sw_conn->sock) {
+ spin_unlock_bh(&conn->session->lock);
+ return -ENOTCONN;
+ }
+ rc = kernel_getpeername(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
spin_unlock_bh(&conn->session->lock);
- break;
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
default:
return iscsi_conn_get_param(cls_conn, param, buf);
}
- return len;
+ return 0;
+}
+
+static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
+ enum iscsi_host_param param, char *buf)
+{
+ struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
+ struct iscsi_session *session = tcp_sw_host->session;
+ struct iscsi_conn *conn;
+ struct iscsi_tcp_conn *tcp_conn;
+ struct iscsi_sw_tcp_conn *tcp_sw_conn;
+ struct sockaddr_in6 addr;
+ int rc, len;
+
+ switch (param) {
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ spin_lock_bh(&session->lock);
+ conn = session->leadconn;
+ if (!conn) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+ tcp_conn = conn->dd_data;
+
+ tcp_sw_conn = tcp_conn->dd_data;
+ if (!tcp_sw_conn->sock) {
+ spin_unlock_bh(&session->lock);
+ return -ENOTCONN;
+ }
+
+ rc = kernel_getsockname(tcp_sw_conn->sock,
+ (struct sockaddr *)&addr, &len);
+ spin_unlock_bh(&session->lock);
+ if (rc)
+ return rc;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &addr, param, buf);
+ default:
+ return iscsi_host_get_param(shost, param, buf);
+ }
+
+ return 0;
}
static void
@@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
+ struct iscsi_sw_tcp_host *tcp_sw_host;
struct Scsi_Host *shost;
if (ep) {
@@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
return NULL;
}
- shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1);
+ shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
+ sizeof(struct iscsi_sw_tcp_host), 1);
if (!shost)
return NULL;
shost->transportt = iscsi_sw_tcp_scsi_transport;
@@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
if (!cls_session)
goto remove_host;
session = cls_session->dd_data;
+ tcp_sw_host = iscsi_host_priv(shost);
+ tcp_sw_host->session = session;
shost->can_queue = session->scsi_cmds_max;
if (iscsi_tcp_r2tpool_alloc(session))
@@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_sw_tcp_conn_stop,
/* iscsi host params */
- .get_host_param = iscsi_host_get_param,
+ .get_host_param = iscsi_sw_tcp_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
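
The iscsi_tcp.c changes stop caching the portal address at bind time and instead query the live socket whenever userspace reads the parameter. A minimal sketch of that on-demand lookup, assuming the older kernel_getpeername() signature with an addrlen pointer (as in the patch) and a hypothetical demo_ helper name:

#include <linux/net.h>
#include <linux/socket.h>

static int demo_peer_addr(struct socket *sock, struct sockaddr_storage *ss)
{
	int len = sizeof(*ss);

	/* ask the socket itself; no stale cached copy to keep in sync */
	return kernel_getpeername(sock, (struct sockaddr *)ss, &len);
}
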
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 94644bad0ed7..666fe09378fa 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn {
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
};
+struct iscsi_sw_tcp_host {
+ struct iscsi_session *session;
+};
+
struct iscsi_sw_tcp_hdrbuf {
struct iscsi_hdr hdrbuf;
char hdrextbuf[ISCSI_MAX_AHS_SIZE +
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d21367d3305f..28231badd9e6 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -38,7 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order; /* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
-struct workqueue_struct *fc_exch_workqueue;
+static struct workqueue_struct *fc_exch_workqueue;
/*
* Structure and function definitions for managing Fibre Channel Exchanges
@@ -558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
return sp;
}
+/*
+ * Set the response handler for the exchange associated with a sequence.
+ */
+static void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *,
+ void *),
+ void *arg)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->resp = resp;
+ ep->arg = arg;
+ spin_unlock_bh(&ep->ex_lock);
+}
+
/**
* fc_seq_exch_abort() - Abort an exchange and sequence
* @req_sp: The sequence to be aborted
@@ -650,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work)
if (e_stat & ESB_ST_ABNORMAL)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
if (resp)
resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
- if (!rc) {
- /* delete the exchange if it's already being aborted */
- fc_exch_delete(ep);
- return;
- }
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
goto done;
}
@@ -1266,6 +1279,8 @@ free:
* @fp: The request frame
*
* On success, the sequence pointer will be returned and also in fr_seq(@fp).
+ * A reference will be held on the exchange/sequence for the caller, which
+ * must call fc_seq_release().
*/
static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
@@ -1283,6 +1298,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
}
/**
+ * fc_seq_release() - Release the hold on the exchange taken by fc_seq_assign()
+ * @sp: The sequence.
+ */
+static void fc_seq_release(struct fc_seq *sp)
+{
+ fc_exch_release(fc_seq_exch(sp));
+}
+
+/**
* fc_exch_recv_req() - Handler for an incoming request
* @lport: The local port that received the request
* @mp: The EM that the exchange is on
@@ -2151,6 +2175,7 @@ err:
fc_exch_mgr_del(ema);
return -ENOMEM;
}
+EXPORT_SYMBOL(fc_exch_mgr_list_clone);
/**
* fc_exch_mgr_alloc() - Allocate an exchange manager
@@ -2254,16 +2279,45 @@ void fc_exch_mgr_free(struct fc_lport *lport)
EXPORT_SYMBOL(fc_exch_mgr_free);
/**
+ * fc_find_ema() - Look up the appropriate Exchange Manager Anchor for a
+ * frame, based on its exchange ID (xid)
+ * @f_ctl: The frame control field of the received frame
+ * @lport: The local port the frame was received on
+ * @fh: The received frame header
+ */
+static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
+ struct fc_lport *lport,
+ struct fc_frame_header *fh)
+{
+ struct fc_exch_mgr_anchor *ema;
+ u16 xid;
+
+ if (f_ctl & FC_FC_EX_CTX)
+ xid = ntohs(fh->fh_ox_id);
+ else {
+ xid = ntohs(fh->fh_rx_id);
+ if (xid == FC_XID_UNKNOWN)
+ return list_entry(lport->ema_list.prev,
+ typeof(*ema), ema_list);
+ }
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if ((xid >= ema->mp->min_xid) &&
+ (xid <= ema->mp->max_xid))
+ return ema;
+ }
+ return NULL;
+}
+/**
* fc_exch_recv() - Handler for received frames
* @lport: The local port the frame was received on
- * @fp: The received frame
+ * @fp: The received frame
*/
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_exch_mgr_anchor *ema;
- u32 f_ctl, found = 0;
- u16 oxid;
+ u32 f_ctl;
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
@@ -2274,24 +2328,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
}
f_ctl = ntoh24(fh->fh_f_ctl);
- oxid = ntohs(fh->fh_ox_id);
- if (f_ctl & FC_FC_EX_CTX) {
- list_for_each_entry(ema, &lport->ema_list, ema_list) {
- if ((oxid >= ema->mp->min_xid) &&
- (oxid <= ema->mp->max_xid)) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- FC_LPORT_DBG(lport, "Received response for out "
- "of range oxid:%hx\n", oxid);
- fc_frame_free(fp);
- return;
- }
- } else
- ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list);
+ ema = fc_find_ema(f_ctl, lport, fh);
+ if (!ema) {
+ FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
+ "fc_ctl <0x%x>, xid <0x%x>\n",
+ f_ctl,
+ (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) :
+ ntohs(fh->fh_rx_id));
+ fc_frame_free(fp);
+ return;
+ }
/*
* If frame is marked invalid, just drop it.
@@ -2329,6 +2376,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_start_next)
lport->tt.seq_start_next = fc_seq_start_next;
+ if (!lport->tt.seq_set_resp)
+ lport->tt.seq_set_resp = fc_seq_set_resp;
+
if (!lport->tt.exch_seq_send)
lport->tt.exch_seq_send = fc_exch_seq_send;
@@ -2350,6 +2400,9 @@ int fc_exch_init(struct fc_lport *lport)
if (!lport->tt.seq_assign)
lport->tt.seq_assign = fc_seq_assign;
+ if (!lport->tt.seq_release)
+ lport->tt.seq_release = fc_seq_release;
+
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
@@ -2357,7 +2410,7 @@ EXPORT_SYMBOL(fc_exch_init);
/**
* fc_setup_exch_mgr() - Setup an exchange manager
*/
-int fc_setup_exch_mgr()
+int fc_setup_exch_mgr(void)
{
fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
0, SLAB_HWCACHE_ALIGN, NULL);
@@ -2395,7 +2448,7 @@ int fc_setup_exch_mgr()
/**
* fc_destroy_exch_mgr() - Destroy an exchange manager
*/
-void fc_destroy_exch_mgr()
+void fc_destroy_exch_mgr(void)
{
destroy_workqueue(fc_exch_workqueue);
kmem_cache_destroy(fc_em_cachep);
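
fc_find_ema() above factors the per-frame lookup into a range scan over the lport's exchange-manager anchors. A condensed sketch of the same search with hypothetical demo_* types:

#include <linux/types.h>
#include <linux/list.h>

struct demo_mgr {
	u16 min_xid, max_xid;		/* xid range this EM owns */
};

struct demo_anchor {
	struct list_head ema_list;
	struct demo_mgr *mp;
};

static struct demo_anchor *demo_find(struct list_head *head, u16 xid)
{
	struct demo_anchor *ema;

	list_for_each_entry(ema, head, ema_list)
		if (xid >= ema->mp->min_xid && xid <= ema->mp->max_xid)
			return ema;	/* xid falls inside this EM's range */
	return NULL;			/* caller frees the frame */
}
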
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 5962d1a5a674..b1b03af158bf 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -42,7 +42,7 @@
#include "fc_libfc.h"
-struct kmem_cache *scsi_pkt_cachep;
+static struct kmem_cache *scsi_pkt_cachep;
/* SRB state definitions */
#define FC_SRB_FREE 0 /* cmd is free */
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
if (fsp) {
memset(fsp, 0, sizeof(*fsp));
fsp->lp = lport;
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
INIT_LIST_HEAD(&fsp->list);
@@ -1201,6 +1202,7 @@ unlock:
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
{
int rc = FAILED;
+ unsigned long ticks_left;
if (fc_fcp_send_abort(fsp))
return FAILED;
@@ -1209,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
fsp->wait_for_comp = 1;
spin_unlock_bh(&fsp->scsi_pkt_lock);
- rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+ ticks_left = wait_for_completion_timeout(&fsp->tm_done,
+ FC_SCSI_TM_TOV);
spin_lock_bh(&fsp->scsi_pkt_lock);
fsp->wait_for_comp = 0;
- if (!rc) {
+ if (!ticks_left) {
FC_FCP_DBG(fsp, "target abort cmd failed\n");
- rc = FAILED;
} else if (fsp->state & FC_SRB_ABORTED) {
FC_FCP_DBG(fsp, "target abort cmd passed\n");
rc = SUCCESS;
@@ -1321,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
*
* scsi-eh will escalate for when either happens.
*/
- goto out;
+ return;
}
if (fc_fcp_lock_pkt(fsp))
@@ -1787,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
/**
* fc_queuecommand() - The queuecommand function of the SCSI template
+ * @shost: The Scsi_Host that the command was issued to
* @cmd: The scsi_cmnd to be executed
- * @done: The callback function to be called when the scsi_cmnd is complete
*
- * This is the i/o strategy routine, called by the SCSI layer. This routine
- * is called with the host_lock held.
+ * This is the i/o strategy routine, called by the SCSI layer.
*/
-static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
{
- struct fc_lport *lport;
+ struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
struct fc_rport_libfc_priv *rpriv;
@@ -1803,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
int rc = 0;
struct fcoe_dev_stats *stats;
- lport = shost_priv(sc_cmd->device->host);
-
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
return 0;
}
- spin_unlock_irq(lport->host->host_lock);
if (!*(struct fc_remote_port **)rport->dd_data) {
/*
@@ -1819,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
- done(sc_cmd);
+ sc_cmd->scsi_done(sc_cmd);
goto out;
}
@@ -1842,10 +1840,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
* build the libfc request pkt
*/
fsp->cmd = sc_cmd; /* save the cmd */
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
- fsp->xfer_ddp = FC_XID_UNKNOWN;
- sc_cmd->scsi_done = done;
/*
* set up the transfer length
@@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs
rc = SCSI_MLQUEUE_HOST_BUSY;
}
out:
- spin_lock_irq(lport->host->host_lock);
return rc;
}
-
-DEF_SCSI_QCMD(fc_queuecommand)
EXPORT_SYMBOL(fc_queuecommand);
/**
@@ -2112,7 +2104,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
* the sc passed in is not setup for execution like when sent
* through the queuecommand callout.
*/
- fsp->lp = lport; /* save the softc ptr */
fsp->rport = rport; /* set the remote port ptr */
/*
@@ -2245,7 +2236,7 @@ void fc_fcp_destroy(struct fc_lport *lport)
}
EXPORT_SYMBOL(fc_fcp_destroy);
-int fc_setup_fcp()
+int fc_setup_fcp(void)
{
int rc = 0;
@@ -2261,7 +2252,7 @@ int fc_setup_fcp()
return rc;
}
-void fc_destroy_fcp()
+void fc_destroy_fcp(void)
{
if (scsi_pkt_cachep)
kmem_cache_destroy(scsi_pkt_cachep);
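
The fc_queuecommand() conversion above drops the lock-held queuecommand convention: the host_lock is no longer taken by the midlayer, so the unlock/relock pair and the DEF_SCSI_QCMD() wrapper disappear, and early completion goes through sc_cmd->scsi_done. A hedged sketch of the new-style entry point with a hypothetical demo_port private structure:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct demo_port {
	bool link_up;			/* hypothetical link state */
};

static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc)
{
	struct demo_port *port = shost_priv(shost);

	if (!port->link_up) {
		sc->result = DID_NO_CONNECT << 16;
		sc->scsi_done(sc);	/* complete without queueing */
		return 0;
	}
	/* ... build and send the request; no host_lock juggling needed ... */
	return 0;
}
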
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 6a48c28e4420..b7735129f1f3 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -35,6 +35,27 @@ unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+DEFINE_MUTEX(fc_prov_mutex);
+static LIST_HEAD(fc_local_ports);
+struct blocking_notifier_head fc_lport_notifier_head =
+ BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
+EXPORT_SYMBOL(fc_lport_notifier_head);
+
+/*
+ * Providers which primarily send requests and PRLIs.
+ */
+struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
+ [0] = &fc_rport_t0_prov,
+ [FC_TYPE_FCP] = &fc_rport_fcp_init,
+};
+
+/*
+ * Providers which receive requests.
+ */
+struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
+ [FC_TYPE_ELS] = &fc_lport_els_prov,
+};
+
/**
* libfc_init() - Initialize libfc.ko
*/
@@ -210,3 +231,102 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
+
+/**
+ * fc_fc4_conf_lport_params() - Modify the "service_params" of the specified
+ * lport if a service provider (target provider) is registered with libfc
+ * for the specified FC-4 type
+ * @lport: Local port whose service_params need to be modified
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ */
+void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
+{
+ struct fc4_prov *prov_entry;
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ BUG_ON(!lport);
+ prov_entry = fc_passive_prov[type];
+ if (type == FC_TYPE_FCP) {
+ if (prov_entry && prov_entry->recv)
+ lport->service_params |= FCP_SPPF_TARG_FCN;
+ }
+}
+
+void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
+{
+ struct fc_lport *lport;
+
+ mutex_lock(&fc_prov_mutex);
+ list_for_each_entry(lport, &fc_local_ports, lport_list)
+ notify(lport, arg);
+ mutex_unlock(&fc_prov_mutex);
+}
+EXPORT_SYMBOL(fc_lport_iterate);
+
+/**
+ * fc_fc4_register_provider() - register FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ struct fc4_prov **prov_entry;
+ int ret = 0;
+
+ if (type >= FC_FC4_PROV_SIZE)
+ return -EINVAL;
+ mutex_lock(&fc_prov_mutex);
+ prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
+ if (*prov_entry)
+ ret = -EBUSY;
+ else
+ *prov_entry = prov;
+ mutex_unlock(&fc_prov_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(fc_fc4_register_provider);
+
+/**
+ * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ */
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ mutex_lock(&fc_prov_mutex);
+ if (prov->recv)
+ rcu_assign_pointer(fc_passive_prov[type], NULL);
+ else
+ rcu_assign_pointer(fc_active_prov[type], NULL);
+ mutex_unlock(&fc_prov_mutex);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(fc_fc4_deregister_provider);
+
+/**
+ * fc_fc4_add_lport() - add new local port to list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_add_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_add_tail(&lport->lport_list, &fc_local_ports);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_ADD, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
+
+/**
+ * fc_fc4_del_lport() - remove local port from list and run notifiers.
+ * @lport: The local port being removed.
+ */
+void fc_fc4_del_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_del(&lport->lport_list);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_DEL, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
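
The provider table above is what an FC-4 upper-level module registers into. A hedged sketch of a hypothetical target module using the hooks the patch dereferences (.module, .recv, .prli), assuming the declarations are exported via scsi/libfc.h:

#include <linux/module.h>
#include <scsi/libfc.h>

static void demo_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	fc_frame_free(fp);		/* consume frames we do not handle */
}

static int demo_prli(struct fc_rport_priv *rdata, u32 spp_len,
		     const struct fc_els_spp *rspp, struct fc_els_spp *spp)
{
	return FC_SPP_RESP_ACK;		/* accept the service param page */
}

static struct fc4_prov demo_prov = {
	.module	= THIS_MODULE,		/* pinned via try_module_get() */
	.recv	= demo_recv,		/* .recv set => passive provider */
	.prli	= demo_prli,
};

static int __init demo_init(void)
{
	/* returns -EBUSY if another provider already owns this type */
	return fc_fc4_register_provider(FC_TYPE_FCP, &demo_prov);
}

static void __exit demo_exit(void)
{
	fc_fc4_deregister_provider(FC_TYPE_FCP, &demo_prov);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
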
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index eea0c3541b71..fedc819d70c0 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -94,6 +94,17 @@ extern unsigned int fc_debug_logging;
(lport)->host->host_no, ##args))
/*
+ * FC-4 Providers.
+ */
+extern struct fc4_prov *fc_active_prov[]; /* providers without recv */
+extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */
+extern struct mutex fc_prov_mutex; /* lock over table changes */
+
+extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */
+extern struct fc4_prov fc_lport_els_prov; /* ELS provider */
+extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
+
+/*
* Set up direct-data placement for this I/O request
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
@@ -112,6 +123,9 @@ void fc_destroy_fcp(void);
* Internal libfc functions
*/
const char *fc_els_resp_type(struct fc_frame *);
+extern void fc_fc4_add_lport(struct fc_lport *);
+extern void fc_fc4_del_lport(struct fc_lport *);
+extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
/*
* Copies a buffer into an sg list
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c5a10f94f845..8c08b210001d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport)
lport->tt.fcp_abort_io(lport);
lport->tt.disc_stop_final(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
+ fc_fc4_del_lport(lport);
return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);
@@ -849,7 +850,7 @@ out:
}
/**
- * fc_lport_recv_req() - The generic lport request handler
+ * fc_lport_recv_els_req() - The generic lport ELS request handler
* @lport: The local port that received the request
* @fp: The request frame
*
@@ -859,9 +860,9 @@ out:
* Locking Note: This function should not be called with the lport
* lock held because it will grab the lock.
*/
-static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_lport_recv_els_req(struct fc_lport *lport,
+ struct fc_frame *fp)
{
- struct fc_frame_header *fh = fc_frame_header_get(fp);
void (*recv)(struct fc_lport *, struct fc_frame *);
mutex_lock(&lport->lp_mutex);
@@ -873,8 +874,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
*/
if (!lport->link_up)
fc_frame_free(fp);
- else if (fh->fh_type == FC_TYPE_ELS &&
- fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+ else {
/*
* Check opcode.
*/
@@ -903,14 +903,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
}
recv(lport, fp);
- } else {
- FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
- fr_eof(fp));
- fc_frame_free(fp);
}
mutex_unlock(&lport->lp_mutex);
}
+static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out)
+{
+ return FC_SPP_RESP_INVL;
+}
+
+struct fc4_prov fc_lport_els_prov = {
+ .prli = fc_lport_els_prli,
+ .recv = fc_lport_recv_els_req,
+};
+
+/**
+ * fc_lport_recv_req() - The generic lport request handler
+ * @lport: The lport that received the request
+ * @fp: The frame the request is in
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it may grab the lock.
+ */
+static void fc_lport_recv_req(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = fr_seq(fp);
+ struct fc4_prov *prov;
+
+ /*
+ * Use RCU read lock and module_lock to be sure module doesn't
+ * deregister and get unloaded while we're calling it.
+ * try_module_get() is inlined and accepts a NULL parameter.
+ * Only ELSes and FCP target ops should come through here.
+ * The locking is unfortunate, and a better scheme is being sought.
+ */
+
+ rcu_read_lock();
+ if (fh->fh_type >= FC_FC4_PROV_SIZE)
+ goto drop;
+ prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
+ if (!prov || !try_module_get(prov->module))
+ goto drop;
+ rcu_read_unlock();
+ prov->recv(lport, fp);
+ module_put(prov->module);
+ return;
+drop:
+ rcu_read_unlock();
+ FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
+ fc_frame_free(fp);
+ lport->tt.exch_done(sp);
+}
+
/**
* fc_lport_reset() - Reset a local port
* @lport: The local port which should be reset
@@ -1542,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport)
*/
int fc_lport_config(struct fc_lport *lport)
{
+ INIT_LIST_HEAD(&lport->ema_list);
INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
mutex_init(&lport->lp_mutex);
@@ -1549,6 +1598,7 @@ int fc_lport_config(struct fc_lport *lport)
fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+ fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
return 0;
}
@@ -1586,6 +1636,7 @@ int fc_lport_init(struct fc_lport *lport)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ fc_fc4_add_lport(lport);
return 0;
}
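
fc_lport_recv_req() above dispatches through an RCU-protected provider table while pinning the provider's module. A condensed sketch of that pattern with hypothetical demo_* names (try_module_get(NULL) succeeds for built-in providers, as the patch's comment notes):

#include <linux/module.h>
#include <linux/rcupdate.h>

struct demo_prov {
	struct module *module;
	void (*recv)(void *arg);
};

static struct demo_prov __rcu *demo_table[8];

static void demo_dispatch(unsigned int type, void *arg)
{
	struct demo_prov *prov;

	rcu_read_lock();
	prov = rcu_dereference(demo_table[type]);
	if (!prov || !try_module_get(prov->module)) {
		rcu_read_unlock();
		return;			/* no provider, or it is unloading */
	}
	rcu_read_unlock();
	prov->recv(arg);		/* safe: module refcount is held */
	module_put(prov->module);
}
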
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
index dd2b43bb1c70..f33b897e4784 100644
--- a/drivers/scsi/libfc/fc_npiv.c
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
vn_port = libfc_host_alloc(shost->hostt, privsize);
if (!vn_port)
- goto err_out;
- if (fc_exch_mgr_list_clone(n_port, vn_port))
- goto err_put;
+ return vn_port;
vn_port->vport = vport;
vport->dd_data = vn_port;
@@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
mutex_unlock(&n_port->lp_mutex);
return vn_port;
-
-err_put:
- scsi_host_put(vn_port->host);
-err_out:
- return NULL;
}
EXPORT_SYMBOL(libfc_vport_create);
@@ -86,6 +79,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
return lport;
}
+EXPORT_SYMBOL(fc_vport_id_lookup);
/*
* When setting the link state of vports during an lport state change, it's
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index a7175adab32d..49e1ccca09d5 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -58,7 +58,7 @@
#include "fc_libfc.h"
-struct workqueue_struct *rport_event_queue;
+static struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_flogi(struct fc_rport_priv *);
static void fc_rport_enter_plogi(struct fc_rport_priv *);
@@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
INIT_WORK(&rdata->event_work, fc_rport_work);
- if (port_id != FC_FID_DIR_SERV)
+ if (port_id != FC_FID_DIR_SERV) {
+ rdata->lld_event_callback = lport->tt.rport_event_callback;
list_add_rcu(&rdata->peers, &lport->disc.rports);
+ }
return rdata;
}
@@ -257,6 +259,8 @@ static void fc_rport_work(struct work_struct *work)
struct fc_rport_operations *rport_ops;
struct fc_rport_identifiers ids;
struct fc_rport *rport;
+ struct fc4_prov *prov;
+ u8 type;
mutex_lock(&rdata->rp_mutex);
event = rdata->event;
@@ -300,12 +304,25 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
kref_put(&rdata->kref, lport->tt.rport_destroy);
break;
case RPORT_EV_FAILED:
case RPORT_EV_LOGO:
case RPORT_EV_STOP:
+ if (rdata->prli_count) {
+ mutex_lock(&fc_prov_mutex);
+ for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
+ prov = fc_passive_prov[type];
+ if (prov && prov->prlo)
+ prov->prlo(rdata);
+ }
+ mutex_unlock(&fc_prov_mutex);
+ }
port_id = rdata->ids.port_id;
mutex_unlock(&rdata->rp_mutex);
@@ -313,6 +330,10 @@ static void fc_rport_work(struct work_struct *work)
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
cancel_delayed_work_sync(&rdata->retry_work);
/*
@@ -336,6 +357,7 @@ static void fc_rport_work(struct work_struct *work)
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, lport->tt.rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
@@ -575,7 +597,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
- return fc_rport_error(rdata, fp);
+ goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
@@ -588,7 +610,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata,
return;
}
- return fc_rport_error(rdata, fp);
+out:
+ fc_rport_error(rdata, fp);
}
/**
@@ -878,6 +901,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
+ /* save plogi response sp_features for further reference */
+ rdata->sp_features = ntohs(plp->fl_csp.sp_features);
+
if (lport->point_to_multipoint)
fc_rport_login_complete(rdata, fp);
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
@@ -949,6 +975,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_prli prli;
struct fc_els_spp spp;
} *pp;
+ struct fc_els_spp temp_spp;
+ struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
@@ -983,6 +1011,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n",
pp->spp.spp_flags);
+ rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
fc_rport_error(rdata, fp);
@@ -996,6 +1025,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
rdata->flags |= FC_RP_FLAGS_RETRY;
+ if (fcp_parm & FCP_SPPF_CONF_COMPL)
+ rdata->flags |= FC_RP_FLAGS_CONF_REQ;
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
@@ -1033,6 +1071,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_els_spp spp;
} *pp;
struct fc_frame *fp;
+ struct fc4_prov *prov;
/*
* If the rport is one of the well known addresses
@@ -1054,9 +1093,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
return;
}
- if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
- fc_rport_prli_resp, rdata,
- 2 * lport->r_a_tov))
+ fc_prli_fill(lport, fp);
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
+ }
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
+ fc_host_port_id(lport->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov))
fc_rport_error_retry(rdata, NULL);
else
kref_get(&rdata->kref);
@@ -1642,9 +1692,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
+ enum fc_els_spp_resp passive;
struct fc_seq_els_data rjt_data;
- u32 fcp_parm;
- u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ struct fc4_prov *prov;
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
@@ -1678,46 +1728,42 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
pp->prli.prli_len = htons(len);
len -= sizeof(struct fc_els_prli);
- /* reinitialize remote port roles */
- rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
/*
* Go through all the service parameter pages and build
* response. If plen indicates longer SPP than standard,
* use that. The entire response has been pre-cleared above.
*/
spp = &pp->spp;
+ mutex_lock(&fc_prov_mutex);
while (len >= plen) {
+ rdata->spp_type = rspp->spp_type;
spp->spp_type = rspp->spp_type;
spp->spp_type_ext = rspp->spp_type_ext;
- spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
- resp = FC_SPP_RESP_ACK;
-
- switch (rspp->spp_type) {
- case 0: /* common to all FC-4 types */
- break;
- case FC_TYPE_FCP:
- fcp_parm = ntohl(rspp->spp_params);
- if (fcp_parm & FCP_SPPF_RETRY)
- rdata->flags |= FC_RP_FLAGS_RETRY;
- rdata->supported_classes = FC_COS_CLASS3;
- if (fcp_parm & FCP_SPPF_INIT_FCN)
- roles |= FC_RPORT_ROLE_FCP_INITIATOR;
- if (fcp_parm & FCP_SPPF_TARG_FCN)
- roles |= FC_RPORT_ROLE_FCP_TARGET;
- rdata->ids.roles = roles;
-
- spp->spp_params = htonl(lport->service_params);
- break;
- default:
- resp = FC_SPP_RESP_INVL;
- break;
+ resp = 0;
+
+ if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_active_prov[rspp->spp_type];
+ if (prov)
+ resp = prov->prli(rdata, plen, rspp, spp);
+ prov = fc_passive_prov[rspp->spp_type];
+ if (prov) {
+ passive = prov->prli(rdata, plen, rspp, spp);
+ if (!resp || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ }
+ }
+ if (!resp) {
+ if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ resp |= FC_SPP_RESP_CONF;
+ else
+ resp |= FC_SPP_RESP_INVL;
}
spp->spp_flags |= resp;
len -= plen;
rspp = (struct fc_els_spp *)((char *)rspp + plen);
spp = (struct fc_els_spp *)((char *)spp + plen);
}
+ mutex_unlock(&fc_prov_mutex);
/*
* Send LS_ACC. If this fails, the originator should retry.
@@ -1887,9 +1933,82 @@ int fc_rport_init(struct fc_lport *lport)
EXPORT_SYMBOL(fc_rport_init);
/**
+ * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns the value for the response code to be placed in spp_flags,
+ * or 0 if not an initiator.
+ */
+static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ u32 fcp_parm;
+
+ fcp_parm = ntohl(rspp->spp_params);
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+
+ if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
+ return 0;
+
+ spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+
+ /*
+ * OR in our service parameters with other providers (target), if any.
+ */
+ fcp_parm = ntohl(spp->spp_params);
+ spp->spp_params = htonl(fcp_parm | lport->service_params);
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for FCP initiator.
+ */
+struct fc4_prov fc_rport_fcp_init = {
+ .prli = fc_rport_fcp_prli,
+};
+
+/**
+ * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ */
+static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ return FC_SPP_RESP_INVL;
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for type 0 service parameters.
+ *
+ * This handles the special case of type 0 which is always successful
+ * but doesn't do anything otherwise.
+ */
+struct fc4_prov fc_rport_t0_prov = {
+ .prli = fc_rport_t0_prli,
+};
+
+/**
* fc_setup_rport() - Initialize the rport_event_queue
*/
-int fc_setup_rport()
+int fc_setup_rport(void)
{
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
if (!rport_event_queue)
@@ -1900,7 +2019,7 @@ int fc_setup_rport()
/**
* fc_destroy_rport() - Destroy the rport_event_queue
*/
-void fc_destroy_rport()
+void fc_destroy_rport(void)
{
destroy_workqueue(rport_event_queue);
}
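
The PRLI rework above merges verdicts from the active and passive providers for each service parameter page, with a CONF/INVL fallback when nobody claims the page. A condensed sketch of that merge, assuming 0 means "no provider or no opinion" and using the FC_SPP_* constants from scsi/fc/fc_els.h:

#include <linux/types.h>
#include <scsi/fc/fc_els.h>

static unsigned int demo_spp_resp(unsigned int active, unsigned int passive,
				  u32 spp_flags)
{
	unsigned int resp = active;

	/* the passive (target) side wins when it ACKs or active is silent */
	if (!resp || passive == FC_SPP_RESP_ACK)
		resp = passive;
	if (!resp)			/* no provider claimed the page */
		resp = (spp_flags & FC_SPP_EST_IMG_PAIR) ?
			FC_SPP_RESP_CONF : FC_SPP_RESP_INVL;
	return resp;
}
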
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index da8b61543ee4..0c550d5b9133 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
}
EXPORT_SYMBOL_GPL(iscsi_session_get_param);
+int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
+ enum iscsi_param param, char *buf)
+{
+ struct sockaddr_in6 *sin6 = NULL;
+ struct sockaddr_in *sin = NULL;
+ int len;
+
+ switch (addr->ss_family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)addr;
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case ISCSI_PARAM_CONN_ADDRESS:
+ case ISCSI_HOST_PARAM_IPADDRESS:
+ if (sin)
+ len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr);
+ else
+ len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
+ break;
+ case ISCSI_PARAM_CONN_PORT:
+ if (sin)
+ len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
+ else
+ len = sprintf(buf, "%hu\n",
+ be16_to_cpu(sin6->sin6_port));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return len;
+}
+EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param);
+
int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf)
{
@@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
case ISCSI_HOST_PARAM_INITIATOR_NAME:
len = sprintf(buf, "%s\n", ihost->initiatorname);
break;
- case ISCSI_HOST_PARAM_IPADDRESS:
- len = sprintf(buf, "%s\n", ihost->local_address);
- break;
default:
return -ENOSYS;
}
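
iscsi_conn_get_addr_param() above centralizes the sockaddr-to-text formatting that each transport used to duplicate. A minimal usage sketch with a hypothetical demo_ helper, assuming the helper is declared in scsi/libiscsi.h and the param enums in scsi/iscsi_if.h:

#include <linux/socket.h>
#include <scsi/iscsi_if.h>
#include <scsi/libiscsi.h>

static int demo_show_port(struct sockaddr_storage *ss, char *buf)
{
	/* prints "%hu\n" for AF_INET/AF_INET6, returns -EINVAL otherwise */
	return iscsi_conn_get_addr_param(ss, ISCSI_PARAM_CONN_PORT, buf);
}
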
diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig
index 18f33cd54411..9dafe64e7c7a 100644
--- a/drivers/scsi/libsas/Kconfig
+++ b/drivers/scsi/libsas/Kconfig
@@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP
Allows sas hosts to receive SMP frames. Selecting this
option builds an SMP interpreter into libsas. Say
N here if you want to save the few kb this consumes.
-
-config SCSI_SAS_LIBSAS_DEBUG
- bool "Compile the SAS Domain Transport Attributes in debug mode"
- default y
- depends on SCSI_SAS_LIBSAS
- help
- Compiles the SAS Layer in debug mode. In debug mode, the
- SAS Layer prints diagnostic and debug messages.
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile
index 1ad1323c60fa..566a10024598 100644
--- a/drivers/scsi/libsas/Makefile
+++ b/drivers/scsi/libsas/Makefile
@@ -21,10 +21,6 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
-ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y)
- EXTRA_CFLAGS += -DSAS_DEBUG
-endif
-
obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o
libsas-y += sas_init.o \
sas_phy.o \
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4d3b704ede1c..31fc21f4d831 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
case SAS_SG_ERR:
return AC_ERR_INVALID;
- case SAM_STAT_CHECK_CONDITION:
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
SAS_DPRINTK("%s: Saw error %d. What to do?\n",
__func__, ts->stat);
return AC_ERR_OTHER;
+ case SAM_STAT_CHECK_CONDITION:
case SAS_ABORTED_TASK:
return AC_ERR_DEV;
@@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task)
sas_ha = dev->port->ha;
spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
- if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) {
+ if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
+ ((stat->stat == SAM_STAT_CHECK_CONDITION &&
+ dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
dev->sata_dev.sstatus = resp->sstatus;
dev->sata_dev.serror = resp->serror;
dev->sata_dev.scontrol = resp->scontrol;
- } else if (stat->stat != SAM_STAT_GOOD) {
+ } else {
ac = sas_to_ata_err(stat);
if (ac) {
SAS_DPRINTK("%s: SAS error %x\n", __func__,
@@ -305,55 +307,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
}
}
-static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in,
- u32 val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- dev->sata_dev.sstatus = val;
- break;
- case SCR_CONTROL:
- dev->sata_dev.scontrol = val;
- break;
- case SCR_ERROR:
- dev->sata_dev.serror = val;
- break;
- case SCR_ACTIVE:
- dev->sata_dev.ap->link.sactive = val;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
- u32 *val)
-{
- struct domain_device *dev = link->ap->private_data;
-
- SAS_DPRINTK("STUB %s\n", __func__);
- switch (sc_reg_in) {
- case SCR_STATUS:
- *val = dev->sata_dev.sstatus;
- return 0;
- case SCR_CONTROL:
- *val = dev->sata_dev.scontrol;
- return 0;
- case SCR_ERROR:
- *val = dev->sata_dev.serror;
- return 0;
- case SCR_ACTIVE:
- *val = dev->sata_dev.ap->link.sactive;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
.softreset = NULL,
@@ -367,8 +320,6 @@ static struct ata_port_operations sas_sata_ops = {
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop,
- .scr_read = sas_ata_scr_read,
- .scr_write = sas_ata_scr_write
};
static struct ata_port_info sata_port_info = {
@@ -801,7 +752,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
if (!dev_is_sata(ddev))
continue;
-
+
ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler");
ata_scsi_port_error_handler(shost, ap);
}
@@ -834,13 +785,13 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
LIST_HEAD(sata_q);
ap = NULL;
-
+
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *ddev = cmd_to_domain_dev(cmd);
if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
continue;
- if(ap && ap != ddev->sata_dev.ap)
+ if (ap && ap != ddev->sata_dev.ap)
continue;
ap = ddev->sata_dev.ap;
rtn = 1;
@@ -848,8 +799,21 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
}
if (!list_empty(&sata_q)) {
- ata_port_printk(ap, KERN_DEBUG,"sas eh calling libata cmd error handler\n");
+ ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n");
ata_scsi_cmd_error_handler(shost, ap, &sata_q);
+ /*
+ * ata's error handler may leave the cmd on the list
+ * so make sure they don't remain on a stack list
+ * about to go out of scope.
+ *
+ * This looks strange, since the commands are
+ * now part of no list, but the next error
+ * action will be ata_port_error_handler()
+ * which takes no list and sweeps them up
+ * anyway from the ata tag array.
+ */
+ while (!list_empty(&sata_q))
+ list_del_init(sata_q.next);
}
} while (ap);
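
The sas_ata_eh() hunk above drains commands left on an on-stack list head before it goes out of scope; otherwise their list pointers would dangle into dead stack memory. The idiom in isolation:

#include <linux/list.h>

static void demo_drain(struct list_head *stack_q)
{
	/* unlink each entry so it points at itself, not at dead stack */
	while (!list_empty(stack_q))
		list_del_init(stack_q->next);
}
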
diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c
index c17c25030f1c..fc460933575c 100644
--- a/drivers/scsi/libsas/sas_dump.c
+++ b/drivers/scsi/libsas/sas_dump.c
@@ -24,8 +24,6 @@
#include "sas_dump.h"
-#ifdef SAS_DEBUG
-
static const char *sas_hae_str[] = {
[0] = "HAE_RESET",
};
@@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port)
SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode);
SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys);
}
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h
index 47b45d4f5258..800e4c69093f 100644
--- a/drivers/scsi/libsas/sas_dump.h
+++ b/drivers/scsi/libsas/sas_dump.h
@@ -24,19 +24,7 @@
#include "sas_internal.h"
-#ifdef SAS_DEBUG
-
void sas_dprint_porte(int phyid, enum port_event pe);
void sas_dprint_phye(int phyid, enum phy_event pe);
void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he);
void sas_dump_port(struct asd_sas_port *port);
-
-#else /* SAS_DEBUG */
-
-static inline void sas_dprint_porte(int phyid, enum port_event pe) { }
-static inline void sas_dprint_phye(int phyid, enum phy_event pe) { }
-static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha,
- enum ha_event he) { }
-static inline void sas_dump_port(struct asd_sas_port *port) { }
-
-#endif /* SAS_DEBUG */
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 505ffe358293..f3f693b772ac 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
* dev to host FIS as described in section G.5 of
* sas-2 r 04b */
dr = &((struct smp_resp *)disc_resp)->disc;
+ if (memcmp(dev->sas_addr, dr->attached_sas_addr,
+ SAS_ADDR_SIZE) == 0) {
+ sas_printk("Found loopback topology, just ignore it!\n");
+ return 0;
+ }
if (!(dr->attached_dev_type == 0 &&
dr->attached_sata_dev))
break;
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 0001374bd6b2..8b538bd1ff2b 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -33,11 +33,7 @@
#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#ifdef SAS_DEBUG
-#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
-#else
-#define SAS_DPRINTK(fmt, ...)
-#endif
+#define SAS_DPRINTK(fmt, ...) printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__)
#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 67758ea8eb7f..f6e189f40917 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -681,11 +681,10 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct sas_task *task = TO_SAS_TASK(cmd);
unsigned long flags;
- enum blk_eh_timer_return rtn;
+ enum blk_eh_timer_return rtn;
if (sas_ata_timed_out(cmd, task, &rtn))
return rtn;
-
if (!task) {
cmd->request->timeout /= 2;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 746dd3d7a092..b64c6da870d3 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -325,6 +325,7 @@ struct lpfc_vport {
#define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */
#define FC_VFI_REGISTERED 0x800000 /* VFI is registered */
#define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */
+#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */
uint32_t ct_flags;
#define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */
@@ -348,6 +349,8 @@ struct lpfc_vport {
uint32_t fc_myDID; /* fibre channel S_ID */
uint32_t fc_prevDID; /* previous fibre channel S_ID */
+ struct lpfc_name fabric_portname;
+ struct lpfc_name fabric_nodename;
int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
@@ -372,6 +375,7 @@ struct lpfc_vport {
#define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */
#define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */
#define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */
+#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */
#define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */
#define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */
@@ -382,6 +386,7 @@ struct lpfc_vport {
struct timer_list fc_fdmitmo;
struct timer_list els_tmofunc;
+ struct timer_list delayed_disc_tmo;
int unreg_vpi_cmpl;
@@ -548,6 +553,8 @@ struct lpfc_hba {
#define LPFC_SLI3_CRP_ENABLED 0x08
#define LPFC_SLI3_BG_ENABLED 0x20
#define LPFC_SLI3_DSS_ENABLED 0x40
+#define LPFC_SLI4_PERFH_ENABLED 0x80
+#define LPFC_SLI4_PHWQ_ENABLED 0x100
uint32_t iocb_cmd_size;
uint32_t iocb_rsp_size;
@@ -655,7 +662,7 @@ struct lpfc_hba {
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
-
+ uint32_t cfg_enable_dss;
lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev;
@@ -792,6 +799,10 @@ struct lpfc_hba {
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
+ /* iDiag debugfs sub-directory */
+ struct dentry *idiag_root;
+ struct dentry *idiag_pci_cfg;
+ struct dentry *idiag_que_info;
#endif
/* Used for deferred freeing of ELS data buffers */
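
Among the lpfc.h additions above are a delayed_disc_tmo timer and a matching WORKER_DELAYED_DISC_TMO flag. A hedged sketch of this kernel generation's timer idiom behind such a field, with hypothetical demo_ names; the handler only sets a worker flag, and the heavy lifting happens in process context:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_disc_tmo(unsigned long ptr)
{
	/* set a WORKER_DELAYED_DISC_TMO-style flag, wake the worker */
}

static void demo_arm(struct timer_list *t, unsigned long data)
{
	init_timer(t);			/* pre-timer_setup() era API */
	t->function = demo_disc_tmo;
	t->data = data;
	mod_timer(t, jiffies + msecs_to_jiffies(1000));
}
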
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 3512abb8a587..e7c020df12fa 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
int status = 0;
int cnt = 0;
int i;
+ int rc;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
}
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl, type);
+ rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -671,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
*
* Notes:
* Assumes any error from lpfc_do_offline() will be negative.
+ * Do not make this function static.
*
* Returns:
* lpfc_do_offline() return code if not zero
@@ -682,6 +690,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
{
struct completion online_compl;
int status = 0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EIO;
@@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba)
return status;
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (status != 0)
@@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
int status=0;
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
- lpfc_workq_post_event(phba, &status, &online_compl,
+ rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
@@ -1279,6 +1294,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_dss_show - Return the current state of dss and the configured state
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the formatted text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_dss_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
+ (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
+ (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
+ "" : "Not ");
+}
+
+/**
* lpfc_param_show - Return a cfg attribute value in decimal
*
* Description:
@@ -1597,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
#define LPFC_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_init(name, defval, minval, maxval)
#define LPFC_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1611,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1622,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1630,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_param_hex_show(name)\
lpfc_param_init(name, defval, minval, maxval)\
@@ -1641,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_init(name, defval, minval, maxval)
#define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1655,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1666,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\
#define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1674,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL)
#define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \
static uint lpfc_##name = defval;\
-module_param(lpfc_##name, uint, 0);\
+module_param(lpfc_##name, uint, S_IRUGO);\
MODULE_PARM_DESC(lpfc_##name, desc);\
lpfc_vport_param_hex_show(name)\
lpfc_vport_param_init(name, defval, minval, maxval)\
@@ -1718,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL);
static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL);
static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
-
+static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -1813,6 +1850,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
int stat1=0, stat2=0;
unsigned int i, j, cnt=count;
u8 wwpn[8];
+ int rc;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
@@ -1863,7 +1901,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,
"0463 lpfc_soft_wwpn attribute set failed to "
"reinit adapter - %d\n", stat1);
init_completion(&online_compl);
- lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE);
+ rc = lpfc_workq_post_event(phba, &stat2, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (rc == 0)
+ return -ENOMEM;
+
wait_for_completion(&online_compl);
if (stat2)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1954,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\
static int lpfc_poll = 0;
-module_param(lpfc_poll, int, 0);
+module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
" 0 - none,"
" 1 - poll with interrupts enabled"
@@ -1964,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
int lpfc_sli_mode = 0;
-module_param(lpfc_sli_mode, int, 0);
+module_param(lpfc_sli_mode, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
" 2 - select SLI-2 even on SLI-3 capable HBAs,"
" 3 - select SLI-3");
int lpfc_enable_npiv = 1;
-module_param(lpfc_enable_npiv, int, 0);
+module_param(lpfc_enable_npiv, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality");
lpfc_param_show(enable_npiv);
lpfc_param_init(enable_npiv, 1, 0, 1);
static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL);
int lpfc_enable_rrq;
-module_param(lpfc_enable_rrq, int, 0);
+module_param(lpfc_enable_rrq, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality");
lpfc_param_show(enable_rrq);
lpfc_param_init(enable_rrq, 0, 0, 1);
@@ -2040,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
lpfc_txcmplq_hw_show, NULL);
int lpfc_iocb_cnt = 2;
-module_param(lpfc_iocb_cnt, int, 1);
+module_param(lpfc_iocb_cnt, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_iocb_cnt,
"Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs");
lpfc_param_show(iocb_cnt);
@@ -2192,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR,
# disappear until the timer expires. Value range is [0,255]. Default
# value is 30.
*/
-module_param(lpfc_devloss_tmo, int, 0);
+module_param(lpfc_devloss_tmo, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_devloss_tmo,
"Seconds driver will hold I/O waiting "
"for a device to come back");
@@ -2302,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1,
# Default value of this parameter is 1.
*/
static int lpfc_restrict_login = 1;
-module_param(lpfc_restrict_login, int, 0);
+module_param(lpfc_restrict_login, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_restrict_login,
"Restrict virtual ports login to remote initiators.");
lpfc_vport_param_show(restrict_login);
@@ -2473,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
static int lpfc_topology = 0;
-module_param(lpfc_topology, int, 0);
+module_param(lpfc_topology, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology");
lpfc_param_show(topology)
lpfc_param_init(topology, 0, 0, 6)
@@ -2915,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_link_speed = 0;
-module_param(lpfc_link_speed, int, 0);
+module_param(lpfc_link_speed, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_link_speed, "Select link speed");
lpfc_param_show(link_speed)
@@ -3043,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr,
}
static int lpfc_aer_support = 1;
-module_param(lpfc_aer_support, int, 1);
+module_param(lpfc_aer_support, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support");
lpfc_param_show(aer_support)
@@ -3155,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
# The value is set in milliseconds.
*/
static int lpfc_max_scsicmpl_time;
-module_param(lpfc_max_scsicmpl_time, int, 0);
+module_param(lpfc_max_scsicmpl_time, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_max_scsicmpl_time,
"Use command completion time to control queue depth");
lpfc_vport_param_show(max_scsicmpl_time);
@@ -3331,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
*/
unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION;
-module_param(lpfc_prot_mask, uint, 0);
+module_param(lpfc_prot_mask, uint, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
/*
@@ -3343,9 +3385,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask");
#
*/
unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP;
-module_param(lpfc_prot_guard, byte, 0);
+module_param(lpfc_prot_guard, byte, S_IRUGO);
MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type");
+/*
+ * Delay initial NPort discovery when Clean Address bit is cleared in
+ * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed.
+ * This parameter can have value 0 or 1.
+ * When this parameter is set to 0, no delay is added to the initial
+ * discovery.
+ * When this parameter is set to a non-zero value, initial NPort discovery is
+ * delayed by ra_tov seconds when the Clean Address bit is cleared in the
+ * FLOGI/FDISC accept and the FCID/Fabric name/Fabric portname is changed.
+ * The driver always delays NPort discovery for subsequent FLOGI/FDISC
+ * completions when the Clean Address bit is cleared in the FLOGI/FDISC
+ * accept and the FCID/Fabric name/Fabric portname is changed.
+ * Default value is 0.
+ */
+int lpfc_delay_discovery;
+module_param(lpfc_delay_discovery, int, S_IRUGO);
+MODULE_PARM_DESC(lpfc_delay_discovery,
+ "Delay NPort discovery when Clean Address bit is cleared. "
+ "Allowed values: 0,1.");
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
@@ -3437,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_txcmplq_hw,
&dev_attr_lpfc_fips_level,
&dev_attr_lpfc_fips_rev,
+ &dev_attr_lpfc_dss,
NULL,
};
@@ -4639,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
+ phba->cfg_enable_dss = 1;
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 17fde522c84a..3d40023f4804 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_supported_pages(struct lpfcMboxq *);
-void lpfc_sli4_params(struct lpfcMboxq *);
+void lpfc_pc_sli4_params(struct lpfcMboxq *);
int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
-
+int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *);
struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
void lpfc_cleanup_rcv_buffers(struct lpfc_vport *);
void lpfc_rcv_seq_check_edtov(struct lpfc_vport *);
@@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
void lpfc_fdmi_timeout_handler(struct lpfc_vport *);
+void lpfc_delayed_disc_tmo(unsigned long);
+void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *);
int lpfc_config_port_prep(struct lpfc_hba *);
int lpfc_config_port_post(struct lpfc_hba *);
@@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
extern int lpfc_sli_mode;
extern int lpfc_enable_npiv;
+extern int lpfc_delay_discovery;
int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t);
int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t);
@@ -423,6 +426,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
uint16_t, uint16_t, uint16_t);
void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
-void lpfc_cleanup_vports_rrqs(struct lpfc_vport *);
+void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c004fa9a681e..d9edfd90d7ff 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1738,6 +1738,55 @@ fdmi_cmd_exit:
return 1;
}
+/**
+ * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
+ * @ptr: context object of the timer.
+ *
+ * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
+ * the worker thread.
+ **/
+void
+lpfc_delayed_disc_tmo(unsigned long ptr)
+{
+ struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
+ struct lpfc_hba *phba = vport->phba;
+ uint32_t tmo_posted;
+ unsigned long iflag;
+
+ spin_lock_irqsave(&vport->work_port_lock, iflag);
+ tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
+ if (!tmo_posted)
+ vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
+ spin_unlock_irqrestore(&vport->work_port_lock, iflag);
+
+ if (!tmo_posted)
+ lpfc_worker_wake_up(phba);
+ return;
+}
+
+/**
+ * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
+ * handle delayed discovery.
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This function starts NPort discovery for the vport.
+ **/
+void
+lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ spin_lock_irq(shost->host_lock);
+ if (!(vport->fc_flag & FC_DISC_DELAYED)) {
+ spin_unlock_irq(shost->host_lock);
+ return;
+ }
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+
+ lpfc_do_scr_ns_plogi(vport->phba, vport);
+}
+
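+/*
+ * Timer setup sketch (assumed to live in the vport init path elsewhere in
+ * this patch; shown here only for illustration of how the handler above is
+ * wired up):
+ *
+ *      init_timer(&vport->delayed_disc_tmo);
+ *      vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+ *      vport->delayed_disc_tmo.data = (unsigned long)vport;
+ */
+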
void
lpfc_fdmi_tmo(unsigned long ptr)
{
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a80d938fafc9..a753581509d6 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -57,8 +57,8 @@
* # mount -t debugfs none /sys/kernel/debug
*
* The lpfc debugfs directory hierarchy is:
- * lpfc/lpfcX/vportY
- * where X is the lpfc hba unique_id
+ * /sys/kernel/debug/lpfc/fnX/vportY
+ * where X is the lpfc hba function unique_id
* where Y is the vport VPI on that hba
*
* Debugging services available per vport:
@@ -82,52 +82,34 @@
* the HBA. X MUST also be a power of 2.
*/
static int lpfc_debugfs_enable = 1;
-module_param(lpfc_debugfs_enable, int, 0);
+module_param(lpfc_debugfs_enable, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_disc_trc;
-module_param(lpfc_debugfs_max_disc_trc, int, 0);
+module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
"Set debugfs discovery trace depth");
/* This MUST be a power of 2 */
static int lpfc_debugfs_max_slow_ring_trc;
-module_param(lpfc_debugfs_max_slow_ring_trc, int, 0);
+module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
static int lpfc_debugfs_mask_disc_trc;
-module_param(lpfc_debugfs_mask_disc_trc, int, 0);
+module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
"Set debugfs discovery trace mask");
#include <linux/debugfs.h>
-/* size of output line, for discovery_trace and slow_ring_trace */
-#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
-
-/* nodelist output buffer size */
-#define LPFC_NODELIST_SIZE 8192
-#define LPFC_NODELIST_ENTRY_SIZE 120
-
-/* dumpHBASlim output buffer size */
-#define LPFC_DUMPHBASLIM_SIZE 4096
-
-/* dumpHostSlim output buffer size */
-#define LPFC_DUMPHOSTSLIM_SIZE 4096
-
-/* hbqinfo output buffer size */
-#define LPFC_HBQINFO_SIZE 8192
-
-struct lpfc_debug {
- char *buffer;
- int len;
-};
-
static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
static unsigned long lpfc_debugfs_start_time = 0L;
+/* iDiag */
+static struct lpfc_idiag idiag;
+
/**
* lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
* @vport: The vport to gather the log info from.
@@ -996,8 +978,6 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
return nbytes;
}
-
-
/**
* lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
* @inode: The inode pointer that contains a vport pointer.
@@ -1099,6 +1079,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
+
return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer,
debug->len);
}
@@ -1137,6 +1118,776 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
return 0;
}
+/*
+ * iDiag debugfs file access methods
+ */
+
+/*
+ * iDiag PCI config space register access methods:
+ *
+ * PCI config space register accesses of read, write, read-modify-write to
+ * set bits, and read-modify-write to clear bits are provided for SLI4 PCI
+ * functions. In the SLI4 PCI function's debugfs iDiag directory,
+ *
+ * /sys/kernel/debug/lpfc/fn<#>/iDiag
+ *
+ * the access is through the debugfs entry pciCfg:
+ *
+ * 1. For PCI config space register read access, there are two read methods:
+ * A) read a single PCI config space register in the size of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through
+ * the 4K extended PCI config space.
+ *
+ * A) Reading a single PCI config space register consists of two steps:
+ *
+ * Step-1: Set up PCI config space register read command, the command
+ * syntax is,
+ *
+ * echo 1 <where> <count> > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, <where> is the
+ * offset from the beginning of the device's PCI config space to read from,
+ * and <count> is the size of PCI config space register data to read back,
+ * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits
+ * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes).
+ *
+ * Step-2: Perform the debugfs read operation to execute the idiag command
+ * set up in Step-1,
+ *
+ * cat pciCfg
+ *
+ * Examples:
+ * To read PCI device's vendor-id and device-id from PCI config space,
+ *
+ * echo 1 0 4 > pciCfg
+ * cat pciCfg
+ *
+ * To read the PCI device's current command register from config space,
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * B) Browsing through the entire 4K extended PCI config space also
+ * consists of two steps:
+ *
+ * Step-1: Set up PCI config space register browsing command, the command
+ * syntax is,
+ *
+ * echo 1 0 4096 > pciCfg
+ *
+ * where, 1 is the iDiag command for PCI config space read, 0 must be used
+ * as the offset for PCI config space register browse, and 4096 must be
+ * used as the count for PCI config space register browse.
+ *
+ * Step-2: Repeatedly issue the debugfs read operation to browse through
+ * the entire PCI config space registers:
+ *
+ * cat pciCfg
+ * cat pciCfg
+ * cat pciCfg
+ * ...
+ *
+ * When the browse reaches the end of the 4K PCI config space, it wraps
+ * around and starts reading from the beginning again.
+ *
+ * 2. For PCI config space register write access, it supports a single PCI
+ * config space register write in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 2 <where> <count> <value> > pciCfg
+ *
+ * where, 2 is the iDiag command for PCI config space write, <where> is
+ * the offset from the beginning of the device's PCI config space to write
+ * into, <count> is the size of data to write into the PCI config space,
+ * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits
+ * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value>
+ * is the data to be written into the PCI config space register at the
+ * offset.
+ *
+ * Examples:
+ * To disable PCI device's interrupt assertion,
+ *
+ * 1) Read in device's PCI config space register command field <cmd>:
+ *
+ * echo 1 4 2 > pciCfg
+ * cat pciCfg
+ *
+ * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>:
+ *
+ * <cmd> = <cmd> | (1 << 10)
+ *
+ * 3) Write the modified command back:
+ *
+ * echo 2 4 2 <cmd> > pciCfg
+ *
+ * 3. For PCI config space register set bits access, it supports a single PCI
+ * config space register set bits in the size of a byte (8 bits), a word
+ * (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 3 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 3 is the iDiag command for PCI config space set bits, <where> is
+ * the offset from the beginning of the device's PCI config space to set
+ * bits into, <count> is the size of the bitmask to set into the PCI config
+ * space, it will be 1 for setting a byte (8 bits), 2 for setting a word
+ * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and
+ * <bitmask> is the bitmask, indicating the bits to be set into the PCI
+ * config space register at the offset. The logic performed to the content
+ * of the PCI config space register, regval, is,
+ *
+ * regval |= <bitmask>
+ *
+ * 4. For PCI config space register clear bits access, it supports a single
+ * PCI config space register clear bits in the size of a byte (8 bits),
+ * a word (16 bits), or a dword (32 bits). The command syntax is,
+ *
+ * echo 4 <where> <count> <bitmask> > pciCfg
+ *
+ * where, 4 is the iDiag command for PCI config space clear bits, <where>
+ * is the offset from the beginning of the device's PCI config space to
+ * clear bits from, <count> is the size of the bitmask to set into the PCI
+ * config space, it will be 1 for setting a byte (8 bits), 2 for setting
+ * a word(16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4
+ * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared
+ * from the PCI config space register at the offset. the logic performed
+ * to the content of the PCI config space register, regval, is,
+ *
+ * regval &= ~<bitmask>
+ *
+ * Note, for all single register read, write, set bits, or clear bits access,
+ * the offset (<where>) must be aligned with the size of the data:
+ *
+ * For data size of byte (8 bits), the offset must be aligned to the byte
+ * boundary; for data size of word (16 bits), the offset must be aligned
+ * to the word boundary; while for data size of dword (32 bits), the offset
+ * must be aligned to the dword boundary. Otherwise, the interface will
+ * return the error:
+ *
+ * "-bash: echo: write error: Invalid argument".
+ *
+ * For example:
+ *
+ * echo 1 2 4 > pciCfg
+ * -bash: echo: write error: Invalid argument
+ *
+ * Note also, the numbers in the command fields for all read, write,
+ * set bits, and clear bits PCI config space register commands can be
+ * either decimal or hex.
+ *
+ * For example,
+ * echo 1 0 4096 > pciCfg
+ *
+ * will be the same as
+ * echo 1 0 0x1000 > pciCfg
+ *
+ * And,
+ * echo 2 155 1 10 > pciCfg
+ *
+ * will be
+ * echo 2 0x9b 1 0xa > pciCfg
+ */
+
+/**
+ * lpfc_idiag_cmd_get - Get and parse idiag debugfs commands from user space
+ * @buf: The pointer to the user space buffer.
+ * @nbytes: The number of bytes in the user space buffer.
+ * @idiag_cmd: pointer to the idiag command struct.
+ *
+ * This routine reads data from the debugfs user space buffer and parses it
+ * to get the idiag command and arguments. White space between the data
+ * fields is used as the parsing separator.
+ *
+ * This routine returns 0 when successful; on error it returns a proper
+ * error code back to user space.
+ */
+static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes,
+ struct lpfc_idiag_cmd *idiag_cmd)
+{
+ char mybuf[64];
+ char *pbuf, *step_str;
+ int bsize, i;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+ memset(idiag_cmd, 0, sizeof(*idiag_cmd));
+ bsize = min(nbytes, (sizeof(mybuf)-1));
+
+ if (copy_from_user(mybuf, buf, bsize))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+ step_str = strsep(&pbuf, "\t ");
+
+ /* The opcode must be present */
+ if (!step_str)
+ return -EINVAL;
+
+ idiag_cmd->opcode = simple_strtol(step_str, NULL, 0);
+ if (idiag_cmd->opcode == 0)
+ return -EINVAL;
+
+ for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) {
+ step_str = strsep(&pbuf, "\t ");
+ if (!step_str)
+ return 0;
+ idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0);
+ }
+ return 0;
+}
+
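+/*
+ * Parsing sketch (illustrative): user input "1 0x98 4" yields
+ * idiag_cmd->opcode = 1 and idiag_cmd->data[] = { 0x98, 4, 0, 0 },
+ * i.e. a PCI config space dword read at offset 0x98. Both decimal and
+ * hex are accepted because simple_strtol() is called with base 0.
+ */
+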
+/**
+ * lpfc_idiag_open - idiag open debugfs
+ * @inode: The inode pointer that contains a pointer to phba.
+ * @file: The file pointer to attach the file operation.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation. It
+ * gets the reference to phba from the i_private field in @inode, allocates
+ * an lpfc_debug struct to track the file operation, and stores a pointer to
+ * it in the private_data field of @file. The data buffer itself is allocated
+ * lazily by the read/write methods.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return a
+ * negative error value.
+ **/
+static int
+lpfc_idiag_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ return -ENOMEM;
+
+ debug->i_private = inode->i_private;
+ debug->buffer = NULL;
+ file->private_data = debug;
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_release - Release idiag access file operation
+ * @inode: The inode pointer that contains a phba pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine is the generic release routine for the idiag access file
+ * operation, it frees the buffer that was allocated when the debugfs file
+ * was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_cmd_release - Release idiag cmd access file operation
+ * @inode: The inode pointer that contains a phba pointer. (unused)
+ * @file: The file pointer that contains the buffer to release.
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs file
+ * was opened. It also resets the fields in the idiag command struct in the
+ * case where the command is not a continuous browse of the data structure.
+ *
+ * Returns:
+ * This function returns zero.
+ **/
+static int
+lpfc_idiag_cmd_release(struct inode *inode, struct file *file)
+{
+ struct lpfc_debug *debug = file->private_data;
+
+ /* Read PCI config register, if not read all, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_RD) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD))
+ if ((idiag.cmd.data[1] == sizeof(uint8_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint16_t)) ||
+ (idiag.cmd.data[1] == sizeof(uint32_t)))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Write PCI config register, clear command fields */
+ if ((debug->op == LPFC_IDIAG_OP_WR) &&
+ (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR))
+ memset(&idiag, 0, sizeof(idiag));
+
+ /* Free the buffers to the file operation */
+ kfree(debug->buffer);
+ kfree(debug);
+
+ return 0;
+}
+
+/**
+ * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the @phba pci config space according to the
+ * idiag command, and copies it to the user @buf. Depending on the PCI config space
+ * read command setup, it does either a single register read of a byte
+ * (8 bits), a word (16 bits), or a dword (32 bits) or browsing through all
+ * registers from the 4K extended PCI config space.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE;
+ int where, count;
+ char *pbuffer;
+ struct pci_dev *pdev;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return 0;
+
+ /* This is a user read operation */
+ debug->op = LPFC_IDIAG_OP_RD;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ } else
+ return 0;
+
+ /* Read single PCI config space register */
+ switch (count) {
+ case SIZE_U8: /* byte (8 bits) */
+ pci_read_config_byte(pdev, where, &u8val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %02x\n", where, u8val);
+ break;
+ case SIZE_U16: /* word (16 bits) */
+ pci_read_config_word(pdev, where, &u16val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %04x\n", where, u16val);
+ break;
+ case SIZE_U32: /* double word (32 bits) */
+ pci_read_config_dword(pdev, where, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: %08x\n", where, u32val);
+ break;
+ case LPFC_PCI_CFG_SIZE: /* browse all */
+ goto pcicfg_browse;
+ break;
+ default:
+ /* illegal count */
+ len = 0;
+ break;
+ }
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+
+pcicfg_browse:
+
+ /* Browse all PCI config space registers */
+ offset_label = idiag.offset.last_rd;
+ offset = offset_label;
+
+ /* Read PCI config space */
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%03x: ", offset_label);
+ while (index > 0) {
+ pci_read_config_dword(pdev, offset, &u32val);
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "%08x ", u32val);
+ offset += sizeof(uint32_t);
+ index -= sizeof(uint32_t);
+ if (!index)
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n");
+ else if (!(index % (8 * sizeof(uint32_t)))) {
+ offset_label += (8 * sizeof(uint32_t));
+ len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len,
+ "\n%03x: ", offset_label);
+ }
+ }
+
+ /* Set up the offset for next portion of pci cfg read */
+ idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE;
+ if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE)
+ idiag.offset.last_rd = 0;
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
+/**
+ * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the user data from.
+ * @nbytes: The number of bytes to get.
+ * @ppos: The position in the file to start reading from.
+ *
+ * This routine gets the debugfs idiag command struct from user space and
+ * then performs the syntax check for a PCI config space read or write
+ * command accordingly. In the case of a PCI config space read command, it
+ * sets up the command in the idiag command struct for the debugfs read
+ * operation. In the case of a PCI config space write operation, it executes
+ * the write operation into the PCI config space accordingly.
+ *
+ * It returns the @nbytes passed in from debugfs user space when successful.
+ * In case of error conditions, it returns a proper error code back to user
+ * space.
+ */
+static ssize_t
+lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ uint32_t where, value, count;
+ uint32_t u32val;
+ uint16_t u16val;
+ uint8_t u8val;
+ struct pci_dev *pdev;
+ int rc;
+
+ pdev = phba->pcidev;
+ if (!pdev)
+ return -EFAULT;
+
+ /* This is a user write operation */
+ debug->op = LPFC_IDIAG_OP_WR;
+
+ rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd);
+ if (rc)
+ return rc;
+
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) {
+ /* Read command from PCI config space, set up command fields */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ if (count == LPFC_PCI_CFG_SIZE) {
+ if (where != 0)
+ goto error_out;
+ } else if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ }
+ } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST ||
+ idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ /* Write command to PCI config space, read-modify-write */
+ where = idiag.cmd.data[0];
+ count = idiag.cmd.data[1];
+ value = idiag.cmd.data[2];
+ /* Sanity checks */
+ if ((count != sizeof(uint8_t)) &&
+ (count != sizeof(uint16_t)) &&
+ (count != sizeof(uint32_t)))
+ goto error_out;
+ if (count == sizeof(uint8_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t))
+ goto error_out;
+ if (where % sizeof(uint8_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_byte(pdev, where,
+ (uint8_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val |= (uint8_t)value;
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_byte(pdev, where, &u8val);
+ if (!rc) {
+ u8val &= (uint8_t)(~value);
+ pci_write_config_byte(pdev, where,
+ u8val);
+ }
+ }
+ }
+ if (count == sizeof(uint16_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t))
+ goto error_out;
+ if (where % sizeof(uint16_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_word(pdev, where,
+ (uint16_t)value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val |= (uint16_t)value;
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_word(pdev, where, &u16val);
+ if (!rc) {
+ u16val &= (uint16_t)(~value);
+ pci_write_config_word(pdev, where,
+ u16val);
+ }
+ }
+ }
+ if (count == sizeof(uint32_t)) {
+ if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t))
+ goto error_out;
+ if (where % sizeof(uint32_t))
+ goto error_out;
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)
+ pci_write_config_dword(pdev, where, value);
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val |= value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) {
+ rc = pci_read_config_dword(pdev, where,
+ &u32val);
+ if (!rc) {
+ u32val &= ~value;
+ pci_write_config_dword(pdev, where,
+ u32val);
+ }
+ }
+ }
+ } else
+ /* All other opcodes are illegal for now */
+ goto error_out;
+
+ return nbytes;
+error_out:
+ memset(&idiag, 0, sizeof(idiag));
+ return -EINVAL;
+}
+
+/**
+ * lpfc_idiag_queinfo_read - idiag debugfs read queue information
+ * @file: The file pointer to read from.
+ * @buf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the @phba SLI4 PCI function queue information
+ * and copies it to the user @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be less
+ * than @nbytes if the end of the file was reached) or a negative error value.
+ **/
+static ssize_t
+lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int len = 0, fcp_qidx;
+ char *pbuffer;
+
+ if (!debug->buffer)
+ debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
+ if (!debug->buffer)
+ return 0;
+ pbuffer = debug->buffer;
+
+ if (*ppos)
+ return 0;
+
+ /* Get slow-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path EQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.sp_eq->queue_id,
+ phba->sli4_hba.sp_eq->entry_count,
+ phba->sli4_hba.sp_eq->host_index,
+ phba->sli4_hba.sp_eq->hba_index);
+
+ /* Get fast-path event queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path EQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fp_eq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fp_eq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
+ phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_cq->queue_id,
+ phba->sli4_hba.mbx_cq->entry_count,
+ phba->sli4_hba.mbx_cq->host_index,
+ phba->sli4_hba.mbx_cq->hba_index);
+
+ /* Get slow-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path CQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.els_cq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], CQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_cq->queue_id,
+ phba->sli4_hba.els_cq->entry_count,
+ phba->sli4_hba.els_cq->host_index,
+ phba->sli4_hba.els_cq->hba_index);
+
+ /* Get fast-path complete queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path CQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated EQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], EQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get mailbox queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Mailbox MQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.mbx_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], MQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_wq->entry_count,
+ phba->sli4_hba.mbx_wq->host_index,
+ phba->sli4_hba.mbx_wq->hba_index);
+
+ /* Get slow-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path WQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.els_wq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n",
+ phba->sli4_hba.els_wq->queue_id,
+ phba->sli4_hba.els_wq->entry_count,
+ phba->sli4_hba.els_wq->host_index,
+ phba->sli4_hba.els_wq->hba_index);
+
+ /* Get fast-path work queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Fast-path WQ information:\n");
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], WQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
+ phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+
+ /* Get receive queue information */
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "Slow-path RQ information:\n");
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\t\tAssociated CQ-ID [%02d]:\n",
+ phba->sli4_hba.hdr_rq->assoc_qid);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RHQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.hdr_rq->queue_id,
+ phba->sli4_hba.hdr_rq->entry_count,
+ phba->sli4_hba.hdr_rq->host_index,
+ phba->sli4_hba.hdr_rq->hba_index);
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ "\tID [%02d], RDQE-COUNT [%04d], "
+ "HOST-INDEX [%04x], PORT-INDEX [%04x]\n",
+ phba->sli4_hba.dat_rq->queue_id,
+ phba->sli4_hba.dat_rq->entry_count,
+ phba->sli4_hba.dat_rq->host_index,
+ phba->sli4_hba.dat_rq->hba_index);
+
+ return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
+}
+
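+/*
+ * Sample queInfo output (format sketch; IDs and counts are illustrative):
+ *
+ *      Slow-path EQ information:
+ *              ID [00], EQE-COUNT [0256], HOST-INDEX [0000], PORT-INDEX [0000]
+ */
+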
#undef lpfc_debugfs_op_disc_trc
static const struct file_operations lpfc_debugfs_op_disc_trc = {
.owner = THIS_MODULE,
@@ -1213,6 +1964,28 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
+
+/*
+ * File operations for the iDiag debugfs
+ */
+#undef lpfc_idiag_op_pciCfg
+static const struct file_operations lpfc_idiag_op_pciCfg = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_idiag_pcicfg_read,
+ .write = lpfc_idiag_pcicfg_write,
+ .release = lpfc_idiag_cmd_release,
+};
+
+#undef lpfc_idiag_op_queInfo
+static const struct file_operations lpfc_idiag_op_queInfo = {
+ .owner = THIS_MODULE,
+ .open = lpfc_idiag_open,
+ .read = lpfc_idiag_queinfo_read,
+ .release = lpfc_idiag_release,
+};
+
#endif
/**
@@ -1249,8 +2022,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
- /* Setup lpfcX directory for specific HBA */
- snprintf(name, sizeof(name), "lpfc%d", phba->brd_no);
+ /* Setup funcX directory for specific HBA PCI function */
+ snprintf(name, sizeof(name), "fn%d", phba->brd_no);
if (!phba->hba_debugfs_root) {
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
@@ -1275,28 +2048,38 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
/* Setup dumpHBASlim */
- snprintf(name, sizeof(name), "dumpHBASlim");
- phba->debug_dumpHBASlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHBASlim);
- if (!phba->debug_dumpHBASlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0413 Cannot create debugfs dumpHBASlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHBASlim");
+ phba->debug_dumpHBASlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHBASlim);
+ if (!phba->debug_dumpHBASlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0413 Cannot create debugfs "
+ "dumpHBASlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHBASlim = NULL;
/* Setup dumpHostSlim */
- snprintf(name, sizeof(name), "dumpHostSlim");
- phba->debug_dumpHostSlim =
- debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
- phba->hba_debugfs_root,
- phba, &lpfc_debugfs_op_dumpHostSlim);
- if (!phba->debug_dumpHostSlim) {
- lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "0414 Cannot create debugfs dumpHostSlim\n");
- goto debug_failed;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ snprintf(name, sizeof(name), "dumpHostSlim");
+ phba->debug_dumpHostSlim =
+ debugfs_create_file(name,
+ S_IFREG|S_IRUGO|S_IWUSR,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_dumpHostSlim);
+ if (!phba->debug_dumpHostSlim) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0414 Cannot create debugfs "
+ "dumpHostSlim\n");
+ goto debug_failed;
+ }
+ } else
+ phba->debug_dumpHostSlim = NULL;
/* Setup dumpData */
snprintf(name, sizeof(name), "dumpData");
@@ -1322,8 +2105,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
-
-
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -1342,7 +2123,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
-
snprintf(name, sizeof(name), "slow_ring_trace");
phba->debug_slow_ring_trc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -1434,6 +2214,53 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
"0409 Cant create debugfs nodelist\n");
goto debug_failed;
}
+
+ /*
+ * iDiag debugfs root entry points for SLI4 device only
+ */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ goto debug_failed;
+
+ snprintf(name, sizeof(name), "iDiag");
+ if (!phba->idiag_root) {
+ phba->idiag_root =
+ debugfs_create_dir(name, phba->hba_debugfs_root);
+ if (!phba->idiag_root) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2922 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ /* Initialize iDiag data structure */
+ memset(&idiag, 0, sizeof(idiag));
+ }
+
+ /* iDiag read PCI config space */
+ snprintf(name, sizeof(name), "pciCfg");
+ if (!phba->idiag_pci_cfg) {
+ phba->idiag_pci_cfg =
+ debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
+ phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
+ if (!phba->idiag_pci_cfg) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2923 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ idiag.offset.last_rd = 0;
+ }
+
+ /* iDiag get PCI function queue information */
+ snprintf(name, sizeof(name), "queInfo");
+ if (!phba->idiag_que_info) {
+ phba->idiag_que_info =
+ debugfs_create_file(name, S_IFREG|S_IRUGO,
+ phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
+ if (!phba->idiag_que_info) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "2924 Can't create idiag debugfs\n");
+ goto debug_failed;
+ }
+ }
+
debug_failed:
return;
#endif
@@ -1508,8 +2335,31 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
phba->debug_slow_ring_trc = NULL;
}
+ /*
+ * iDiag release
+ */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ if (phba->idiag_que_info) {
+ /* iDiag queInfo */
+ debugfs_remove(phba->idiag_que_info);
+ phba->idiag_que_info = NULL;
+ }
+ if (phba->idiag_pci_cfg) {
+ /* iDiag pciCfg */
+ debugfs_remove(phba->idiag_pci_cfg);
+ phba->idiag_pci_cfg = NULL;
+ }
+
+ /* Finally remove the iDiag debugfs root */
+ if (phba->idiag_root) {
+ /* iDiag root */
+ debugfs_remove(phba->idiag_root);
+ phba->idiag_root = NULL;
+ }
+ }
+
if (phba->hba_debugfs_root) {
- debugfs_remove(phba->hba_debugfs_root); /* lpfcX */
+ debugfs_remove(phba->hba_debugfs_root); /* fnX */
phba->hba_debugfs_root = NULL;
atomic_dec(&lpfc_debugfs_hba_count);
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 03c7313a1012..91b9a9427cda 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2007 Emulex. All rights reserved. *
+ * Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -22,6 +22,44 @@
#define _H_LPFC_DEBUG_FS
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+
+/* size of output line, for discovery_trace and slow_ring_trace */
+#define LPFC_DEBUG_TRC_ENTRY_SIZE 100
+
+/* nodelist output buffer size */
+#define LPFC_NODELIST_SIZE 8192
+#define LPFC_NODELIST_ENTRY_SIZE 120
+
+/* dumpHBASlim output buffer size */
+#define LPFC_DUMPHBASLIM_SIZE 4096
+
+/* dumpHostSlim output buffer size */
+#define LPFC_DUMPHOSTSLIM_SIZE 4096
+
+/* hbqinfo output buffer size */
+#define LPFC_HBQINFO_SIZE 8192
+
+/* rdPciConf output buffer size */
+#define LPFC_PCI_CFG_SIZE 4096
+#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2)
+#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4)
+
+/* queue info output buffer size */
+#define LPFC_QUE_INFO_GET_BUF_SIZE 2048
+
+#define SIZE_U8 sizeof(uint8_t)
+#define SIZE_U16 sizeof(uint16_t)
+#define SIZE_U32 sizeof(uint32_t)
+
+struct lpfc_debug {
+ char *i_private;
+ char op;
+#define LPFC_IDIAG_OP_RD 1
+#define LPFC_IDIAG_OP_WR 2
+ char *buffer;
+ int len;
+};
+
struct lpfc_debugfs_trc {
char *fmt;
uint32_t data1;
@@ -30,6 +68,26 @@ struct lpfc_debugfs_trc {
uint32_t seq_cnt;
unsigned long jif;
};
+
+struct lpfc_idiag_offset {
+ uint32_t last_rd;
+};
+
+#define LPFC_IDIAG_CMD_DATA_SIZE 4
+struct lpfc_idiag_cmd {
+ uint32_t opcode;
+#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001
+#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002
+#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003
+#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004
+ uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE];
+};
+
+struct lpfc_idiag {
+ uint32_t active;
+ struct lpfc_idiag_cmd cmd;
+ struct lpfc_idiag_offset offset;
+};
#endif
/* Mask for discovery_trace */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c62d567cc845..8e28edf9801e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2009 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -485,6 +485,59 @@ fail:
}
/**
+ * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @sp: pointer to service parameter data structure.
+ *
+ * This routine is called from FLOGI/FDISC completion handler functions.
+ * lpfc_check_clean_addr_bit returns 1 when the FCID/Fabric portname/Fabric
+ * nodename is changed in the completion service parameters, else it returns
+ * 0. This function also sets a flag in the vport data structure to delay
+ * N_Port discovery after the FLOGI/FDISC completion if the Clean Address
+ * bit in the FLOGI/FDISC response is cleared and the FCID/Fabric portname/
+ * Fabric nodename is changed in the completion service parameters.
+ *
+ * Return code
+ * 0 - FCID, Fabric Nodename, and Fabric portname are unchanged.
+ * 1 - FCID, Fabric Nodename, or Fabric portname is changed.
+ *
+ **/
+static uint8_t
+lpfc_check_clean_addr_bit(struct lpfc_vport *vport,
+ struct serv_parm *sp)
+{
+ uint8_t fabric_param_changed = 0;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ if ((vport->fc_prevDID != vport->fc_myDID) ||
+ memcmp(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name)) ||
+ memcmp(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name)))
+ fabric_param_changed = 1;
+
+ /*
+ * Word 1 Bit 31 in common service parameter is overloaded.
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request
+ * Word 1 Bit 31 in FLOGI response is clean address bit
+ *
+ * If a fabric parameter is changed and the clean address bit is
+ * cleared, delay nport discovery if:
+ * - vport->fc_prevDID != 0 (not initial discovery) OR
+ * - lpfc_delay_discovery module parameter is set.
+ */
+ if (fabric_param_changed && !sp->cmn.clean_address_bit &&
+ (vport->fc_prevDID || lpfc_delay_discovery)) {
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ }
+
+ return fabric_param_changed;
+}
+
+
+/**
* lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
* @vport: pointer to a host virtual N_Port data structure.
* @ndlp: pointer to a node-list data structure.
@@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *np;
struct lpfc_nodelist *next_np;
+ uint8_t fabric_param_changed;
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_FABRIC;
@@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_class_sup |= FC_COS_CLASS4;
ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
sp->cmn.bbRcvSizeLsb;
+
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
@@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
}
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
@@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_sli *psli;
+ struct lpfcMboxq *mbox;
psli = &phba->sli;
/* we pass cmdiocb to state machine which needs rspiocb as well */
@@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
NLP_EVT_CMPL_LOGO);
out:
lpfc_els_free_iocb(phba, cmdiocb);
+ /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
+ if ((vport->fc_flag & FC_PT2PT) &&
+ !(vport->fc_flag & FC_PT2PT_PLOGI)) {
+ phba->pport->fc_myDID = 0;
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (mbox) {
+ lpfc_config_link(phba, mbox);
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
+ MBX_NOT_FINISHED) {
+ mempool_free(mbox, phba->mbox_mem_pool);
+ }
+ }
+ }
return;
}
@@ -2745,7 +2821,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_FDISC:
- lpfc_issue_els_fdisc(vport, ndlp, retry);
+ if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI))
+ lpfc_issue_els_fdisc(vport, ndlp, retry);
break;
}
return;
@@ -2815,9 +2892,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
switch (irsp->ulpStatus) {
case IOSTAT_FCP_RSP_ERROR:
+ break;
case IOSTAT_REMOTE_STOP:
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ /* This IO was aborted by the target, we don't
+ * know the rxid and because we did not send the
+ * ABTS we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, ndlp,
+ cmdiocb->sli4_xritag, 0, 0);
+ }
break;
-
case IOSTAT_LOCAL_REJECT:
switch ((irsp->un.ulpWord[4] & 0xff)) {
case IOERR_LOOP_OPEN_FAILURE:
@@ -4013,28 +4098,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport,
uint8_t *pcmd;
struct RRQ *rrq;
uint16_t rxid;
+ uint16_t xri;
struct lpfc_node_rrq *prrq;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt);
pcmd += sizeof(uint32_t);
rrq = (struct RRQ *)pcmd;
- rxid = bf_get(rrq_oxid, rrq);
+ rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg);
+ rxid = be16_to_cpu(bf_get(rrq_rxid, rrq));
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x"
" x%x x%x\n",
- bf_get(rrq_did, rrq),
- bf_get(rrq_oxid, rrq),
+ be32_to_cpu(bf_get(rrq_did, rrq)),
+ be16_to_cpu(bf_get(rrq_oxid, rrq)),
rxid,
iocb->iotag, iocb->iocb.ulpContext);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Clear RRQ: did:x%x flg:x%x exchg:x%.08x",
ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg);
- prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID);
+ if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq)))
+ xri = be16_to_cpu(bf_get(rrq_oxid, rrq));
+ else
+ xri = rxid;
+ prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID);
if (prrq)
- lpfc_clr_rrq_active(phba, rxid, prrq);
+ lpfc_clr_rrq_active(phba, xri, prrq);
return;
}
@@ -6166,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (vport->load_flag & FC_UNLOADING)
goto dropit;
+ /* If NPort discovery is delayed drop incoming ELS */
+ if ((vport->fc_flag & FC_DISC_DELAYED) &&
+ (cmd != ELS_CMD_PLOGI))
+ goto dropit;
+
ndlp = lpfc_findnode_did(vport, did);
if (!ndlp) {
/* Cannot find existing Fabric ndlp, so allocate a new one */
@@ -6218,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
lpfc_send_els_event(vport, ndlp, payload);
+
+ /* If Nport discovery is delayed, reject PLOGIs */
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ rjt_err = LSRJT_UNABLE_TPC;
+ break;
+ }
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
@@ -6596,6 +6698,21 @@ void
lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
struct lpfc_nodelist *ndlp, *ndlp_fdmi;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+
+ /*
+ * If lpfc_delay_discovery parameter is set and the clean address
+ * bit is cleared and the FC fabric parameters changed, delay FC NPort
+ * discovery.
+ */
+ spin_lock_irq(shost->host_lock);
+ if (vport->fc_flag & FC_DISC_DELAYED) {
+ spin_unlock_irq(shost->host_lock);
+ mod_timer(&vport->delayed_disc_tmo,
+ jiffies + HZ * phba->fc_ratov);
+ return;
+ }
+ spin_unlock_irq(shost->host_lock);
ndlp = lpfc_findnode_did(vport, NameServer_DID);
if (!ndlp) {
@@ -6938,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *next_np;
IOCB_t *irsp = &rspiocb->iocb;
struct lpfc_iocbq *piocb;
+ struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
+ struct serv_parm *sp;
+ uint8_t fabric_param_changed;
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0123 FDISC completes. x%x/x%x prevDID: x%x\n",
@@ -6981,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
- if ((vport->fc_prevDID != vport->fc_myDID) &&
+ prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+ sp = prsp->virt + sizeof(uint32_t);
+ fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
+ memcpy(&vport->fabric_portname, &sp->portName,
+ sizeof(struct lpfc_name));
+ memcpy(&vport->fabric_nodename, &sp->nodeName,
+ sizeof(struct lpfc_name));
+ if (fabric_param_changed &&
!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
/* If our NportID changed, we need to ensure all
* remaining NPORTs get unreg_login'ed so we can
@@ -7582,6 +7709,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_vport_delete_els_xri_aborted -Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ list_for_each_entry_safe(sglq_entry, sglq_next,
+ &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+ if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
+ sglq_entry->ndlp = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ return;
+}
+
+/**
* lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the els xri abort wcqe structure.
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index bb015960dbc9..154c715fb3af 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_ramp_down_queue_handler(phba);
if (work_port_events & WORKER_RAMP_UP_QUEUE)
lpfc_ramp_up_queue_handler(phba);
+ if (work_port_events & WORKER_DELAYED_DISC_TMO)
+ lpfc_delayed_disc_timeout_handler(vport);
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport)
lpfc_port_link_failure(vport);
+ /* Stop delayed Nport discovery */
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_DISC_DELAYED;
+ spin_unlock_irq(shost->host_lock);
+ del_timer_sync(&vport->delayed_disc_tmo);
}
int
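[Editor's note] One detail worth noting in the linkdown path above: del_timer_sync() waits for a
concurrently running timer handler to finish, so any lock the handler itself may take has to be
dropped before calling it. The shape of the idiom, as a standalone sketch:

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);	/* drop locks the handler may take */
	del_timer_sync(&vport->delayed_disc_tmo);	/* waits for a running handler */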
@@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
- lpfc_cleanup_vports_rrqs(vport);
+ lpfc_cleanup_vports_rrqs(vport, NULL);
/*
* This shost reference might have been taken at the beginning of
* lpfc_vport_delete()
@@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (vport->phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_nlp_put(ndlp);
return;
}
@@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
list_del_init(&ndlp->els_retry_evt.evt_listp);
list_del_init(&ndlp->dev_loss_evt.evt_listp);
-
+ lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
return 0;
@@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *ndlp;
+ unsigned long iflags;
- spin_lock_irq(shost->host_lock);
+ spin_lock_irqsave(shost->host_lock, iflags);
ndlp = __lpfc_findnode_did(vport, did);
- spin_unlock_irq(shost->host_lock);
+ spin_unlock_irqrestore(shost->host_lock, iflags);
return ndlp;
}
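[Editor's note] A brief aside on the locking change above (the snippet is illustrative, not part of
the patch): spin_unlock_irq() unconditionally re-enables interrupts, so the plain _irq variants are
only safe from contexts where interrupts are known to be on. The irqsave variant records the prior
interrupt state and restores it, which lets lpfc_findnode_did() be called with interrupts already
disabled:

	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);	/* saves current IRQ state */
	/* ... critical section ... */
	spin_unlock_irqrestore(shost->host_lock, flags);	/* restores it, rather than forcing IRQs on */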
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 96ed3ba6ba95..94ae37c5111a 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -341,6 +341,12 @@ struct csp {
uint8_t bbCreditMsb;
uint8_t bbCreditlsb; /* FC Word 0, byte 3 */
+/*
+ * Word 1 Bit 31 in the common service parameters is overloaded:
+ * in a FLOGI request it is the multiple NPort request bit;
+ * in a FLOGI response it is the clean address bit.
+ */
+#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
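[Editor's note] The helper lpfc_check_clean_addr_bit() called from lpfc_cmpl_els_fdisc() earlier in
this patch is not shown in any hunk; the sketch below is an assumption of how the overloaded field
might be consumed when an FDISC/FLOGI ACC is parsed (only the helper's name and call site appear in
the patch):

	/* Hypothetical sketch -- body is an assumption, for illustration only. */
	static uint8_t
	lpfc_check_clean_addr_bit(struct lpfc_vport *vport, struct serv_parm *sp)
	{
		uint8_t fabric_param_changed = 0;

		/* Did the assigned DID or the fabric WWPN/WWNN change? */
		if ((vport->fc_prevDID != vport->fc_myDID) ||
		    memcmp(&vport->fabric_portname, &sp->portName,
			   sizeof(struct lpfc_name)) ||
		    memcmp(&vport->fabric_nodename, &sp->nodeName,
			   sizeof(struct lpfc_name)))
			fabric_param_changed = 1;

		/*
		 * Callers combine this result with sp->cmn.clean_address_bit
		 * (the overloaded Word 1 Bit 31 above) and the
		 * lpfc_delay_discovery parameter to decide whether to set
		 * FC_DISC_DELAYED and defer NPort discovery.
		 */
		return fabric_param_changed;
	}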
@@ -3198,7 +3204,10 @@ typedef struct {
#define IOERR_SLER_RRQ_RJT_ERR 0x4C
#define IOERR_SLER_RRQ_RETRY_ERR 0x4D
#define IOERR_SLER_ABTS_ERR 0x4E
-
+#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0
+#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1
+#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2
+#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3
#define IOERR_DRVR_MASK 0x100
#define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */
#define IOERR_SLI_BRESET 0x102
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 94c1aa1136de..c7178d60c7bf 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -778,6 +778,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
+#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
/* FCoE Opcodes */
#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2
+#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7
#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
+#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
};
struct lpfc_mbx_supp_pages {
@@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages {
#define LPFC_SLI4_PARAMETERS 2
};
-struct lpfc_mbx_sli4_params {
+struct lpfc_mbx_pc_sli4_params {
uint32_t word1;
#define qs_SHIFT 0
#define qs_MASK 0x00000001
@@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params {
uint32_t rsvd_13_63[51];
};
+struct lpfc_sli4_parameters {
+ uint32_t word0;
+#define cfg_prot_type_SHIFT 0
+#define cfg_prot_type_MASK 0x000000FF
+#define cfg_prot_type_WORD word0
+ uint32_t word1;
+#define cfg_ft_SHIFT 0
+#define cfg_ft_MASK 0x00000001
+#define cfg_ft_WORD word1
+#define cfg_sli_rev_SHIFT 4
+#define cfg_sli_rev_MASK 0x0000000f
+#define cfg_sli_rev_WORD word1
+#define cfg_sli_family_SHIFT 8
+#define cfg_sli_family_MASK 0x0000000f
+#define cfg_sli_family_WORD word1
+#define cfg_if_type_SHIFT 12
+#define cfg_if_type_MASK 0x0000000f
+#define cfg_if_type_WORD word1
+#define cfg_sli_hint_1_SHIFT 16
+#define cfg_sli_hint_1_MASK 0x000000ff
+#define cfg_sli_hint_1_WORD word1
+#define cfg_sli_hint_2_SHIFT 24
+#define cfg_sli_hint_2_MASK 0x0000001f
+#define cfg_sli_hint_2_WORD word1
+ uint32_t word2;
+ uint32_t word3;
+ uint32_t word4;
+#define cfg_cqv_SHIFT 14
+#define cfg_cqv_MASK 0x00000003
+#define cfg_cqv_WORD word4
+ uint32_t word5;
+ uint32_t word6;
+#define cfg_mqv_SHIFT 14
+#define cfg_mqv_MASK 0x00000003
+#define cfg_mqv_WORD word6
+ uint32_t word7;
+ uint32_t word8;
+#define cfg_wqv_SHIFT 14
+#define cfg_wqv_MASK 0x00000003
+#define cfg_wqv_WORD word8
+ uint32_t word9;
+ uint32_t word10;
+#define cfg_rqv_SHIFT 14
+#define cfg_rqv_MASK 0x00000003
+#define cfg_rqv_WORD word10
+ uint32_t word11;
+#define cfg_rq_db_window_SHIFT 28
+#define cfg_rq_db_window_MASK 0x0000000f
+#define cfg_rq_db_window_WORD word11
+ uint32_t word12;
+#define cfg_fcoe_SHIFT 0
+#define cfg_fcoe_MASK 0x00000001
+#define cfg_fcoe_WORD word12
+#define cfg_phwq_SHIFT 15
+#define cfg_phwq_MASK 0x00000001
+#define cfg_phwq_WORD word12
+#define cfg_loopbk_scope_SHIFT 28
+#define cfg_loopbk_scope_MASK 0x0000000f
+#define cfg_loopbk_scope_WORD word12
+ uint32_t sge_supp_len;
+ uint32_t word14;
+#define cfg_sgl_page_cnt_SHIFT 0
+#define cfg_sgl_page_cnt_MASK 0x0000000f
+#define cfg_sgl_page_cnt_WORD word14
+#define cfg_sgl_page_size_SHIFT 8
+#define cfg_sgl_page_size_MASK 0x000000ff
+#define cfg_sgl_page_size_WORD word14
+#define cfg_sgl_pp_align_SHIFT 16
+#define cfg_sgl_pp_align_MASK 0x000000ff
+#define cfg_sgl_pp_align_WORD word14
+ uint32_t word15;
+ uint32_t word16;
+ uint32_t word17;
+ uint32_t word18;
+ uint32_t word19;
+};
+
+struct lpfc_mbx_get_sli4_parameters {
+ struct mbox_header header;
+ struct lpfc_sli4_parameters sli4_parameters;
+};
+
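[Editor's note] Each SHIFT/MASK/WORD triplet above is consumed by lpfc's bf_get()/bf_set()
token-pasting macros. Their definitions are not part of this hunk; they look approximately like the
following (a sketch from the rest of lpfc_hw4.h, so treat the exact spelling as an assumption):

	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
		 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
	#define bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* e.g. extracting the SLI revision from word1 of the response: */
	sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);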
/* Mailbox Completion Queue Error Messages */
#define MB_CQE_STATUS_SUCCESS 0x0
#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
@@ -2103,7 +2192,8 @@ struct lpfc_mqe {
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
- struct lpfc_mbx_sli4_params sli4_params;
+ struct lpfc_mbx_pc_sli4_params sli4_params;
+ struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
struct lpfc_mbx_nop nop;
} un;
};
@@ -2381,6 +2471,10 @@ struct wqe_common {
#define wqe_wqes_SHIFT 15
#define wqe_wqes_MASK 0x00000001
#define wqe_wqes_WORD word10
+/* Note that this field overlaps above fields */
+#define wqe_wqid_SHIFT 1
+#define wqe_wqid_MASK 0x0000007f
+#define wqe_wqid_WORD word10
#define wqe_pri_SHIFT 16
#define wqe_pri_MASK 0x00000007
#define wqe_pri_WORD word10
@@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe {
uint32_t total_xfer_len;
uint32_t initial_xfer_len;
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_iread64_wqe {
@@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe {
uint32_t total_xfer_len; /* word 4 */
uint32_t rsrvd5; /* word 5 */
struct wqe_common wqe_com; /* words 6-11 */
- uint32_t rsvd_12_15[4]; /* word 12-15 */
+ uint32_t rsrvd12;
+ struct ulp_bde64 ph_bde; /* words 13-15 */
};
struct fcp_icmnd64_wqe {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 6d0b36aa3389..35665cfb5689 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
|| ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
&& !(phba->lmt & LMT_16Gb))) {
/* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1302 Invalid speed for this board: "
"Reset link speed to auto: x%x\n",
phba->cfg_link_speed);
@@ -945,17 +945,13 @@ static void
lpfc_rrq_timeout(unsigned long ptr)
{
struct lpfc_hba *phba;
- uint32_t tmo_posted;
unsigned long iflag;
phba = (struct lpfc_hba *)ptr;
spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
- tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE;
- if (!tmo_posted)
- phba->hba_flag |= HBA_RRQ_ACTIVE;
+ phba->hba_flag |= HBA_RRQ_ACTIVE;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
- if (!tmo_posted)
- lpfc_worker_wake_up(phba);
+ lpfc_worker_wake_up(phba);
}
/**
@@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
/* Wait for any activity on ndlps to settle */
msleep(10);
}
+ lpfc_cleanup_vports_rrqs(vport, NULL);
}
/**
@@ -2295,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
del_timer_sync(&vport->els_tmofunc);
del_timer_sync(&vport->fc_fdmitmo);
+ del_timer_sync(&vport->delayed_disc_tmo);
lpfc_can_disctmo(vport);
return;
}
@@ -2355,6 +2353,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->fabric_block_timer);
del_timer_sync(&phba->eratt_poll);
del_timer_sync(&phba->hb_tmofunc);
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ del_timer_sync(&phba->rrq_tmr);
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ }
phba->hb_outstanding = 0;
switch (phba->pci_dev_grp) {
@@ -2732,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
init_timer(&vport->els_tmofunc);
vport->els_tmofunc.function = lpfc_els_timeout;
vport->els_tmofunc.data = (unsigned long)vport;
+
+ init_timer(&vport->delayed_disc_tmo);
+ vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
+ vport->delayed_disc_tmo.data = (unsigned long)vport;
+
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
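[Editor's note] The paired expiry handler lpfc_delayed_disc_tmo() wired up above is not visible in
this excerpt. Under the pre-timer_setup() API used here it would follow the usual pattern of flagging
work and waking the worker thread; the body below is an assumption (WORKER_DELAYED_DISC_TMO and
lpfc_delayed_disc_timeout_handler() do appear earlier in the patch):

	/* Hypothetical sketch of the timer callback registered above. */
	void
	lpfc_delayed_disc_tmo(unsigned long ptr)
	{
		struct lpfc_vport *vport = (struct lpfc_vport *)ptr;
		struct lpfc_hba *phba = vport->phba;
		unsigned long iflag;

		spin_lock_irqsave(&vport->work_port_lock, iflag);
		/* ask the worker thread to run the delayed-discovery handler */
		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
		spin_unlock_irqrestore(&vport->work_port_lock, iflag);
		lpfc_worker_wake_up(phba);
	}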
@@ -4283,36 +4290,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_bsmbx;
}
- /* Get the Supported Pages. It is always available. */
+ /* Get the Supported Pages if PORT_CAPABILITIES is supported by the port. */
lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (unlikely(rc)) {
- rc = -EIO;
- mempool_free(mboxq, phba->mbox_mem_pool);
- goto out_free_bsmbx;
- }
-
- mqe = &mboxq->u.mqe;
- memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
- LPFC_MAX_SUPPORTED_PAGES);
- for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
- switch (pn_page[i]) {
- case LPFC_SLI4_PARAMETERS:
- phba->sli4_hba.pc_sli4_params.supported = 1;
- break;
- default:
- break;
+ if (!rc) {
+ mqe = &mboxq->u.mqe;
+ memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
+ LPFC_MAX_SUPPORTED_PAGES);
+ for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
+ switch (pn_page[i]) {
+ case LPFC_SLI4_PARAMETERS:
+ phba->sli4_hba.pc_sli4_params.supported = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ /* Read the port's SLI4 Parameters capabilities if supported. */
+ if (phba->sli4_hba.pc_sli4_params.supported)
+ rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ if (rc) {
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto out_free_bsmbx;
}
}
-
- /* Read the port's SLI4 Parameters capabilities if supported. */
- if (phba->sli4_hba.pc_sli4_params.supported)
- rc = lpfc_pc_sli4_params_get(phba, mboxq);
+ /*
+ * Get sli4 parameters that override parameters from Port capabilities.
+	 * If this call fails, it is not a critical error, so continue loading.
+ */
+ lpfc_get_sli4_parameters(phba, mboxq);
mempool_free(mboxq, phba->mbox_mem_pool);
- if (rc) {
- rc = -EIO;
- goto out_free_bsmbx;
- }
/* Create all the SLI4 queues */
rc = lpfc_sli4_queue_create(phba);
if (rc)
@@ -7810,7 +7818,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mqe = &mboxq->u.mqe;
/* Read the port's SLI4 Parameters port capabilities */
- lpfc_sli4_params(mboxq);
+ lpfc_pc_sli4_params(mboxq);
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
else {
@@ -7854,6 +7862,66 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
/**
+ * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
+ *
+ * This function is called in the SLI4 code path to read the port's
+ * sli4 capabilities.
+ *
+ * This function may be called from any context that can block-wait
+ * for the completion. The expectation is that this routine is
+ * typically called from probe_one or from the online routine.
+ **/
+int
+lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+ int rc;
+ struct lpfc_mqe *mqe = &mboxq->u.mqe;
+ struct lpfc_pc_sli4_params *sli4_params;
+ int length;
+ struct lpfc_sli4_parameters *mbx_sli4_parameters;
+
+ /* Read the port's SLI4 Config Parameters */
+ length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
+ length, LPFC_SLI4_MBX_EMBED);
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ else
+ rc = lpfc_sli_issue_mbox_wait(phba, mboxq,
+ lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG));
+ if (unlikely(rc))
+ return rc;
+ sli4_params = &phba->sli4_hba.pc_sli4_params;
+ mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
+ sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
+ sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
+ sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
+ sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
+ mbx_sli4_parameters);
+ sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
+ mbx_sli4_parameters);
+ if (bf_get(cfg_phwq, mbx_sli4_parameters))
+ phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
+ sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
+	sli4_params->loopbk_scope = bf_get(cfg_loopbk_scope, mbx_sli4_parameters);
+ sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
+ sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
+ sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
+ sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+ sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
+ mbx_sli4_parameters);
+ sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
+ mbx_sli4_parameters);
+ return 0;
+}
+
+/**
* lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 23403c650207..dba32dfdb59b 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
- mb->un.varCfgPort.cdss = 1; /* Configure Security */
+ if (phba->cfg_enable_dss)
+ mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
@@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
* @mbox: pointer to lpfc mbox command.
* @subsystem: The sli4 config sub mailbox subsystem.
* @opcode: The sli4 config sub mailbox command opcode.
- * @length: Length of the sli4 config mailbox command.
+ * @length: Length of the sli4 config mailbox command (including sub-header).
*
* This routine sets up the header fields of SLI4 specific mailbox command
* for sending IOCTL command.
@@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
if (emb) {
/* Set up main header fields */
bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
- sli4_config->header.cfg_mhdr.payload_length =
- LPFC_MBX_CMD_HDR_LENGTH + length;
+ sli4_config->header.cfg_mhdr.payload_length = length;
/* Set up sub-header fields following main header */
bf_set(lpfc_mbox_hdr_opcode,
&sli4_config->header.cfg_shdr.request, opcode);
bf_set(lpfc_mbox_hdr_subsystem,
&sli4_config->header.cfg_shdr.request, subsystem);
- sli4_config->header.cfg_shdr.request.request_length = length;
+ sli4_config->header.cfg_shdr.request.request_length =
+ length - LPFC_MBX_CMD_HDR_LENGTH;
return length;
}
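[Editor's note] The accounting change is easiest to see with a concrete caller. For the
GET_SLI4_PARAMETERS command issued in lpfc_init.c above, the numbers work out as follows (a sketch;
the struct sizes stay symbolic):

	/* Caller now passes the payload size minus only the main cfg header,
	 * i.e. the embedded sub-header plus the command body: */
	length = sizeof(struct lpfc_mbx_get_sli4_parameters) -
		 sizeof(struct lpfc_sli4_cfg_mhdr);

	/* Old scheme: payload_length = LPFC_MBX_CMD_HDR_LENGTH + length,
	 * which double-counted the sub-header already inside 'length'.
	 * New scheme: */
	payload_length = length;				/* sub-header + body */
	request_length = length - LPFC_MBX_CMD_HDR_LENGTH;	/* body only */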
@@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
/* Set up host requested features. */
bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+ bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);
/* Enable DIF (block guard) only if configured to do so. */
if (phba->cfg_enable_bg)
@@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox)
}
/**
- * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params
- * mailbox command.
+ * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
* @mbox: pointer to lpfc mbox command to initialize.
*
* The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
* retrieve the particular SLI4 features supported by the port.
**/
void
-lpfc_sli4_params(struct lpfcMboxq *mbox)
+lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
{
- struct lpfc_mbx_sli4_params *sli4_params;
+ struct lpfc_mbx_pc_sli4_params *sli4_params;
memset(mbox, 0, sizeof(*mbox));
sli4_params = &mbox->u.mqe.un.sli4_params;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d85a7423a694..52b35159fc35 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_maxframe =
((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb;
- /* no need to reg_login if we are already in one of these states */
+ /*
+ * Need to unreg_login if we are already in one of these states and
+ * change to NPR state. This will block the port until after the ACC
+ * completes and the reg_login is issued and completed.
+ */
switch (ndlp->nlp_state) {
case NLP_STE_NPR_NODE:
if (!(ndlp->nlp_flag & NLP_NPR_ADISC))
@@ -359,8 +363,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
case NLP_STE_PRLI_ISSUE:
case NLP_STE_UNMAPPED_NODE:
case NLP_STE_MAPPED_NODE:
- lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
- return 1;
+ lpfc_unreg_rpi(vport, ndlp);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
}
if ((vport->fc_flag & FC_PT2PT) &&
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c97751c95d77..bf34178b80bf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -609,6 +609,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
}
/**
+ * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
+ * @vport: pointer to lpfc vport data structure.
+ *
+ * This routine is invoked by the vport cleanup for deletions and the cleanup
+ * for an ndlp on removal.
+ **/
+void
+lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_scsi_buf *psb, *next_psb;
+ unsigned long iflag = 0;
+
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_for_each_entry_safe(psb, next_psb,
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+ if (psb->rdata && psb->rdata->pnode
+ && psb->rdata->pnode->vport == vport)
+ psb->rdata = NULL;
+ }
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
+
+/**
* lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
* @phba: pointer to lpfc hba data structure.
* @axri: pointer to the fcp xri abort wcqe structure.
@@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
psb->status = IOSTAT_SUCCESS;
spin_unlock(
&phba->sli4_hba.abts_scsi_buf_list_lock);
- ndlp = psb->rdata->pnode;
+ if (psb->rdata && psb->rdata->pnode)
+ ndlp = psb->rdata->pnode;
+ else
+ ndlp = NULL;
+
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (ndlp)
@@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
- struct lpfc_scsi_buf *lpfc_cmd = NULL;
- struct lpfc_scsi_buf *start_lpfc_cmd = NULL;
- struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
+	struct lpfc_scsi_buf *lpfc_cmd;
unsigned long iflag = 0;
int found = 0;
spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
- while (!found && lpfc_cmd) {
+ list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list,
+ list) {
if (lpfc_test_rrq_active(phba, ndlp,
- lpfc_cmd->cur_iocbq.sli4_xritag)) {
- lpfc_release_scsi_buf_s4(phba, lpfc_cmd);
- spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
- list_remove_head(scsi_buf_list, lpfc_cmd,
- struct lpfc_scsi_buf, list);
- spin_unlock_irqrestore(&phba->scsi_buf_list_lock,
- iflag);
- if (lpfc_cmd == start_lpfc_cmd) {
- lpfc_cmd = NULL;
- break;
- } else
- continue;
- }
+ lpfc_cmd->cur_iocbq.sli4_xritag))
+ continue;
+ list_del(&lpfc_cmd->list);
found = 1;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->nonsg_phys = 0;
lpfc_cmd->prot_seg_cnt = 0;
+ break;
}
- return lpfc_cmd;
+	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+	if (!found)
+		return NULL;
+	return lpfc_cmd;
}
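[Editor's note] The rewritten allocator above deletes an entry inside list_for_each_entry() rather
than the _safe variant; that is correct only because the loop breaks out immediately after the
deletion. The general rule, as a standalone sketch (usable() is a placeholder):

	list_for_each_entry(pos, &head, list) {
		if (usable(pos)) {
			list_del(&pos->list);
			break;	/* must not keep iterating after list_del() */
		}
	}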
/**
* lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
@@ -1981,12 +2004,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+ struct sli4_sge *first_data_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
uint32_t num_bde = 0;
uint32_t dma_len;
uint32_t dma_offset = 0;
int nseg;
+ struct ulp_bde64 *bde;
/*
* There are three possibilities here - use scatter-gather segment, use
@@ -2011,7 +2036,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl += 1;
-
+ first_data_sgl = sgl;
lpfc_cmd->seg_cnt = nseg;
if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
@@ -2047,6 +2072,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
dma_offset += dma_len;
sgl++;
}
+ /* setup the performance hint (first data BDE) if enabled */
+ if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) {
+ bde = (struct ulp_bde64 *)
+ &(iocb_cmd->unsli3.sli3Words[5]);
+ bde->addrLow = first_data_sgl->addr_lo;
+ bde->addrHigh = first_data_sgl->addr_hi;
+ bde->tus.f.bdeSize =
+ le32_to_cpu(first_data_sgl->sge_len);
+ bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ bde->tus.w = cpu_to_le32(bde->tus.w);
+ }
} else {
sgl += 1;
/* clear the last flag in the fcp_rsp map entry */
@@ -2471,6 +2507,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_worker_wake_up(phba);
break;
case IOSTAT_LOCAL_REJECT:
+ case IOSTAT_REMOTE_STOP:
+ if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
+ lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
+ lpfc_cmd->result ==
+ IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
+ cmd->result = ScsiResult(DID_NO_CONNECT, 0);
+ break;
+ }
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
@@ -2478,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
cmd->result = ScsiResult(DID_REQUEUE, 0);
break;
}
-
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
@@ -2497,7 +2542,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
"on unprotected cmd\n");
}
}
-
+ if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
+ && (phba->sli_rev == LPFC_SLI_REV4)
+ && (pnode && NLP_CHK_NODE_ACT(pnode))) {
+			/* This IO was aborted by the target; we don't
+			 * know the rxid and, because we did not send the
+			 * ABTS, we cannot generate an RRQ.
+ */
+ lpfc_set_rrq_active(phba, pnode,
+ lpfc_cmd->cur_iocbq.sli4_xritag,
+ 0, 0);
+ }
/* else: fall through */
default:
cmd->result = ScsiResult(DID_ERROR, 0);
@@ -2508,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
SAM_STAT_BUSY);
- } else {
+ } else
cmd->result = ScsiResult(DID_OK, 0);
- }
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
@@ -3004,11 +3058,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* transport is still transitioning.
*/
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
+ cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
goto out_fail_command;
}
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
- goto out_host_busy;
+ goto out_tgt_busy;
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
if (lpfc_cmd == NULL) {
@@ -3125,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
out_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
+ out_tgt_busy:
+ return SCSI_MLQUEUE_TARGET_BUSY;
+
out_fail_command:
done(cmnd);
return 0;
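[Editor's note] The new out_tgt_busy path changes the throttling granularity:
SCSI_MLQUEUE_HOST_BUSY makes the midlayer requeue and temporarily stop dispatching to the whole
host, whereas SCSI_MLQUEUE_TARGET_BUSY stalls only the target whose per-node queue depth is
exhausted, so other targets on the same HBA keep moving. In sketch form, mirroring the check above:

	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		return SCSI_MLQUEUE_TARGET_BUSY;	/* stall this target only */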
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index a359d2b873ce..2ee0374a9908 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* set consumption flag every once in a while */
if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
-
+ if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
/* Update the host index before invoking device */
@@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t adj_xri;
struct lpfc_node_rrq *rrq;
int empty;
+ uint32_t did = 0;
+
+ if (!ndlp)
+ return -EINVAL;
+
+ if (!phba->cfg_enable_rrq)
+ return -EINVAL;
+
+ if (phba->pport->load_flag & FC_UNLOADING) {
+ phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+ goto out;
+ }
+ did = ndlp->nlp_DID;
/*
* set the active bit even if there is no mem available.
*/
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
- if (!ndlp)
- return -EINVAL;
+
+ if (NLP_CHK_FREE_REQ(ndlp))
+ goto out;
+
+ if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+ goto out;
+
if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
- return -EINVAL;
+ goto out;
+
rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
if (rrq) {
rrq->send_rrq = send_rrq;
@@ -553,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
rrq->vport = ndlp->vport;
rrq->rxid = rxid;
empty = list_empty(&phba->active_rrq_list);
- if (phba->cfg_enable_rrq && send_rrq)
- /*
- * We need the xri before we can add this to the
- * phba active rrq list.
- */
- rrq->send_rrq = send_rrq;
- else
- rrq->send_rrq = 0;
+ rrq->send_rrq = send_rrq;
list_add_tail(&rrq->list, &phba->active_rrq_list);
if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
phba->hba_flag |= HBA_RRQ_ACTIVE;
@@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
return 0;
}
- return -ENOMEM;
+out:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2921 Can't set rrq active xri:0x%x rxid:0x%x"
+ " DID:0x%x Send:%d\n",
+ xritag, rxid, did, send_rrq);
+ return -EINVAL;
}
/**
- * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
+ * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
* @phba: Pointer to HBA context object.
* @xritag: xri used in this exchange.
* @rrq: The RRQ to be cleared.
*
- * This function is called with hbalock held. This function
+ * This function clears the RRQ active bit in the ndlp's xri_bitmap and
+ * frees the rrq back to the pool.
**/
-static void
-__lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
+void
+lpfc_clr_rrq_active(struct lpfc_hba *phba,
+ uint16_t xritag,
+ struct lpfc_node_rrq *rrq)
{
uint16_t adj_xri;
- struct lpfc_nodelist *ndlp;
+ struct lpfc_nodelist *ndlp = NULL;
- ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
+ if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
+ ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
/* The target DID could have been swapped (cable swap)
* we should use the ndlp from the findnode if it is
* available.
*/
- if (!ndlp)
+ if ((!ndlp) && rrq->ndlp)
ndlp = rrq->ndlp;
+ if (!ndlp)
+ goto out;
+
adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
rrq->send_rrq = 0;
rrq->xritag = 0;
rrq->rrq_stop_time = 0;
}
+out:
mempool_free(rrq, phba->rrq_pool);
}
@@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(send_rrq);
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov + 1);
list_for_each_entry_safe(rrq, nextrrq,
- &phba->active_rrq_list, list) {
- if (time_after(jiffies, rrq->rrq_stop_time)) {
- list_del(&rrq->list);
- if (!rrq->send_rrq)
- /* this call will free the rrq */
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- else {
- /* if we send the rrq then the completion handler
- * will clear the bit in the xribitmap.
- */
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- if (lpfc_send_rrq(phba, rrq)) {
- lpfc_clr_rrq_active(phba, rrq->xritag,
- rrq);
- }
- spin_lock_irqsave(&phba->hbalock, iflags);
- }
- } else if (time_before(rrq->rrq_stop_time, next_time))
+ &phba->active_rrq_list, list) {
+ if (time_after(jiffies, rrq->rrq_stop_time))
+ list_move(&rrq->list, &send_rrq);
+ else if (time_before(rrq->rrq_stop_time, next_time))
next_time = rrq->rrq_stop_time;
}
spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
+ list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
+ list_del(&rrq->list);
+ if (!rrq->send_rrq)
+ /* this call will free the rrq */
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ else if (lpfc_send_rrq(phba, rrq)) {
+ /* if we send the rrq then the completion handler
+ * will clear the bit in the xribitmap.
+ */
+ lpfc_clr_rrq_active(phba, rrq->xritag,
+ rrq);
+ }
+ }
}
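[Editor's note] The rework above is an instance of a common pattern: collect expired entries onto a
private list while holding the lock, then process them after dropping it, so that lpfc_send_rrq()
never runs under hbalock. Generic shape (a sketch; the names are placeholders):

	LIST_HEAD(tmp);

	spin_lock_irqsave(&lock, flags);
	list_for_each_entry_safe(pos, next, &shared, list)
		if (expired(pos))
			list_move(&pos->list, &tmp);
	spin_unlock_irqrestore(&lock, flags);

	list_for_each_entry_safe(pos, next, &tmp, list) {
		list_del(&pos->list);
		process(pos);	/* may sleep or take other locks safely */
	}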
/**
@@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
/**
* lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
* @vport: Pointer to vport context object.
- *
- * Remove all active RRQs for this vport from the phba->active_rrq_list and
- * clear the rrq.
+ * @ndlp: Pointer to the lpfc_nodelist structure.
+ *
+ * If @ndlp is NULL, remove all active RRQs for this vport from the
+ * phba->active_rrq_list and clear each rrq.
+ * If @ndlp is not NULL, only remove the rrqs for this vport and this ndlp.
**/
void
-lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
+lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_node_rrq *rrq;
struct lpfc_node_rrq *nextrrq;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
- spin_lock_irqsave(&phba->hbalock, iflags);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
- if (rrq->vport == vport) {
- list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
- }
+ if (!ndlp) {
+ lpfc_sli4_vport_delete_els_xri_aborted(vport);
+ lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
}
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
+ if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
+ list_move(&rrq->list, &rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
+ list_del(&rrq->list);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ }
}
/**
@@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
struct lpfc_node_rrq *nextrrq;
unsigned long next_time;
unsigned long iflags;
+ LIST_HEAD(rrq_list);
if (phba->sli_rev != LPFC_SLI_REV4)
return;
spin_lock_irqsave(&phba->hbalock, iflags);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
next_time = jiffies + HZ * (phba->fc_ratov * 2);
- list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
+ list_splice_init(&phba->active_rrq_list, &rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
list_del(&rrq->list);
- __lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+ lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
}
- spin_unlock_irqrestore(&phba->hbalock, iflags);
if (!list_empty(&phba->active_rrq_list))
mod_timer(&phba->rrq_tmr, next_time);
}
/**
- * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
+ * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
* @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
@@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
* returns 0 = rrq not active for this xri
* 1 = rrq is valid for this xri.
**/
-static int
-__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
+int
+lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
uint16_t xritag)
{
uint16_t adj_xri;
@@ -802,52 +836,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
}
/**
- * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @xritag: xri used in this exchange.
- * @rrq: The RRQ to be cleared.
- *
- * This function is takes the hbalock.
- **/
-void
-lpfc_clr_rrq_active(struct lpfc_hba *phba,
- uint16_t xritag,
- struct lpfc_node_rrq *rrq)
-{
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- __lpfc_clr_rrq_active(phba, xritag, rrq);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return;
-}
-
-
-
-/**
- * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: Targets nodelist pointer for this exchange.
- * @xritag the xri in the bitmap to test.
- *
- * This function takes the hbalock.
- * returns 0 = rrq not active for this xri
- * 1 = rrq is valid for this xri.
- **/
-int
-lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
- uint16_t xritag)
-{
- int ret;
- unsigned long iflags;
-
- spin_lock_irqsave(&phba->hbalock, iflags);
- ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- return ret;
-}
-
-/**
* __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
* @piocb: Pointer to the iocbq.
@@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
return NULL;
adj_xri = sglq->sli4_xritag -
phba->sli4_hba.max_cfg_param.xri_base;
- if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
+ if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
*/
@@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
} else {
sglq->state = SGL_FREED;
sglq->ndlp = NULL;
- list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+ list_add_tail(&sglq->list,
+ &phba->sli4_hba.lpfc_sgl_list);
/* Check if TXQ queue needs to be serviced */
if (pring->txq_cnt)
@@ -4817,7 +4806,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"0378 No support for fcpi mode.\n");
ftr_rsp++;
}
-
+ if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
+ phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
+ else
+ phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
/*
* If the port cannot support the host's requested features
* then turn off the global config parameters to disable the
@@ -5004,7 +4996,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
out_unset_queue:
/* Unset all the queues set up in this routine when error out */
if (rc)
@@ -10478,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
cq->type = type;
cq->subtype = subtype;
cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+ cq->assoc_qid = eq->queue_id;
cq->host_index = 0;
cq->hba_index = 0;
@@ -10672,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
goto out;
}
mq->type = LPFC_MQ;
+ mq->assoc_qid = cq->queue_id;
mq->subtype = subtype;
mq->host_index = 0;
mq->hba_index = 0;
@@ -10759,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
goto out;
}
wq->type = LPFC_WQ;
+ wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
@@ -10876,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
hrq->type = LPFC_HRQ;
+ hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
hrq->host_index = 0;
hrq->hba_index = 0;
@@ -10936,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
goto out;
}
drq->type = LPFC_DRQ;
+ drq->assoc_qid = cq->queue_id;
drq->subtype = subtype;
drq->host_index = 0;
drq->hba_index = 0;
@@ -11189,7 +11187,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
if (!mbox)
return -ENOMEM;
length = (sizeof(struct lpfc_mbx_rq_destroy) -
- sizeof(struct mbox_header));
+ sizeof(struct lpfc_sli4_cfg_mhdr));
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
length, LPFC_SLI4_MBX_EMBED);
@@ -11279,7 +11277,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
sizeof(struct lpfc_mbx_post_sgl_pages) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
&mbox->u.mqe.un.post_sgl_pages;
@@ -12402,7 +12400,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
sizeof(struct lpfc_mbx_post_hdr_tmpl) -
- sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+ sizeof(struct lpfc_sli4_cfg_mhdr),
+ LPFC_SLI4_MBX_EMBED);
bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
hdr_tmpl, rpi_page->page_count);
bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index c7217d579e0f..595056b89608 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -125,9 +125,9 @@ struct lpfc_queue {
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
uint32_t queue_id; /* Queue ID assigned by the hardware */
+ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
struct list_head page_list;
uint32_t page_count; /* Number of pages allocated for this queue */
-
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
union sli4_qe qe[1]; /* array to index entries (must be last) */
@@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params {
uint32_t hdr_pp_align;
uint32_t sgl_pages_max;
uint32_t sgl_pp_align;
+ uint8_t cqv;
+ uint8_t mqv;
+ uint8_t wqv;
+ uint8_t rqv;
};
/* SLI4 HBA data structure entries */
@@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
+void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 386cf92de492..0a4d376dbca5 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2010 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.20"
+#define LPFC_DRIVER_VERSION "8.3.21"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 6b8d2952e32f..30ba5440c67a 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -464,6 +464,7 @@ disable_vport(struct fc_vport *fc_vport)
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
long timeout;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (ndlp && NLP_CHK_NODE_ACT(ndlp)
@@ -498,6 +499,9 @@ disable_vport(struct fc_vport *fc_vport)
* scsi_host_put() to release the vport.
*/
lpfc_mbx_unreg_vpi(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(shost->host_lock);
lpfc_vport_set_state(vport, FC_VPORT_DISABLED);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 1b5e375732c0..635b228c3ead 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.05.29-rc1"
-#define MEGASAS_RELDATE "Dec. 7, 2010"
-#define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010"
+#define MEGASAS_VERSION "00.00.05.34-rc1"
+#define MEGASAS_RELDATE "Feb. 24, 2011"
+#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011"
/*
* Device IDs
@@ -723,6 +723,7 @@ struct megasas_ctrl_info {
MEGASAS_MAX_DEV_PER_CHANNEL)
#define MEGASAS_MAX_SECTORS (2*1024)
+#define MEGASAS_MAX_SECTORS_IEEE (2*128)
#define MEGASAS_DBG_LVL 1
#define MEGASAS_FW_BUSY 1
@@ -1477,4 +1478,7 @@ struct megasas_mgmt_info {
int max_index;
};
+#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
+
#endif /*LSI_MEGARAID_SAS_H */
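[Editor's note] These two defines give the driver raw access to the MSI-X Message Control register
(PCI_MSIX_FLAGS_ENABLE mirrors bit 15 of that register, per the PCI spec). They are exercised by the
kdump path added to megasas_probe_one() further down; the access pattern is simply:

	int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	u16 control;

	if (pos) {
		pci_read_config_word(pdev, msi_control_reg(pos), &control);
		if (control & PCI_MSIX_FLAGS_ENABLE)	/* left enabled by the crashed kernel */
			pci_write_config_word(pdev, msi_control_reg(pos),
					      control & ~PCI_MSIX_FLAGS_ENABLE);
	}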
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 5d6d07bd1cd0..f875e818905f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,12 +18,13 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.05.29-rc1
+ * Version : v00.00.05.34-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
* Sumant Patro
* Bo Yang
+ * Adam Radford <linuxraid@lsi.com>
*
* Send feedback to: <megaraidlinux@lsi.com>
*
@@ -134,7 +135,11 @@ spinlock_t poll_aen_lock;
void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
u8 alt_status);
-
+static u32
+megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
+static int
+megasas_adp_reset_gen2(struct megasas_instance *instance,
+ struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
@@ -554,6 +559,8 @@ static int
megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
{
u32 status;
+ u32 mfiStatus = 0;
+
/*
* Check if it is our interrupt
*/
@@ -564,6 +571,15 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
}
/*
+	 * Check whether the firmware has faulted (a firmware state change)
+	 * or has posted a reply message
+ */
+ if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) ==
+ MFI_STATE_FAULT) {
+ mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
+ } else
+ mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
+
+ /*
* Clear the interrupt by writing back the same value
*/
writel(status, &regs->outbound_intr_status);
@@ -573,7 +589,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
*/
readl(&regs->outbound_intr_status);
- return 1;
+ return mfiStatus;
}
/**
@@ -597,17 +613,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
}
/**
- * megasas_adp_reset_skinny - For controller reset
- * @regs: MFI register set
- */
-static int
-megasas_adp_reset_skinny(struct megasas_instance *instance,
- struct megasas_register_set __iomem *regs)
-{
- return 0;
-}
-
-/**
* megasas_check_reset_skinny - For controller reset check
* @regs: MFI register set
*/
@@ -625,7 +630,7 @@ static struct megasas_instance_template megasas_instance_template_skinny = {
.disable_intr = megasas_disable_intr_skinny,
.clear_intr = megasas_clear_intr_skinny,
.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
- .adp_reset = megasas_adp_reset_skinny,
+ .adp_reset = megasas_adp_reset_gen2,
.check_reset = megasas_check_reset_skinny,
.service_isr = megasas_isr,
.tasklet = megasas_complete_cmd_dpc,
@@ -740,20 +745,28 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
{
u32 retry = 0 ;
u32 HostDiag;
+ u32 *seq_offset = &reg_set->seq_offset;
+ u32 *hostdiag_offset = &reg_set->host_diag;
+
+ if (instance->instancet == &megasas_instance_template_skinny) {
+ seq_offset = &reg_set->fusion_seq_offset;
+ hostdiag_offset = &reg_set->fusion_host_diag;
+ }
+
+ writel(0, seq_offset);
+ writel(4, seq_offset);
+ writel(0xb, seq_offset);
+ writel(2, seq_offset);
+ writel(7, seq_offset);
+ writel(0xd, seq_offset);
- writel(0, &reg_set->seq_offset);
- writel(4, &reg_set->seq_offset);
- writel(0xb, &reg_set->seq_offset);
- writel(2, &reg_set->seq_offset);
- writel(7, &reg_set->seq_offset);
- writel(0xd, &reg_set->seq_offset);
msleep(1000);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( !( HostDiag & DIAG_WRITE_ENABLE) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
@@ -764,14 +777,14 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
- writel((HostDiag | DIAG_RESET_ADAPTER), &reg_set->host_diag);
+ writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
ssleep(10);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
while ( ( HostDiag & DIAG_RESET_ADAPTER) ) {
msleep(100);
- HostDiag = (u32)readl(&reg_set->host_diag);
+ HostDiag = (u32)readl(hostdiag_offset);
printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n",
retry, HostDiag);
@@ -2503,7 +2516,9 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
if ((mfiStatus = instance->instancet->clear_intr(
instance->reg_set)
) == 0) {
- return IRQ_NONE;
+ /* Hardware may not set outbound_intr_status in MSI-X mode */
+ if (!instance->msi_flag)
+ return IRQ_NONE;
}
instance->mfiStatus = mfiStatus;
@@ -2611,7 +2626,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
case MFI_STATE_FAULT:
printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
- return -ENODEV;
+ max_wait = MEGASAS_RESET_WAIT_TIME;
+ cur_state = MFI_STATE_FAULT;
+ break;
case MFI_STATE_WAIT_HANDSHAKE:
/*
@@ -3424,7 +3441,6 @@ fail_reply_queue:
megasas_free_cmds(instance);
fail_alloc_cmds:
- iounmap(instance->reg_set);
return 1;
}
@@ -3494,7 +3510,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
/* Get operational params, sge flags, send init cmd to controller */
if (instance->instancet->init_adapter(instance))
- return -ENODEV;
+ goto fail_init_adapter;
printk(KERN_ERR "megasas: INIT adapter done\n");
@@ -3543,7 +3559,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* Setup tasklet for cmd completion
*/
- tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc,
+ tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
/* Initialize the cmd completion timer */
@@ -3553,6 +3569,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
+fail_init_adapter:
fail_ready_state:
iounmap(instance->reg_set);
@@ -3820,6 +3837,10 @@ static int megasas_io_attach(struct megasas_instance *instance)
instance->max_fw_cmds - MEGASAS_INT_CMDS;
host->this_id = instance->init_id;
host->sg_tablesize = instance->max_num_sge;
+
+ if (instance->fw_support_ieee)
+ instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
+
/*
* Check if the module parameter value for max_sectors can be used
*/
@@ -3899,9 +3920,26 @@ fail_set_dma_mask:
static int __devinit
megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int rval;
+ int rval, pos;
struct Scsi_Host *host;
struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, msi_control_reg(pos),
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ msi_control_reg(pos),
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
/*
* Announce PCI information
@@ -4039,12 +4077,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
- /*
- * Initialize MFI Firmware
- */
- if (megasas_init_fw(instance))
- goto fail_init_mfi;
-
/* Try to enable MSI-X */
if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) &&
(instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) &&
@@ -4054,6 +4086,12 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->msi_flag = 1;
/*
+ * Initialize MFI Firmware
+ */
+ if (megasas_init_fw(instance))
+ goto fail_init_mfi;
+
+ /*
* Register IRQ
*/
if (request_irq(instance->msi_flag ? instance->msixentry.vector :
@@ -4105,24 +4143,23 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
instance->instancet->disable_intr(instance->reg_set);
free_irq(instance->msi_flag ? instance->msixentry.vector :
instance->pdev->irq, instance);
+fail_irq:
+ if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)
+ megasas_release_fusion(instance);
+ else
+ megasas_release_mfi(instance);
+ fail_init_mfi:
if (instance->msi_flag)
pci_disable_msix(instance->pdev);
-
- fail_irq:
- fail_init_mfi:
fail_alloc_dma_buf:
if (instance->evt_detail)
pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
- if (instance->producer) {
+ if (instance->producer)
pci_free_consistent(pdev, sizeof(u32), instance->producer,
instance->producer_h);
- megasas_release_mfi(instance);
- } else {
- megasas_release_fusion(instance);
- }
if (instance->consumer)
pci_free_consistent(pdev, sizeof(u32), instance->consumer,
instance->consumer_h);
@@ -4242,9 +4279,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
/* cancel the delayed work if this work still in queue */
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
@@ -4297,6 +4333,10 @@ megasas_resume(struct pci_dev *pdev)
if (megasas_set_dma_mask(pdev))
goto fail_set_dma_mask;
+ /* Now re-enable MSI-X */
+ if (instance->msi_flag)
+ pci_enable_msix(instance->pdev, &instance->msixentry, 1);
+
/*
* Initialize MFI Firmware
*/
@@ -4333,10 +4373,6 @@ megasas_resume(struct pci_dev *pdev)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Now re-enable MSI-X */
- if (instance->msi_flag)
- pci_enable_msix(instance->pdev, &instance->msixentry, 1);
-
/*
* Register IRQ
*/
@@ -4417,9 +4453,8 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work(
+ cancel_delayed_work_sync(
(struct delayed_work *)&ev->hotplug_work);
- flush_scheduled_work();
instance->ev = NULL;
}
@@ -4611,6 +4646,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* For each user buffer, create a mirror buffer and copy in
*/
for (i = 0; i < ioc->sge_count; i++) {
+ if (!ioc->sgl[i].iov_len)
+ continue;
+
kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
ioc->sgl[i].iov_len,
&buf_handle, GFP_KERNEL);
@@ -5177,6 +5215,7 @@ megasas_aen_polling(struct work_struct *work)
break;
case MR_EVT_LD_OFFLINE:
+ case MR_EVT_CFG_CLEARED:
case MR_EVT_LD_DELETED:
megasas_get_ld_list(instance);
for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index d6e2a663b165..145a8cffb1fa 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -81,6 +81,10 @@ u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+
+void
+megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
+
u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
struct LD_LOAD_BALANCE_INFO *lbInfo);
u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
@@ -983,13 +987,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
return 0;
-fail_alloc_cmds:
-fail_alloc_mfi_cmds:
fail_map_info:
if (i == 1)
dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
fusion->ld_map[0], fusion->ld_map_phys[0]);
fail_ioc_init:
+ megasas_free_cmds_fusion(instance);
+fail_alloc_cmds:
+ megasas_free_cmds(instance);
+fail_alloc_mfi_cmds:
return 1;
}
@@ -1431,8 +1437,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
/* Check if this is a system PD I/O */
- if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) &&
- (instance->pd_list[pd_index].driveType == TYPE_DISK)) {
+ if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
@@ -1455,7 +1460,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
io_request->RaidContext.VirtualDiskTgtId = device_id;
- io_request->LUN[0] = scmd->device->lun;
+ io_request->LUN[1] = scmd->device->lun;
io_request->DataLength = scsi_bufflen(scmd);
}
@@ -1479,7 +1484,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
device_id = MEGASAS_DEV_INDEX(instance, scp);
/* Zero out some fields so they don't get reused */
- io_request->LUN[0] = 0;
+ io_request->LUN[1] = 0;
io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
io_request->EEDPFlags = 0;
@@ -1743,7 +1748,7 @@ complete_cmd_fusion(struct megasas_instance *instance)
wmb();
writel(fusion->last_reply_idx,
&instance->reg_set->reply_post_host_index);
-
+ megasas_check_and_restore_queue_depth(instance);
return IRQ_HANDLED;
}
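
The LUN[0] to LUN[1] change above matches the SAM single-level (peripheral device) LUN format: byte 0 of the 8-byte field carries the address method and bus bits, byte 1 the unit number, so a LUN below 256 belongs in byte 1. A sketch of that encoding; the stock helper int_to_scsilun() performs the full multi-level conversion:

	#include <linux/string.h>
	#include <linux/types.h>

	/*
	 * Sketch only: peripheral-device LUN encoding. Byte 0 stays 0
	 * (address method 0, bus 0); the unit number goes into byte 1,
	 * which is why the driver writes io_request->LUN[1].
	 */
	static void encode_simple_lun(u8 lun_field[8], unsigned int lun)
	{
		memset(lun_field, 0, 8);
		lun_field[1] = lun & 0xff;	/* valid for LUN < 256 */
	}
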
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8be75e65f763..a3e60385787f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.16
+ * mpi2.h Version: 02.00.17
*
* Version History
* ---------------
@@ -63,6 +63,7 @@
* function codes, 0xF0 to 0xFF.
* 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
* Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -88,7 +89,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x10)
+#define MPI2_HEADER_VERSION_UNIT (0x11)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index d76a65847603..f5b9c766e28f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.15
+ * mpi2_cnfg.h Version: 02.00.16
*
* Version History
* ---------------
@@ -125,6 +125,8 @@
* define.
* Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
* Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
* --------------------------------------------------------------------------
*/
@@ -745,8 +747,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
-#define MPI2_IOUNITPAGE1_MULTI_PATHING (0x00000002)
-#define MPI2_IOUNITPAGE1_SINGLE_PATHING (0x00000000)
/* IO Unit Page 3 */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
deleted file mode 100644
index b1e88f26b748..000000000000
--- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt
+++ /dev/null
@@ -1,384 +0,0 @@
- ==============================
- Fusion-MPT MPI 2.0 Header File Change History
- ==============================
-
- Copyright (c) 2000-2010 LSI Corporation.
-
- ---------------------------------------
- Header Set Release Version: 02.00.14
- Header Set Release Date: 10-28-09
- ---------------------------------------
-
- Filename Current version Prior version
- ---------- --------------- -------------
- mpi2.h 02.00.14 02.00.13
- mpi2_cnfg.h 02.00.13 02.00.12
- mpi2_init.h 02.00.08 02.00.07
- mpi2_ioc.h 02.00.13 02.00.12
- mpi2_raid.h 02.00.04 02.00.04
- mpi2_sas.h 02.00.03 02.00.02
- mpi2_targ.h 02.00.03 02.00.03
- mpi2_tool.h 02.00.04 02.00.04
- mpi2_type.h 02.00.00 02.00.00
- mpi2_ra.h 02.00.00 02.00.00
- mpi2_hbd.h 02.00.00
- mpi2_history.txt 02.00.14 02.00.13
-
-
- * Date Version Description
- * -------- -------- ------------------------------------------------------
-
-mpi2.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
- * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
- * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved ReplyPostHostIndex register to offset 0x6C of the
- * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
- * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
- * Added union of request descriptors.
- * Added union of reply descriptors.
- * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added define for MPI2_VERSION_02_00.
- * Fixed the size of the FunctionDependent5 field in the
- * MPI2_DEFAULT_REPLY structure.
- * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
- * Removed the MPI-defined Fault Codes and extended the
- * product specific codes up to 0xEFFF.
- * Added a sixth key value for the WriteSequence register
- * and changed the flush value to 0x0.
- * Added message function codes for Diagnostic Buffer Post
- * and Diagnsotic Release.
- * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
- * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
- * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
- * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added #defines for marking a reply descriptor as unused.
- * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
- * Moved LUN field defines from mpi2_init.h.
- * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
- * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
- * In all request and reply descriptors, replaced VF_ID
- * field with MSIxIndex field.
- * Removed DevHandle field from
- * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
- * bytes reserved.
- * Added RAID Accelerator functionality.
- * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
- * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
- * Added MSI-x index mask and shift for Reply Post Host
- * Index register.
- * Added function code for Host Based Discovery Action.
- * --------------------------------------------------------------------------
-
-mpi2_cnfg.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
- * Added Manufacturing Page 11.
- * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
- * define.
- * 06-26-07 02.00.02 Adding generic structure for product-specific
- * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
- * Rework of BIOS Page 2 configuration page.
- * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
- * forms.
- * Added configuration pages IOC Page 8 and Driver
- * Persistent Mapping Page 0.
- * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
- * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
- * RAID Physical Disk Pages 0 and 1, RAID Configuration
- * Page 0).
- * Added new value for AccessStatus field of SAS Device
- * Page 0 (_SATA_NEEDS_INITIALIZATION).
- * 10-31-07 02.00.04 Added missing SEPDevHandle field to
- * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
- * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
- * NVDATA.
- * Modified IOC Page 7 to use masks and added field for
- * SASBroadcastPrimitiveMasks.
- * Added MPI2_CONFIG_PAGE_BIOS_4.
- * Added MPI2_CONFIG_PAGE_LOG_0.
- * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
- * Added SAS Device IDs.
- * Updated Integrated RAID configuration pages including
- * Manufacturing Page 4, IOC Page 6, and RAID Configuration
- * Page 0.
- * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
- * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
- * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
- * Added missing MaxNumRoutedSasAddresses field to
- * MPI2_CONFIG_PAGE_EXPANDER_0.
- * Added SAS Port Page 0.
- * Modified structure layout for
- * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
- * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
- * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
- * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
- * to 0x000000FF.
- * Added two new values for the Physical Disk Coercion Size
- * bits in the Flags field of Manufacturing Page 4.
- * Added product-specific Manufacturing pages 16 to 31.
- * Modified Flags bits for controlling write cache on SATA
- * drives in IO Unit Page 1.
- * Added new bit to AdditionalControlFlags of SAS IO Unit
- * Page 1 to control Invalid Topology Correction.
- * Added SupportedPhysDisks field to RAID Volume Page 1 and
- * added related defines.
- * Added additional defines for RAID Volume Page 0
- * VolumeStatusFlags field.
- * Modified meaning of RAID Volume Page 0 VolumeSettings
- * define for auto-configure of hot-swap drives.
- * Added PhysDiskAttributes field (and related defines) to
- * RAID Physical Disk Page 0.
- * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
- * Added three new DiscoveryStatus bits for SAS IO Unit
- * Page 0 and SAS Expander Page 0.
- * Removed multiplexing information from SAS IO Unit pages.
- * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
- * Removed Zone Address Resolved bit from PhyInfo and from
- * Expander Page 0 Flags field.
- * Added two new AccessStatus values to SAS Device Page 0
- * for indicating routing problems. Added 3 reserved words
- * to this page.
- * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
- * Inserted missing reserved field into structure for IOC
- * Page 6.
- * Added more pending task bits to RAID Volume Page 0
- * VolumeStatusFlags defines.
- * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
- * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
- * and SAS Expander Page 0 to flag a downstream initiator
- * when in simplified routing mode.
- * Removed SATA Init Failure defines for DiscoveryStatus
- * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
- * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
- * Added PortGroups, DmaGroup, and ControlGroup fields to
- * SAS Device Page 0.
- * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
- * Unit Page 6.
- * Added expander reduced functionality data to SAS
- * Expander Page 0.
- * Added SAS PHY Page 2 and SAS PHY Page 3.
- * 07-30-09 02.00.12 Added IO Unit Page 7.
- * Added new device ids.
- * Added SAS IO Unit Page 5.
- * Added partial and slumber power management capable flags
- * to SAS Device Page 0 Flags field.
- * Added PhyInfo defines for power condition.
- * Added Ethernet configuration pages.
- * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
- * Added SAS PHY Page 4 structure and defines.
- * --------------------------------------------------------------------------
-
-mpi2_init.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
- * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
- * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
- * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
- * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
- * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
- * Control field Task Attribute flags.
- * Moved LUN field defines to mpi2.h becasue they are
- * common to many structures.
- * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
- * Query Asynchronous Event.
- * Defined two new bits in the SlotStatus field of the SCSI
- * Enclosure Processor Request and Reply.
- * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
- * both SCSI IO Error Reply and SCSI Task Management Reply.
- * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
- * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
- * --------------------------------------------------------------------------
-
-mpi2_ioc.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
- * MaxTargets.
- * Added TotalImageSize field to FWDownload Request.
- * Added reserved words to FWUpload Request.
- * 06-26-07 02.00.02 Added IR Configuration Change List Event.
- * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
- * request and replaced it with
- * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
- * Replaced the MinReplyQueueDepth field of the IOCFacts
- * reply with MaxReplyDescriptorPostQueueDepth.
- * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
- * depth for the Reply Descriptor Post Queue.
- * Added SASAddress field to Initiator Device Table
- * Overflow Event data.
- * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
- * for SAS Initiator Device Status Change Event data.
- * Modified Reason Code defines for SAS Topology Change
- * List Event data, including adding a bit for PHY Vacant
- * status, and adding a mask for the Reason Code.
- * Added define for
- * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
- * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
- * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
- * the IOCFacts Reply.
- * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Moved MPI2_VERSION_UNION to mpi2.h.
- * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
- * instead of enables, and added SASBroadcastPrimitiveMasks
- * field.
- * Added Log Entry Added Event and related structure.
- * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
- * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
- * Added MaxVolumes and MaxPersistentEntries fields to
- * IOCFacts reply.
- * Added ProtocalFlags and IOCCapabilities fields to
- * MPI2_FW_IMAGE_HEADER.
- * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
- * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
- * a U16 (from a U32).
- * Removed extra 's' from EventMasks name.
- * 06-27-08 02.00.08 Fixed an offset in a comment.
- * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
- * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
- * renamed MinReplyFrameSize to ReplyFrameSize.
- * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
- * Added two new RAIDOperation values for Integrated RAID
- * Operations Status Event data.
- * Added four new IR Configuration Change List Event data
- * ReasonCode values.
- * Added two new ReasonCode defines for SAS Device Status
- * Change Event data.
- * Added three new DiscoveryStatus bits for the SAS
- * Discovery event data.
- * Added Multiplexing Status Change bit to the PhyStatus
- * field of the SAS Topology Change List event data.
- * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
- * BootFlags are now product-specific.
- * Added defines for the indivdual signature bytes
- * for MPI2_INIT_IMAGE_FOOTER.
- * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
- * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
- * define.
- * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
- * define.
- * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
- * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
- * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
- * Added two new reason codes for SAS Device Status Change
- * Event.
- * Added new event: SAS PHY Counter.
- * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
- * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
- * Added new product id family for 2208.
- * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
- * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
- * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
- * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
- * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
- * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
- * Added Host Based Discovery Phy Event data.
- * Added defines for ProductID Product field
- * (MPI2_FW_HEADER_PID_).
- * Modified values for SAS ProductID Family
- * (MPI2_FW_HEADER_PID_FAMILY_).
- * --------------------------------------------------------------------------
-
-mpi2_raid.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
- * including the Actions and ActionData.
- * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
- * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
- * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
- * can be sized by the build environment.
- * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
- * VolumeCreationFlags and marked the old one as obsolete.
- * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
- * --------------------------------------------------------------------------
-
-mpi2_sas.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
- * Control Request.
- * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
- * Request.
- * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
- * to MPI2_SGE_IO_UNION since it supports chained SGLs.
- * 05-12-10 02.00.04 Modified some comments.
- * --------------------------------------------------------------------------
-
-mpi2_targ.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to
- * BufferPostFlags field of CommandBufferPostBase Request.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 10-02-08 02.00.03 Removed NextCmdBufferOffset from
- * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST.
- * Target Status Send Request only takes a single SGE for
- * response data.
- * --------------------------------------------------------------------------
-
-mpi2_tool.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
- * structures and defines.
- * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
- * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
- * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
- * and reply messages.
- * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
- * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
- * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
- * --------------------------------------------------------------------------
-
-mpi2_type.h
- * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
- * --------------------------------------------------------------------------
-
-mpi2_ra.h
- * 05-06-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-mpi2_hbd.h
- * 10-28-09 02.00.00 Initial version.
- * --------------------------------------------------------------------------
-
-
-mpi2_history.txt Parts list history
-
-Filename 02.00.14 02.00.13 02.00.12
----------- -------- -------- --------
-mpi2.h 02.00.14 02.00.13 02.00.12
-mpi2_cnfg.h 02.00.13 02.00.12 02.00.11
-mpi2_init.h 02.00.08 02.00.07 02.00.07
-mpi2_ioc.h 02.00.13 02.00.12 02.00.11
-mpi2_raid.h 02.00.04 02.00.04 02.00.03
-mpi2_sas.h 02.00.03 02.00.02 02.00.02
-mpi2_targ.h 02.00.03 02.00.03 02.00.03
-mpi2_tool.h 02.00.04 02.00.04 02.00.03
-mpi2_type.h 02.00.00 02.00.00 02.00.00
-mpi2_ra.h 02.00.00 02.00.00 02.00.00
-mpi2_hbd.h 02.00.00
-
-Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06
-mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06
-mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03
-mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06
-mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02
-mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01
-mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
-Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
----------- -------- -------- -------- -------- -------- --------
-mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00
-mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00
-mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00
-mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00
-
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 608f6d6e6fca..fdffde1ebc0f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -6,7 +6,7 @@
* Title: MPI Serial Attached SCSI structures and definitions
* Creation Date: February 9, 2007
*
- * mpi2_sas.h Version: 02.00.04
+ * mpi2_sas.h Version: 02.00.05
*
* Version History
* ---------------
@@ -21,6 +21,7 @@
* 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
* to MPI2_SGE_IO_UNION since it supports chained SGLs.
* 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
* --------------------------------------------------------------------------
*/
@@ -163,7 +164,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST
U32 Reserved4; /* 0x14 */
U32 DataLength; /* 0x18 */
U8 CommandFIS[20]; /* 0x1C */
- MPI2_SGE_IO_UNION SGL; /* 0x20 */
+ MPI2_SGE_IO_UNION SGL; /* 0x30 */
} MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t;
@@ -246,6 +247,8 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST
#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
/* values for the PrimFlags field */
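
For illustration, a hedged sketch of using the two new operation codes in a SAS IO Unit Control request; build_ncq_control is a hypothetical helper and the usual mpt2sas request allocation and submission are assumed around it:

	#include <linux/types.h>
	#include "mpi/mpi2.h"
	#include "mpi/mpi2_sas.h"

	/*
	 * Sketch only: toggle NCQ on one device. "mpi_request" is assumed
	 * to point at a zeroed request frame obtained through the driver's
	 * normal smid allocation.
	 */
	static void build_ncq_control(Mpi2SasIoUnitControlRequest_t *mpi_request,
				      u16 handle, bool enable)
	{
		mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
		mpi_request->Operation = enable ? MPI2_SAS_OP_DEV_ENABLE_NCQ :
		    MPI2_SAS_OP_DEV_DISABLE_NCQ;
		mpi_request->DevHandle = cpu_to_le16(handle);
	}
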
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 5c6e3a67bb94..2a4bceda364b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.05
+ * mpi2_tool.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +23,8 @@
* Added MPI2_DIAG_BUF_TYPE_EXTENDED.
* Incremented MPI2_DIAG_BUF_TYPE_COUNT.
* 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
* --------------------------------------------------------------------------
*/
@@ -354,6 +356,10 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST
/* count of the number of buffer types */
#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+/* values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
+
/****************************************************************************
* Diagnostic Buffer Post reply
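
A sketch of a buffer-post request using the Flags define added above; post_trace_buffer is hypothetical and the request frame is assumed zeroed and submitted through the usual mpt2sas path:

	#include <linux/types.h>
	#include "mpi/mpi2.h"
	#include "mpi/mpi2_tool.h"

	/*
	 * Sketch only: post a trace buffer that the firmware releases on
	 * its own once full.
	 */
	static void post_trace_buffer(Mpi2DiagBufferPostRequest_t *req,
				      u64 buffer_dma, u32 length)
	{
		req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
		req->BufferType = MPI2_DIAG_BUF_TYPE_TRACE;
		req->BufferAddress = cpu_to_le64(buffer_dma);
		req->BufferLength = cpu_to_le32(length);
		req->Flags = cpu_to_le32(MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL);
	}
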
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 9ead0399808a..e8a6f1cf1e4b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -752,20 +752,19 @@ static u8
_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
{
int i;
- u8 cb_idx = 0xFF;
-
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- i = smid - ioc->hi_priority_smid;
- cb_idx = ioc->hpr_lookup[i].cb_idx;
- } else if (smid <= ioc->hba_queue_depth) {
- i = smid - ioc->internal_smid;
- cb_idx = ioc->internal_lookup[i].cb_idx;
- }
- } else {
+ u8 cb_idx;
+
+ if (smid < ioc->hi_priority_smid) {
i = smid - 1;
cb_idx = ioc->scsi_lookup[i].cb_idx;
- }
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
return cb_idx;
}
@@ -1430,7 +1429,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
struct scsi_cmnd *scmd)
{
unsigned long flags;
- struct request_tracker *request;
+ struct scsiio_tracker *request;
u16 smid;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
@@ -1442,7 +1441,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
}
request = list_entry(ioc->free_list.next,
- struct request_tracker, tracker_list);
+ struct scsiio_tracker, tracker_list);
request->scmd = scmd;
request->cb_idx = cb_idx;
smid = request->smid;
@@ -1496,48 +1495,47 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
struct chain_tracker *chain_req, *next;
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
- if (smid >= ioc->hi_priority_smid) {
- if (smid < ioc->internal_smid) {
- /* hi-priority */
- i = smid - ioc->hi_priority_smid;
- ioc->hpr_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->hpr_lookup[i].tracker_list,
- &ioc->hpr_free_list);
- } else {
- /* internal queue */
- i = smid - ioc->internal_smid;
- ioc->internal_lookup[i].cb_idx = 0xFF;
- list_add_tail(&ioc->internal_lookup[i].tracker_list,
- &ioc->internal_free_list);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
}
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- return;
- }
- /* scsiio queue */
- i = smid - 1;
- if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
- list_for_each_entry_safe(chain_req, next,
- &ioc->scsi_lookup[i].chain_list, tracker_list) {
- list_del_init(&chain_req->tracker_list);
- list_add_tail(&chain_req->tracker_list,
- &ioc->free_chain_list);
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
}
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
}
- ioc->scsi_lookup[i].cb_idx = 0xFF;
- ioc->scsi_lookup[i].scmd = NULL;
- list_add_tail(&ioc->scsi_lookup[i].tracker_list,
- &ioc->free_list);
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-
- /*
- * See _wait_for_commands_to_complete() call with regards to this code.
- */
- if (ioc->shost_recovery && ioc->pending_io_count) {
- if (ioc->pending_io_count == 1)
- wake_up(&ioc->reset_wq);
- ioc->pending_io_count--;
- }
}
/**
@@ -1725,6 +1723,31 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
}
/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
+{
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL &&
+ ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) {
+
+ switch (ioc->pdev->subsystem_device) {
+ case MPT2SAS_INTEL_RMS2LL080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS2LL040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ break;
+ }
+ }
+}
+
+/**
* _base_display_ioc_capabilities - Display IOC's capabilities.
* @ioc: per adapter object
*
@@ -1754,6 +1777,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
ioc->bios_pg3.BiosVersion & 0x000000FF);
_base_display_dell_branding(ioc);
+ _base_display_intel_branding(ioc);
printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
@@ -2252,9 +2276,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->name, (unsigned long long) ioc->request_dma));
total_sz += sz;
- sz = ioc->scsiio_depth * sizeof(struct request_tracker);
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
ioc->scsi_lookup_pages = get_order(sz);
- ioc->scsi_lookup = (struct request_tracker *)__get_free_pages(
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
GFP_KERNEL, ioc->scsi_lookup_pages);
if (!ioc->scsi_lookup) {
printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, "
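
The _base_get_cb_idx rework above (and the matching mpt2sas_base_free_smid change) replaces nested comparisons with one ascending range ladder over the smid space. A generic sketch of the dispatch, with classify_smid a hypothetical name:

	#include <linux/types.h>

	/*
	 * Sketch only: boundaries are assumed to satisfy
	 * 1 <= hi_priority_smid <= internal_smid <= hba_queue_depth + 1;
	 * anything past hba_queue_depth is invalid and maps to cb_idx 0xFF.
	 */
	enum smid_pool { POOL_SCSIIO, POOL_HIPRI, POOL_INTERNAL, POOL_INVALID };

	static enum smid_pool classify_smid(u16 smid, u16 hi_priority_smid,
					    u16 internal_smid, u16 hba_queue_depth)
	{
		if (smid < hi_priority_smid)
			return POOL_SCSIIO;	/* lookup index: smid - 1 */
		if (smid < internal_smid)
			return POOL_HIPRI;	/* index: smid - hi_priority_smid */
		if (smid <= hba_queue_depth)
			return POOL_INTERNAL;	/* index: smid - internal_smid */
		return POOL_INVALID;
	}
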
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 283568c6fb04..a3f8aa9baea4 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "07.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 07
+#define MPT2SAS_DRIVER_VERSION "08.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 08
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -101,7 +101,8 @@
#define MPT_NAME_LENGTH 32 /* generic length of strings */
#define MPT_STRING_LENGTH 64
-#define MPT_MAX_CALLBACKS 16
+#define MPT_MAX_CALLBACKS 16
+
#define CAN_SLEEP 1
#define NO_SLEEP 0
@@ -154,6 +155,20 @@
#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22
/*
+ * Intel HBA branding
+ */
+#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
+ "Intel Integrated RAID Module RMS2LL080"
+#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
+ "Intel Integrated RAID Module RMS2LL040"
+
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
+#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
+
+/*
* per target private data
*/
#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
@@ -431,14 +446,14 @@ struct chain_tracker {
};
/**
- * struct request_tracker - firmware request tracker
+ * struct scsiio_tracker - scsi mf request tracker
* @smid: system message id
* @scmd: scsi request pointer
* @cb_idx: callback index
* @chain_list: list of chains associated to this IO
* @tracker_list: list of free request (ioc->free_list)
*/
-struct request_tracker {
+struct scsiio_tracker {
u16 smid;
struct scsi_cmnd *scmd;
u8 cb_idx;
@@ -447,6 +462,19 @@ struct request_tracker {
};
/**
+ * struct request_tracker - misc mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
* struct _tr_list - target reset list
* @handle: device handle
* @state: state machine
@@ -709,7 +737,7 @@ struct MPT2SAS_ADAPTER {
u8 *request;
dma_addr_t request_dma;
u32 request_dma_sz;
- struct request_tracker *scsi_lookup;
+ struct scsiio_tracker *scsi_lookup;
ulong scsi_lookup_pages;
spinlock_t scsi_lookup_lock;
struct list_head free_list;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 5ded3db6e316..6ceb7759bfe5 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -6975,7 +6975,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
u32 device_state;
mpt2sas_base_stop_watchdog(ioc);
- flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index b37c8a3c1bb0..86afb13f1e79 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1005,11 +1005,23 @@ int osd_req_read_sg(struct osd_request *or,
const struct osd_sg_entry *sglist, unsigned numentries)
{
u64 len;
- int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+ u64 off;
+ int ret;
- if (ret)
- return ret;
- osd_req_read(or, obj, 0, bio, len);
+ if (numentries > 1) {
+ off = 0;
+ ret = _add_sg_continuation_descriptor(or, sglist, numentries,
+ &len);
+ if (ret)
+ return ret;
+ } else {
+ /* Optimize the case of single segment, read_sg is a
+ * bidi operation.
+ */
+ len = sglist->len;
+ off = sglist->offset;
+ }
+ osd_req_read(or, obj, off, bio, len);
return 0;
}
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index d8db0137c0c7..18b6c55cd08c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1382,53 +1382,50 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
return MPI_IO_STATUS_BUSY;
}
-static void pm8001_work_queue(struct work_struct *work)
+static void pm8001_work_fn(struct work_struct *work)
{
- struct delayed_work *dw = container_of(work, struct delayed_work, work);
- struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q);
+ struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
struct pm8001_device *pm8001_dev;
- struct domain_device *dev;
+ struct domain_device *dev;
- switch (wq->handler) {
+ switch (pw->handler) {
case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_IN_ERROR:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
case IO_DS_NON_OPERATIONAL:
- pm8001_dev = wq->data;
+ pm8001_dev = pw->data;
dev = pm8001_dev->sas_device;
pm8001_I_T_nexus_reset(dev);
break;
}
- list_del(&wq->entry);
- kfree(wq);
+ kfree(pw);
}
static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
int handler)
{
- struct pm8001_wq *wq;
+ struct pm8001_work *pw;
int ret = 0;
- wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC);
- if (wq) {
- wq->pm8001_ha = pm8001_ha;
- wq->data = data;
- wq->handler = handler;
- INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue);
- list_add_tail(&wq->entry, &pm8001_ha->wq_list);
- schedule_delayed_work(&wq->work_q, 0);
+ pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC);
+ if (pw) {
+ pw->pm8001_ha = pm8001_ha;
+ pw->data = data;
+ pw->handler = handler;
+ INIT_WORK(&pw->work, pm8001_work_fn);
+ queue_work(pm8001_wq, &pw->work);
} else
ret = -ENOMEM;
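
The pm8001 hunks above convert list-tracked delayed work into plain work items queued on a driver-private workqueue; each item frees itself when it runs, so teardown only needs to drain the queue. A minimal sketch of the pattern with hypothetical names (my_wq, my_event):

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;	/* alloc_workqueue("my", 0, 0) at init */

	struct my_event {
		struct work_struct work;
		int handler;
		void *data;
	};

	static void my_event_fn(struct work_struct *work)
	{
		struct my_event *ev = container_of(work, struct my_event, work);

		/* ... dispatch on ev->handler and ev->data ... */
		kfree(ev);		/* one-shot: each item frees itself */
	}

	static int my_post_event(int handler, void *data)
	{
		struct my_event *ev = kmalloc(sizeof(*ev), GFP_ATOMIC);

		if (!ev)
			return -ENOMEM;
		ev->handler = handler;
		ev->data = data;
		INIT_WORK(&ev->work, my_event_fn);
		queue_work(my_wq, &ev->work);
		return 0;
	}

Teardown is then flush_workqueue(my_wq) followed by destroy_workqueue(my_wq), which is the lifetime the pm8001_init()/pm8001_exit() hunks below establish.
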
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index b95285f3383f..002360da01e3 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -51,6 +51,8 @@ static int pm8001_id;
LIST_HEAD(hba_list);
+struct workqueue_struct *pm8001_wq;
+
/**
* The main structure which LLDD must register for scsi core.
*/
@@ -134,7 +136,6 @@ static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
{
int i;
- struct pm8001_wq *wq;
if (!pm8001_ha)
return;
@@ -150,8 +151,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
if (pm8001_ha->shost)
scsi_host_put(pm8001_ha->shost);
- list_for_each_entry(wq, &pm8001_ha->wq_list, entry)
- cancel_delayed_work(&wq->work_q);
+ flush_workqueue(pm8001_wq);
kfree(pm8001_ha->tags);
kfree(pm8001_ha);
}
@@ -381,7 +381,6 @@ pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
pm8001_ha->sas = sha;
pm8001_ha->shost = shost;
pm8001_ha->id = pm8001_id++;
- INIT_LIST_HEAD(&pm8001_ha->wq_list);
pm8001_ha->logging_level = 0x01;
sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id);
#ifdef PM8001_USE_TASKLET
@@ -758,7 +757,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)
int i , pos;
u32 device_state;
pm8001_ha = sha->lldd_ha;
- flush_scheduled_work();
+ flush_workqueue(pm8001_wq);
scsi_block_requests(pm8001_ha->shost);
pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pos == 0) {
@@ -870,17 +869,26 @@ static struct pci_driver pm8001_pci_driver = {
*/
static int __init pm8001_init(void)
{
- int rc;
+ int rc = -ENOMEM;
+
+ pm8001_wq = alloc_workqueue("pm8001", 0, 0);
+ if (!pm8001_wq)
+ goto err;
+
pm8001_id = 0;
pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops);
if (!pm8001_stt)
- return -ENOMEM;
+ goto err_wq;
rc = pci_register_driver(&pm8001_pci_driver);
if (rc)
- goto err_out;
+ goto err_tp;
return 0;
-err_out:
+
+err_tp:
sas_release_transport(pm8001_stt);
+err_wq:
+ destroy_workqueue(pm8001_wq);
+err:
return rc;
}
@@ -888,6 +896,7 @@ static void __exit pm8001_exit(void)
{
pci_unregister_driver(&pm8001_pci_driver);
sas_release_transport(pm8001_stt);
+ destroy_workqueue(pm8001_wq);
}
module_init(pm8001_init);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 7f064f9ca828..bdb6b27dedd6 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -50,6 +50,7 @@
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
@@ -379,18 +380,16 @@ struct pm8001_hba_info {
#ifdef PM8001_USE_TASKLET
struct tasklet_struct tasklet;
#endif
- struct list_head wq_list;
u32 logging_level;
u32 fw_status;
const struct firmware *fw_image;
};
-struct pm8001_wq {
- struct delayed_work work_q;
+struct pm8001_work {
+ struct work_struct work;
struct pm8001_hba_info *pm8001_ha;
void *data;
int handler;
- struct list_head entry;
};
struct pm8001_fw_image_header {
@@ -460,6 +459,9 @@ struct fw_control_ex {
void *param3;
};
+/* pm8001 workqueue */
+extern struct workqueue_struct *pm8001_wq;
+
/******************** function prototype *********************/
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out);
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 321cf3ae8630..bcf858e88c64 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5454,7 +5454,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
pmcraid_shutdown(pdev);
pmcraid_disable_interrupts(pinstance, ~0);
- flush_scheduled_work();
+ flush_work_sync(&pinstance->worker_q);
pmcraid_kill_tasklets(pinstance);
pmcraid_unregister_interrupt_handler(pinstance);
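
This hunk, like the earlier megaraid and mpt2sas ones, drops flush_scheduled_work(), which blocks on everything ever put on the shared system workqueue and can deadlock, in favor of waiting only on the driver's own items. A sketch of the targeted teardown, with hypothetical work-item names:

	#include <linux/workqueue.h>

	/*
	 * Sketch only: tear down known work items without draining the
	 * shared system workqueue. flush_work_sync() waits for one item;
	 * cancel_delayed_work_sync() also cancels a pending timer.
	 */
	static void stop_my_work(struct work_struct *worker,
				 struct delayed_work *poller)
	{
		flush_work_sync(worker);		/* let a queued run finish */
		cancel_delayed_work_sync(poller);	/* cancel + wait if running */
	}
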
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ccfc8e78be21..6c51c0a35b9e 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2402,13 +2402,13 @@ struct qla_hw_data {
volatile struct {
uint32_t mbox_int :1;
uint32_t mbox_busy :1;
-
uint32_t disable_risc_code_load :1;
uint32_t enable_64bit_addressing :1;
uint32_t enable_lip_reset :1;
uint32_t enable_target_reset :1;
uint32_t enable_lip_full_login :1;
uint32_t enable_led_scheme :1;
+
uint32_t msi_enabled :1;
uint32_t msix_enabled :1;
uint32_t disable_serdes :1;
@@ -2417,6 +2417,7 @@ struct qla_hw_data {
uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
+
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
@@ -2424,9 +2425,11 @@ struct qla_hw_data {
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
uint32_t fcp_prio_enabled :1;
- uint32_t fw_hung :1;
- uint32_t quiesce_owner:1;
+ uint32_t isp82xx_fw_hung:1;
+
+ uint32_t quiesce_owner:1;
uint32_t thermal_supported:1;
+ uint32_t isp82xx_reset_hdlr_active:1;
/* 26 bits */
} flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 89e900adb679..d48326ee3f61 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -565,6 +565,7 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(srb_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
+extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4c083928c2fb..74a91b6dfc68 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -121,8 +121,11 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
- DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
- vha->host_no, routine, ms_pkt->entry_status));
+ DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
+ "(%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, ms_pkt->entry_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@@ -136,8 +139,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
DEBUG2_3(printk("scsi(%ld): %s failed, "
- "rejected request:\n", vha->host_no,
- routine));
+ "rejected request on port_id: %02x%02x%02x\n",
+ vha->host_no, routine,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
DEBUG2_3(qla2x00_dump_buffer(
(uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr)));
@@ -147,8 +152,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
break;
default:
DEBUG2_3(printk("scsi(%ld): %s failed, completion "
- "status (%x).\n", vha->host_no, routine,
- comp_status));
+ "status (%x) on port_id: %02x%02x%02x.\n",
+ vha->host_no, routine, comp_status,
+ vha->d_id.b.domain, vha->d_id.b.area,
+ vha->d_id.b.al_pa));
break;
}
}
@@ -1965,7 +1972,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
"scsi(%ld): GFF_ID issue IOCB failed "
"(%d).\n", vha->host_no, rval));
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
- "GPN_ID") != QLA_SUCCESS) {
+ "GFF_ID") != QLA_SUCCESS) {
DEBUG2_3(printk(KERN_INFO
"scsi(%ld): GFF_ID IOCB status had a "
"failure status code\n", vha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index d9479c3fe5f8..8575808dbae0 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1967,7 +1967,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
} else {
/* Mailbox cmd failed. Timeout on min_wait. */
if (time_after_eq(jiffies, mtime) ||
- (IS_QLA82XX(ha) && ha->flags.fw_hung))
+ ha->flags.isp82xx_fw_hung)
break;
}
@@ -3945,8 +3945,13 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp;
unsigned long flags;
+ fc_port_t *fcport;
- vha->flags.online = 0;
+ /* For ISP82XX, driver waits for completion of the commands.
+ * online flag should be set.
+ */
+ if (!IS_QLA82XX(ha))
+ vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
ha->qla_stats.total_isp_aborts++;
@@ -3954,7 +3959,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
qla_printk(KERN_INFO, ha,
"Performing ISP error recovery - ha= %p.\n", ha);
- /* Chip reset does not apply to 82XX */
+ /* For ISP82XX, reset_chip is just disabling interrupts.
+ * Driver waits for the completion of the commands.
+ * the interrupts need to be enabled.
+ */
if (!IS_QLA82XX(ha))
ha->isp_ops->reset_chip(vha);
@@ -3980,14 +3988,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
LOOP_DOWN_TIME);
}
+ /* Clear all async request states across all VPs. */
+ list_for_each_entry(fcport, &vha->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ atomic_inc(&vp->vref_count);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ list_for_each_entry(fcport, &vp->vp_fcports, list)
+ fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ atomic_dec(&vp->vref_count);
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
if (!ha->flags.eeh_busy) {
/* Make sure for ISP 82XX IO DMA is complete */
if (IS_QLA82XX(ha)) {
- if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST) == QLA_SUCCESS) {
- DEBUG2(qla_printk(KERN_INFO, ha,
- "Done wait for pending commands\n"));
- }
+ qla82xx_chip_reset_cleanup(vha);
+
+ /* Done waiting for pending commands.
+ * Reset the online flag.
+ */
+ vha->flags.online = 0;
}
/* Requeue all commands in outstanding command list. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 4c1ba6263eb3..d78d5896fc33 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -328,6 +328,7 @@ qla2x00_start_scsi(srb_t *sp)
struct qla_hw_data *ha;
struct req_que *req;
struct rsp_que *rsp;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -406,7 +407,22 @@ qla2x00_start_scsi(srb_t *sp)
cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);
/* Update tagged queuing modifier */
- cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_HEAD_TAG);
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_ORDERED_TAG);
+ break;
+ default:
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_SIMPLE_TAG);
+ break;
+ }
+ }
/* Load SCSI command packet. */
memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -971,6 +987,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
uint16_t fcp_cmnd_len;
struct fcp_cmnd *fcp_cmnd;
dma_addr_t crc_ctx_dma;
+ char tag[2];
cmd = sp->cmd;
@@ -1068,9 +1085,27 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
- fcp_cmnd->task_attribute = 0;
fcp_cmnd->task_management = 0;
+ /*
+ * Update tagged queuing modifier if using command tag queuing
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ fcp_cmnd->task_attribute = TSK_ORDERED;
+ break;
+ default:
+ fcp_cmnd->task_attribute = 0;
+ break;
+ }
+ } else {
+ fcp_cmnd->task_attribute = 0;
+ }
+
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
@@ -1177,6 +1212,7 @@ qla24xx_start_scsi(srb_t *sp)
struct scsi_cmnd *cmd = sp->cmd;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -1260,6 +1296,18 @@ qla24xx_start_scsi(srb_t *sp)
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
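
The qla2xxx hunks above derive the FCP task attribute from the midlayer queue tag via scsi_populate_tag_msg(). A condensed sketch of the mapping; fcp_task_attr is a hypothetical helper and the TSK_* values mirror the driver's defines:

	#include <linux/types.h>
	#include <scsi/scsi.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_tcq.h>

	/* FCP task attribute codes, mirroring the driver's TSK_* defines. */
	#define TSK_SIMPLE		0
	#define TSK_HEAD_OF_QUEUE	1
	#define TSK_ORDERED		2

	static u8 fcp_task_attr(struct scsi_cmnd *cmd)
	{
		char tag[2];

		if (scsi_populate_tag_msg(cmd, tag)) {
			switch (tag[0]) {
			case HEAD_OF_QUEUE_TAG:
				return TSK_HEAD_OF_QUEUE;
			case ORDERED_QUEUE_TAG:
				return TSK_ORDERED;
			}
		}
		return TSK_SIMPLE;	/* default when no tag message applies */
	}
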
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index e473e9fb363c..7a7c0ecfe7dd 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -71,6 +71,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
+ if (ha->flags.isp82xx_fw_hung) {
+ /* Setting Link-Down error */
+ mcp->mb[0] = MBS_LINK_DOWN_ERROR;
+ rval = QLA_FUNCTION_FAILED;
+ goto premature_exit;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -83,13 +90,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
- /* Setting Link-Down error */
- mcp->mb[0] = MBS_LINK_DOWN_ERROR;
- rval = QLA_FUNCTION_FAILED;
- goto premature_exit;
- }
-
ha->flags.mbox_busy = 1;
/* Save mailbox command for debug */
ha->mcp = mcp;
@@ -223,7 +223,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- if (IS_QLA82XX(ha) && ha->flags.fw_hung) {
+ if (ha->flags.isp82xx_fw_hung) {
ha->flags.mbox_busy = 0;
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -2462,22 +2462,19 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
"-- completion status (%x).\n", __func__,
vha->host_no, le16_to_cpu(sts->comp_status)));
rval = QLA_FUNCTION_FAILED;
- } else if (!(le16_to_cpu(sts->scsi_status) &
- SS_RESPONSE_INFO_LEN_VALID)) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- no response info (%x).\n", __func__, vha->host_no,
- le16_to_cpu(sts->scsi_status)));
- rval = QLA_FUNCTION_FAILED;
- } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- not enough response info (%d).\n", __func__,
- vha->host_no, le32_to_cpu(sts->rsp_data_len)));
- rval = QLA_FUNCTION_FAILED;
- } else if (sts->data[3]) {
- DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
- "-- response (%x).\n", __func__,
- vha->host_no, sts->data[3]));
- rval = QLA_FUNCTION_FAILED;
+ } else if (le16_to_cpu(sts->scsi_status) &
+ SS_RESPONSE_INFO_LEN_VALID) {
+ if (le32_to_cpu(sts->rsp_data_len) < 4) {
+ DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent "
+ "data length -- not enough response info (%d).\n",
+ __func__, vha->host_no,
+ le32_to_cpu(sts->rsp_data_len)));
+ } else if (sts->data[3]) {
+ DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB "
+ "-- response (%x).\n", __func__,
+ vha->host_no, sts->data[3]));
+ rval = QLA_FUNCTION_FAILED;
+ }
}
/* Issue marker IOCB. */
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index fdb96a3584a5..76ec876e6b21 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,7 @@
#include "qla_def.h"
#include <linux/delay.h>
#include <linux/pci.h>
+#include <scsi/scsi_tcq.h>
#define MASK(n) ((1ULL<<(n))-1)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \
@@ -2547,7 +2548,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = dsd_list_len;
+ cmd_pkt->fcp_data_dseg_len = dsd_list_len;
} else {
*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
@@ -2620,6 +2621,7 @@ qla82xx_start_scsi(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
+ char tag[2];
/* Setup device pointers. */
ret = 0;
@@ -2770,6 +2772,22 @@ sufficient_dsds:
int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_ORDERED;
+ break;
+ }
+ }
+
/* build FCP_CMND IU */
memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
@@ -2835,6 +2853,20 @@ sufficient_dsds:
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
sizeof(cmd_pkt->lun));
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
/* Load SCSI command packet. */
memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
@@ -3457,46 +3489,28 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
}
}
-static void
+int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
- uint32_t fw_heartbeat_counter, halt_status;
- struct qla_hw_data *ha = vha->hw;
+ uint32_t fw_heartbeat_counter;
+ int status = 0;
- fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+ fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
+ QLA82XX_PEG_ALIVE_COUNTER);
/* all 0xff, assume AER/EEH in progress, ignore */
if (fw_heartbeat_counter == 0xffffffff)
- return;
+ return status;
if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
vha->seconds_since_last_heartbeat++;
/* FW not alive after 2 seconds */
if (vha->seconds_since_last_heartbeat == 2) {
vha->seconds_since_last_heartbeat = 0;
- halt_status = qla82xx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
- if (halt_status & HALT_STATUS_UNRECOVERABLE) {
- set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
- } else {
- qla_printk(KERN_INFO, ha,
- "scsi(%ld): %s - detect abort needed\n",
- vha->host_no, __func__);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- }
- qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Due to fw hung, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
+ status = 1;
}
} else
vha->seconds_since_last_heartbeat = 0;
vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
}
/*
@@ -3557,6 +3571,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
break;
case QLA82XX_DEV_NEED_RESET:
qla82xx_need_reset_handler(vha);
+ dev_init_timeout = jiffies +
+ (ha->nx_dev_init_timeout * HZ);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
qla82xx_need_qsnt_handler(vha);
@@ -3596,30 +3612,18 @@ exit:
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
- uint32_t dev_state;
+ uint32_t dev_state, halt_status;
struct qla_hw_data *ha = vha->hw;
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-
/* don't poll if reset is going on */
- if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) {
- if (dev_state == QLA82XX_DEV_NEED_RESET) {
+ if (!ha->flags.isp82xx_reset_hdlr_active) {
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
qla_printk(KERN_WARNING, ha,
- "%s(): Adapter reset needed!\n", __func__);
+ "%s(): Adapter reset needed!\n", __func__);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
- ha->flags.fw_hung = 1;
- if (ha->flags.mbox_busy) {
- ha->flags.mbox_int = 1;
- DEBUG2(qla_printk(KERN_ERR, ha,
- "Need reset, doing premature "
- "completion of mbx command\n"));
- if (test_bit(MBX_INTR_WAIT,
- &ha->mbx_cmd_flags))
- complete(&ha->mbx_intr_comp);
- }
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
DEBUG(qla_printk(KERN_INFO, ha,
@@ -3629,6 +3633,31 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
- qla82xx_check_fw_alive(vha);
+ if (qla82xx_check_fw_alive(vha)) {
+ halt_status = qla82xx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+ if (halt_status & HALT_STATUS_UNRECOVERABLE) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): %s - detect abort needed\n",
+ vha->host_no, __func__);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+ qla2xxx_wake_dpc(vha);
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ DEBUG2(qla_printk(KERN_ERR, ha,
+ "Due to fw hung, doing premature "
+ "completion of mbx command\n"));
+ if (test_bit(MBX_INTR_WAIT,
+ &ha->mbx_cmd_flags))
+ complete(&ha->mbx_intr_comp);
+ }
+ }
}
}
}
@@ -3663,6 +3692,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
"Exiting.\n", __func__, vha->host_no);
return QLA_SUCCESS;
}
+ ha->flags.isp82xx_reset_hdlr_active = 1;
qla82xx_idc_lock(ha);
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
@@ -3683,7 +3713,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_idc_unlock(ha);
if (rval == QLA_SUCCESS) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.isp82xx_reset_hdlr_active = 0;
qla82xx_restart_isp(vha);
}
@@ -3791,3 +3822,71 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
return status;
}
+
+void
+qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
+{
+ int i;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Check if 82XX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only
+ */
+ if (!ha->flags.isp82xx_fw_hung) {
+ for (i = 0; i < 2; i++) {
+ msleep(1000);
+ if (qla82xx_check_fw_alive(vha)) {
+ ha->flags.isp82xx_fw_hung = 1;
+ if (ha->flags.mbox_busy) {
+ ha->flags.mbox_int = 1;
+ complete(&ha->mbx_intr_comp);
+ }
+ break;
+ }
+ }
+ }
+
+ /* Abort all commands gracefully if fw NOT hung */
+ if (!ha->flags.isp82xx_fw_hung) {
+ int cnt, que;
+ srb_t *sp;
+ struct req_que *req;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (que = 0; que < ha->max_req_queues; que++) {
+ req = ha->req_q_map[que];
+ if (!req)
+ continue;
+ for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ sp = req->outstanding_cmds[cnt];
+ if (sp) {
+ if (!sp->ctx ||
+ (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
+ if (ha->isp_ops->abort_command(sp)) {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command failed in %s\n",
+ vha->host_no, __func__);
+ } else {
+ qla_printk(KERN_INFO, ha,
+ "scsi(%ld): mbx abort command success in %s\n",
+ vha->host_no, __func__);
+ }
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ /* Wait for pending cmds (physical and virtual) to complete */
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST) == QLA_SUCCESS) {
+ DEBUG2(qla_printk(KERN_INFO, ha,
+ "Done wait for pending commands\n"));
+ }
+ }
+}
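
Aside: the rework above splits heartbeat detection from reaction -- qla82xx_check_fw_alive() now only reports a stalled PEG alive counter, and the watchdog decides between ISP_UNRECOVERABLE and ISP_ABORT_NEEDED from PEG_HALT_STATUS1. A minimal stand-alone C sketch of the same stale-counter pattern (illustrative names only, not the driver's code):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t last_counter;	/* plays vha->fw_heartbeat_counter */
	static int stale_polls;		/* plays seconds_since_last_heartbeat */

	/* Return 1 once the alive counter has not advanced for two polls. */
	static int fw_heartbeat_stalled(uint32_t counter)
	{
		int stalled = 0;

		if (counter == last_counter) {
			if (++stale_polls == 2) {
				stale_polls = 0;
				stalled = 1;
			}
		} else {
			stale_polls = 0;
		}
		last_counter = counter;
		return stalled;
	}

	int main(void)
	{
		for (int tick = 0; tick < 4; tick++)
			if (fw_heartbeat_stalled(0))	/* counter frozen */
				printf("tick %d: fw hung, read halt status\n",
				       tick);
		return 0;
	}

The two-poll threshold mirrors the seconds_since_last_heartbeat == 2 test in the hunk at the top of this section.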
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e90f7c16b956..75a966c94860 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -506,7 +506,7 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
static inline srb_t *
qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
- struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+ struct scsi_cmnd *cmd)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
@@ -520,14 +520,13 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
sp->cmd = cmd;
sp->flags = 0;
CMD_SP(cmd) = (void *)sp;
- cmd->scsi_done = done;
sp->ctx = NULL;
return sp;
}
static int
-qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
+qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -537,7 +536,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
srb_t *sp;
int rval;
- spin_unlock_irq(vha->host->host_lock);
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
@@ -569,40 +567,32 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
goto qc24_target_busy;
}
- sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
+ sp = qla2x00_get_new_sp(base_vha, fcport, cmd);
if (!sp)
- goto qc24_host_busy_lock;
+ goto qc24_host_busy;
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS)
goto qc24_host_busy_free_sp;
- spin_lock_irq(vha->host->host_lock);
-
return 0;
qc24_host_busy_free_sp:
qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
-qc24_host_busy_lock:
- spin_lock_irq(vha->host->host_lock);
+qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
qc24_target_busy:
- spin_lock_irq(vha->host->host_lock);
return SCSI_MLQUEUE_TARGET_BUSY;
qc24_fail_command:
- spin_lock_irq(vha->host->host_lock);
- done(cmd);
+ cmd->scsi_done(cmd);
return 0;
}
-static DEF_SCSI_QCMD(qla2xxx_queuecommand)
-
-
/*
* qla2x00_eh_wait_on_command
* Waits for the command to be returned by the Firmware for some
@@ -821,17 +811,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
srb_t *sp;
- int ret = SUCCESS;
+ int ret;
unsigned int id, lun;
unsigned long flags;
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- fc_block_scsi_eh(cmd);
-
if (!CMD_SP(cmd))
return SUCCESS;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = SUCCESS;
+
id = cmd->device->id;
lun = cmd->device->lun;
@@ -940,11 +933,13 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
int err;
- fc_block_scsi_eh(cmd);
-
if (!fcport)
return FAILED;
+ err = fc_block_scsi_eh(cmd);
+ if (err != 0)
+ return err;
+
qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n",
vha->host_no, cmd->device->id, cmd->device->lun, name);
@@ -1018,14 +1013,17 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
int ret = FAILED;
unsigned int id, lun;
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, vha->hw,
"scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun);
@@ -1078,14 +1076,17 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
unsigned int id, lun;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- fc_block_scsi_eh(cmd);
-
id = cmd->device->id;
lun = cmd->device->lun;
if (!fcport)
return ret;
+ ret = fc_block_scsi_eh(cmd);
+ if (ret != 0)
+ return ret;
+ ret = FAILED;
+
qla_printk(KERN_INFO, ha,
"scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun);
@@ -3805,7 +3806,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
ha->flags.eeh_busy = 1;
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
- ha->flags.fw_hung = 1;
+ ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
DEBUG2(qla_printk(KERN_ERR, ha,
@@ -3945,7 +3946,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
qla82xx_idc_unlock(ha);
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
/* Clear driver state register */
@@ -3958,7 +3959,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
"This devfn is not reset owner = 0x%x\n", ha->pdev->devfn));
if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
QLA82XX_DEV_READY)) {
- ha->flags.fw_hung = 0;
+ ha->flags.isp82xx_fw_hung = 0;
rval = qla82xx_restart_isp(base_vha);
qla82xx_idc_lock(ha);
qla82xx_set_drv_active(base_vha);
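
Aside: the qla_os.c hunks above convert the driver to the lock-free ->queuecommand() convention -- the midlayer no longer wraps the call in host_lock and pre-sets cmd->scsi_done, which is why the DEF_SCSI_QCMD wrapper, the done() argument, and the unlock/lock pairs all disappear. A compilable sketch of the resulting shape, with hypothetical foo_* names and stub types standing in for the kernel's:

	#include <stdio.h>

	struct scsi_cmnd { int result; void (*scsi_done)(struct scsi_cmnd *); };
	struct Scsi_Host { int dummy; };
	#define DID_NO_CONNECT		0x01
	#define SCSI_MLQUEUE_HOST_BUSY	0x1055	/* returned to request a retry */

	static int device_gone = 1;	/* pretend the remote port is gone */

	static void done_fn(struct scsi_cmnd *cmd)
	{
		printf("completed, result 0x%x\n", cmd->result);
	}

	/* Called without host_lock; scsi_done was already set by the midlayer. */
	static int foo_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
	{
		(void)host;
		if (device_gone) {
			cmd->result = DID_NO_CONNECT << 16;
			cmd->scsi_done(cmd);	/* fail the command in place */
			return 0;
		}
		/* ...build and issue the request, or return HOST_BUSY... */
		return 0;
	}

	int main(void)
	{
		struct Scsi_Host host = { 0 };
		struct scsi_cmnd cmd = { .result = 0, .scsi_done = done_fn };
		return foo_queuecommand(&host, &cmd);
	}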
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cf0075a2d0c2..3a260c3f055a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.05-k0"
+#define QLA2XXX_VERSION "8.03.07.00"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
-#define QLA_DRIVER_PATCH_VER 5
+#define QLA_DRIVER_PATCH_VER 7
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a6b2d72022fc..fa5758cbdedb 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -89,32 +89,34 @@ static const char * scsi_debug_version_date = "20100324";
/* With these defaults, this driver will make 1 host with 1 target
* (id 0) containing 1 logical unit (lun 0). That is 1 device.
*/
+#define DEF_ATO 1
#define DEF_DELAY 1
#define DEF_DEV_SIZE_MB 8
-#define DEF_EVERY_NTH 0
-#define DEF_NUM_PARTS 0
-#define DEF_OPTS 0
-#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
-#define DEF_PTYPE 0
+#define DEF_DIF 0
+#define DEF_DIX 0
#define DEF_D_SENSE 0
-#define DEF_NO_LUN_0 0
-#define DEF_VIRTUAL_GB 0
+#define DEF_EVERY_NTH 0
#define DEF_FAKE_RW 0
-#define DEF_VPD_USE_HOSTNO 1
-#define DEF_SECTOR_SIZE 512
-#define DEF_DIX 0
-#define DEF_DIF 0
#define DEF_GUARD 0
-#define DEF_ATO 1
-#define DEF_PHYSBLK_EXP 0
+#define DEF_LBPU 0
+#define DEF_LBPWS 0
+#define DEF_LBPWS10 0
#define DEF_LOWEST_ALIGNED 0
+#define DEF_NO_LUN_0 0
+#define DEF_NUM_PARTS 0
+#define DEF_OPTS 0
#define DEF_OPT_BLKS 64
+#define DEF_PHYSBLK_EXP 0
+#define DEF_PTYPE 0
+#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */
+#define DEF_SECTOR_SIZE 512
+#define DEF_UNMAP_ALIGNMENT 0
+#define DEF_UNMAP_GRANULARITY 1
#define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
#define DEF_UNMAP_MAX_DESC 256
-#define DEF_UNMAP_GRANULARITY 1
-#define DEF_UNMAP_ALIGNMENT 0
-#define DEF_TPWS 0
-#define DEF_TPU 0
+#define DEF_VIRTUAL_GB 0
+#define DEF_VPD_USE_HOSTNO 1
+#define DEF_WRITESAME_LENGTH 0xFFFF
/* bit mask values for scsi_debug_opts */
#define SCSI_DEBUG_OPT_NOISE 1
@@ -144,6 +146,7 @@ static const char * scsi_debug_version_date = "20100324";
/* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
* sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
+#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
* or "peripheral device" addressing (value 0) */
@@ -155,36 +158,38 @@ static const char * scsi_debug_version_date = "20100324";
#define SCSI_DEBUG_CANQUEUE 255
static int scsi_debug_add_host = DEF_NUM_HOST;
+static int scsi_debug_ato = DEF_ATO;
static int scsi_debug_delay = DEF_DELAY;
static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB;
+static int scsi_debug_dif = DEF_DIF;
+static int scsi_debug_dix = DEF_DIX;
+static int scsi_debug_dsense = DEF_D_SENSE;
static int scsi_debug_every_nth = DEF_EVERY_NTH;
+static int scsi_debug_fake_rw = DEF_FAKE_RW;
+static int scsi_debug_guard = DEF_GUARD;
+static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
static int scsi_debug_max_luns = DEF_MAX_LUNS;
static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE;
-static int scsi_debug_num_parts = DEF_NUM_PARTS;
+static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
static int scsi_debug_no_uld = 0;
+static int scsi_debug_num_parts = DEF_NUM_PARTS;
static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */
+static int scsi_debug_opt_blks = DEF_OPT_BLKS;
static int scsi_debug_opts = DEF_OPTS;
-static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
+static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */
-static int scsi_debug_dsense = DEF_D_SENSE;
-static int scsi_debug_no_lun_0 = DEF_NO_LUN_0;
+static int scsi_debug_scsi_level = DEF_SCSI_LEVEL;
+static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB;
-static int scsi_debug_fake_rw = DEF_FAKE_RW;
static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
-static int scsi_debug_sector_size = DEF_SECTOR_SIZE;
-static int scsi_debug_dix = DEF_DIX;
-static int scsi_debug_dif = DEF_DIF;
-static int scsi_debug_guard = DEF_GUARD;
-static int scsi_debug_ato = DEF_ATO;
-static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP;
-static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;
-static int scsi_debug_opt_blks = DEF_OPT_BLKS;
-static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
-static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
-static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int scsi_debug_lbpu = DEF_LBPU;
+static unsigned int scsi_debug_lbpws = DEF_LBPWS;
+static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10;
static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
-static unsigned int scsi_debug_tpws = DEF_TPWS;
-static unsigned int scsi_debug_tpu = DEF_TPU;
+static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY;
+static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
+static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
+static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;
static int scsi_debug_cmnd_count = 0;
@@ -206,6 +211,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */
#define SCSI_DEBUG_MAX_CMD_LEN 32
+static unsigned int scsi_debug_lbp(void)
+{
+ return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10;
+}
+
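Aside: scsi_debug_lbp() above folds the three module flags into one capability test, and inquiry_evpd_b2() exports the same flags as bits 7/6/5 of byte 5 of the B2h VPD page. A small stand-alone sketch of that encoding (the flag values are just sample inputs):

	#include <stdio.h>

	static unsigned int lbpu = 1, lbpws = 0, lbpws10 = 1;	/* samples */

	/* Any of the three mechanisms enables Logical Block Provisioning. */
	static unsigned int lbp_enabled(void)
	{
		return lbpu | lbpws | lbpws10;
	}

	/* Byte 5 of the B2h page: bit 7 LBPU, bit 6 LBPWS, bit 5 LBPWS10. */
	static unsigned char vpd_b2_byte5(void)
	{
		return (lbpu << 7) | (lbpws << 6) | (lbpws10 << 5);
	}

	int main(void)
	{
		printf("lbp=%u byte5=0x%02x\n", lbp_enabled(), vpd_b2_byte5());
		return 0;	/* prints: lbp=1 byte5=0xa0 */
	}
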
struct sdebug_dev_info {
struct list_head dev_list;
unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */
@@ -727,7 +737,7 @@ static int inquiry_evpd_b0(unsigned char * arr)
/* Optimal Transfer Length */
put_unaligned_be32(scsi_debug_opt_blks, &arr[8]);
- if (scsi_debug_tpu) {
+ if (scsi_debug_lbpu) {
/* Maximum Unmap LBA Count */
put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]);
@@ -744,7 +754,10 @@ static int inquiry_evpd_b0(unsigned char * arr)
/* Optimal Unmap Granularity */
put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]);
- return 0x3c; /* Mandatory page length for thin provisioning */
+ /* Maximum WRITE SAME Length */
+ put_unaligned_be64(scsi_debug_write_same_length, &arr[32]);
+
+ return 0x3c; /* Mandatory page length for Logical Block Provisioning */
return sizeof(vpdb0_data);
}
@@ -767,12 +780,15 @@ static int inquiry_evpd_b2(unsigned char *arr)
memset(arr, 0, 0x8);
arr[0] = 0; /* threshold exponent */
- if (scsi_debug_tpu)
+ if (scsi_debug_lbpu)
arr[1] = 1 << 7;
- if (scsi_debug_tpws)
+ if (scsi_debug_lbpws)
arr[1] |= 1 << 6;
+ if (scsi_debug_lbpws10)
+ arr[1] |= 1 << 5;
+
return 0x8;
}
@@ -831,7 +847,8 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
arr[n++] = 0x89; /* ATA information */
arr[n++] = 0xb0; /* Block limits (SBC) */
arr[n++] = 0xb1; /* Block characteristics (SBC) */
- arr[n++] = 0xb2; /* Thin provisioning (SBC) */
+ if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */
+ arr[n++] = 0xb2;
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
arr[1] = cmd[2]; /*sanity */
@@ -879,7 +896,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target,
} else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_evpd_b1(&arr[4]);
- } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */
+ } else if (0xb2 == cmd[2]) { /* Logical Block Prov. (SBC) */
arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_evpd_b2(&arr[4]);
} else {
@@ -1053,8 +1070,8 @@ static int resp_readcap16(struct scsi_cmnd * scp,
arr[13] = scsi_debug_physblk_exp & 0xf;
arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f;
- if (scsi_debug_tpu || scsi_debug_tpws)
- arr[14] |= 0x80; /* TPE */
+ if (scsi_debug_lbp())
+ arr[14] |= 0x80; /* LBPME */
arr[15] = scsi_debug_lowest_aligned & 0xff;
@@ -1791,15 +1808,15 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,
return ret;
if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) &&
- (lba <= OPT_MEDIUM_ERR_ADDR) &&
+ (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) &&
((lba + num) > OPT_MEDIUM_ERR_ADDR)) {
/* claim unrecoverable read error */
- mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR,
- 0);
+ mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
/* set info field and valid bit for fixed descriptor */
if (0x70 == (devip->sense_buff[0] & 0x7f)) {
devip->sense_buff[0] |= 0x80; /* Valid bit */
- ret = OPT_MEDIUM_ERR_ADDR;
+ ret = (lba < OPT_MEDIUM_ERR_ADDR)
+ ? OPT_MEDIUM_ERR_ADDR : (int)lba;
devip->sense_buff[3] = (ret >> 24) & 0xff;
devip->sense_buff[4] = (ret >> 16) & 0xff;
devip->sense_buff[5] = (ret >> 8) & 0xff;
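
Aside: with OPT_MEDIUM_ERR_NUM the simulated defect grows from a single sector to a run of ten starting at OPT_MEDIUM_ERR_ADDR, and the sense information field now reports the first bad LBA actually covered by the request rather than always the run's start. A stand-alone check of that arithmetic:

	#include <stdio.h>

	#define ERR_ADDR 0x1234		/* first simulated bad sector */
	#define ERR_NUM  10		/* length of the bad-sector run */

	/* First failing LBA for a read of [lba, lba + num), or -1 if clean. */
	static long first_bad_lba(unsigned long lba, unsigned int num)
	{
		if (lba <= ERR_ADDR + ERR_NUM - 1 && lba + num > ERR_ADDR)
			return lba < ERR_ADDR ? ERR_ADDR : (long)lba;
		return -1;
	}

	int main(void)
	{
		printf("%ld\n", first_bad_lba(0x1230, 8));	/* 4660 (0x1234) */
		printf("%ld\n", first_bad_lba(0x1238, 4));	/* 4664 (0x1238) */
		printf("%ld\n", first_bad_lba(0x1240, 4));	/* -1: past the run */
		return 0;
	}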
@@ -2084,6 +2101,12 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba,
if (ret)
return ret;
+ if (num > scsi_debug_write_same_length) {
+ mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB,
+ 0);
+ return check_condition_result;
+ }
+
write_lock_irqsave(&atomic_rw, iflags);
if (unmap && scsi_debug_unmap_granularity) {
@@ -2695,37 +2718,40 @@ static int schedule_resp(struct scsi_cmnd * cmnd,
/sys/bus/pseudo/drivers/scsi_debug directory is changed.
*/
module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);
+module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);
+module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
+module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR);
+module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
+module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);
+module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);
+module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO);
+module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO);
module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR);
+module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR);
+module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO);
-module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
-module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
- S_IRUGO | S_IWUSR);
module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO);
-module_param_named(dix, scsi_debug_dix, int, S_IRUGO);
-module_param_named(dif, scsi_debug_dif, int, S_IRUGO);
-module_param_named(guard, scsi_debug_guard, int, S_IRUGO);
-module_param_named(ato, scsi_debug_ato, int, S_IRUGO);
-module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO);
-module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO);
-module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO);
+module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
+module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO);
-module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO);
-module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO);
-module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO);
-module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO);
+module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR);
+module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int,
+ S_IRUGO | S_IWUSR);
+module_param_named(write_same_length, scsi_debug_write_same_length, int,
+ S_IRUGO | S_IWUSR);
MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
@@ -2733,36 +2759,38 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(SCSI_DEBUG_VERSION);
MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");
+MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");
MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");
+MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
+MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
+MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
+MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
+MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))");
MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
+MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=64)");
MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
+MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
-MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
-MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
-MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
-MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)");
-MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
-MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
-MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
-MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
-MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
+MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
+MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
-MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
-MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
-MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)");
-MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)");
+MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)");
+MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
+MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
static char sdebug_info[256];
@@ -3150,7 +3178,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)
{
ssize_t count;
- if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0)
+ if (!scsi_debug_lbp())
return scnprintf(buf, PAGE_SIZE, "0-%u\n",
sdebug_store_sectors);
@@ -3333,8 +3361,8 @@ static int __init scsi_debug_init(void)
memset(dif_storep, 0xff, dif_size);
}
- /* Thin Provisioning */
- if (scsi_debug_tpu || scsi_debug_tpws) {
+ /* Logical Block Provisioning */
+ if (scsi_debug_lbp()) {
unsigned int map_bytes;
scsi_debug_unmap_max_blocks =
@@ -3664,7 +3692,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
errsts = resp_readcap16(SCpnt, devip);
else if (cmd[1] == SAI_GET_LBA_STATUS) {
- if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) {
+ if (scsi_debug_lbp() == 0) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
@@ -3775,8 +3803,10 @@ write:
}
break;
case WRITE_SAME_16:
+ case WRITE_SAME:
if (cmd[1] & 0x8) {
- if (scsi_debug_tpws == 0) {
+ if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) ||
+ (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_FIELD_IN_CDB, 0);
errsts = check_condition_result;
@@ -3785,8 +3815,6 @@ write:
}
if (errsts)
break;
- /* fall through */
- case WRITE_SAME:
errsts = check_readiness(SCpnt, 0, devip);
if (errsts)
break;
@@ -3798,7 +3826,7 @@ write:
if (errsts)
break;
- if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) {
+ if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) {
mk_sense_buffer(devip, ILLEGAL_REQUEST,
INVALID_COMMAND_OPCODE, 0);
errsts = check_condition_result;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 43fad4c09beb..82e9e5c0476e 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -382,6 +382,91 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
EXPORT_SYMBOL(scsi_dev_info_list_add_keyed);
/**
+ * scsi_dev_info_list_del_keyed - remove one dev_info list entry.
+ * @vendor: vendor string
+ * @model: model (product) string
+ * @key: specify list to use
+ *
+ * Description:
+ * Remove and destroy one dev_info entry for @vendor, @model
+ * in list specified by @key.
+ *
+ * Returns: 0 OK, -error on failure.
+ **/
+int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key)
+{
+ struct scsi_dev_info_list *devinfo, *found = NULL;
+ struct scsi_dev_info_list_table *devinfo_table =
+ scsi_devinfo_lookup_by_key(key);
+
+ if (IS_ERR(devinfo_table))
+ return PTR_ERR(devinfo_table);
+
+ list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list,
+ dev_info_list) {
+ if (devinfo->compatible) {
+ /*
+ * Behave like the older version of get_device_flags.
+ */
+ size_t max;
+ /*
+ * XXX why skip leading spaces? If an odd INQUIRY
+ * value, that should have been part of the
+ * scsi_static_device_list[] entry, such as " FOO"
+ * rather than "FOO". Since this code is already
+ * here, and we don't know what device it is
+ * trying to work with, leave it as-is.
+ */
+ max = 8; /* max length of vendor */
+ while ((max > 0) && *vendor == ' ') {
+ max--;
+ vendor++;
+ }
+ /*
+ * XXX removing the following strlen() would be
+ * good, using it means that for an entry not in
+ * the list, we scan every byte of every vendor
+ * listed in scsi_static_device_list[], and never match
+ * a single one (and still have to compare at
+ * least the first byte of each vendor).
+ */
+ if (memcmp(devinfo->vendor, vendor,
+ min(max, strlen(devinfo->vendor))))
+ continue;
+ /*
+ * Skip spaces again.
+ */
+ max = 16; /* max length of model */
+ while ((max > 0) && *model == ' ') {
+ max--;
+ model++;
+ }
+ if (memcmp(devinfo->model, model,
+ min(max, strlen(devinfo->model))))
+ continue;
+ found = devinfo;
+ } else {
+ if (!memcmp(devinfo->vendor, vendor,
+ sizeof(devinfo->vendor)) &&
+ !memcmp(devinfo->model, model,
+ sizeof(devinfo->model)))
+ found = devinfo;
+ }
+ if (found)
+ break;
+ }
+
+ if (found) {
+ list_del(&found->dev_info_list);
+ kfree(found);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL(scsi_dev_info_list_del_keyed);
+
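Aside: the "compatible" branch of the new delete helper repeats get_device_flags()' historical matching rules -- skip leading spaces in the queried string, then compare the stored entry as a prefix. A stand-alone sketch of just that comparison (field widths 8/16 as for INQUIRY vendor/model):

	#include <stdio.h>
	#include <string.h>

	static int compat_match(const char *entry, const char *query, size_t max)
	{
		size_t n;

		while (max > 0 && *query == ' ') {	/* skip leading spaces */
			max--;
			query++;
		}
		n = strlen(entry);
		if (n > max)
			n = max;
		return memcmp(entry, query, n) == 0;	/* prefix compare */
	}

	int main(void)
	{
		printf("%d\n", compat_match("FOO", "  FOO-1234", 8));	/* 1 */
		printf("%d\n", compat_match("FOO", "BAR", 8));		/* 0 */
		return 0;
	}
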
+/**
* scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list.
* @dev_list: string of device flags to add
*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 45c75649b9e0..991de3c15cfc 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -223,7 +223,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
* @scmd: Cmd to have sense checked.
*
* Return value:
- * SUCCESS or FAILED or NEEDS_RETRY
+ * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR
*
* Notes:
* When a deferred error is detected the current command has
@@ -326,17 +326,19 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
*/
return SUCCESS;
- /* these three are not supported */
+ /* these are not supported */
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
- return SUCCESS;
+ case BLANK_CHECK:
+ case DATA_PROTECT:
+ return TARGET_ERROR;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- return SUCCESS;
+ return TARGET_ERROR;
}
return NEEDS_RETRY;
@@ -344,11 +346,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- return SUCCESS;
+ return TARGET_ERROR;
case ILLEGAL_REQUEST:
- case BLANK_CHECK:
- case DATA_PROTECT:
default:
return SUCCESS;
}
@@ -787,6 +787,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
case SUCCESS:
case NEEDS_RETRY:
case FAILED:
+ case TARGET_ERROR:
break;
case ADD_TO_MLQUEUE:
rtn = NEEDS_RETRY;
@@ -1469,6 +1470,14 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
+ else if (rtn == TARGET_ERROR) {
+ /*
+ * Need to modify host byte to signal a
+ * permanent target failure
+ */
+ scmd->result |= (DID_TARGET_FAILURE << 16);
+ rtn = SUCCESS;
+ }
/* if rtn == FAILED, we have no sense information;
* returning FAILED will wake the error handler thread
* to collect the sense and redo the decide
@@ -1486,6 +1495,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
case RESERVATION_CONFLICT:
sdev_printk(KERN_INFO, scmd->device,
"reservation conflict\n");
+ scmd->result |= (DID_NEXUS_FAILURE << 16);
return SUCCESS; /* causes immediate i/o error */
default:
return FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fb2bb35c62cb..2d63c8ad1442 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -667,6 +667,30 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_release_buffers);
+static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+{
+ int error = 0;
+
+ switch(host_byte(result)) {
+ case DID_TRANSPORT_FAILFAST:
+ error = -ENOLINK;
+ break;
+ case DID_TARGET_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EREMOTEIO;
+ break;
+ case DID_NEXUS_FAILURE:
+ cmd->result |= (DID_OK << 16);
+ error = -EBADE;
+ break;
+ default:
+ error = -EIO;
+ break;
+ }
+
+ return error;
+}
+
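Aside: the scsi_error.c and scsi_lib.c hunks above form one chain: scsi_check_sense() classifies permanent target failures as TARGET_ERROR, scsi_decide_disposition() encodes that (and reservation conflicts) in the host byte, and __scsi_error_from_host_byte() turns the host byte into a distinct errno for the block layer. A stand-alone sketch of the mapping; the DID_* numeric values are assumed to match this series' scsi.h and should be treated as illustrative:

	#include <errno.h>
	#include <stdio.h>

	#define DID_TRANSPORT_FAILFAST	0x0f	/* assumed values, see above */
	#define DID_TARGET_FAILURE	0x10
	#define DID_NEXUS_FAILURE	0x11

	static int host_byte(int result)
	{
		return (result >> 16) & 0xff;
	}

	static int errno_from_host_byte(int result)
	{
		switch (host_byte(result)) {
		case DID_TRANSPORT_FAILFAST:
			return -ENOLINK;
		case DID_TARGET_FAILURE:	/* permanent target failure */
			return -EREMOTEIO;
		case DID_NEXUS_FAILURE:		/* e.g. reservation conflict */
			return -EBADE;
		default:
			return -EIO;
		}
	}

	int main(void)
	{
		printf("%d %d\n",
		       errno_from_host_byte(DID_TARGET_FAILURE << 16),
		       errno_from_host_byte(DID_NEXUS_FAILURE << 16));
		return 0;	/* -EREMOTEIO and -EBADE (Linux-specific errnos) */
	}
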
/*
* Function: scsi_io_completion()
*
@@ -737,7 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
req->sense_len = len;
}
if (!sense_deferred)
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
}
req->resid_len = scsi_get_resid(cmd);
@@ -796,7 +820,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
return;
- error = -EIO;
+ error = __scsi_error_from_host_byte(cmd, result);
if (host_byte(result) == DID_RESET) {
/* Third party bus reset or reset for error recovery
@@ -843,6 +867,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
description = "Host Data Integrity Failure";
action = ACTION_FAIL;
error = -EILSEQ;
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+ (cmd->cmnd[0] == UNMAP ||
+ cmd->cmnd[0] == WRITE_SAME_16 ||
+ cmd->cmnd[0] == WRITE_SAME)) {
+ description = "Discard failure";
+ action = ACTION_FAIL;
} else
action = ACTION_FAIL;
break;
@@ -1038,6 +1069,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
cmd->request = req;
cmd->cmnd = req->cmd;
+ cmd->prot_op = SCSI_PROT_NORMAL;
return cmd;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 342ee1a9c41d..2a588955423a 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,6 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
enum {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
+ SCSI_DEVINFO_DH,
};
extern int scsi_get_device_flags(struct scsi_device *sdev,
@@ -56,6 +57,7 @@ extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
int flags, int key);
+extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f905ecb5704d..b4218390941e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -954,6 +954,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
if (dd_size)
conn->dd_data = &conn[1];
+ mutex_init(&conn->ep_mutex);
INIT_LIST_HEAD(&conn->conn_list);
conn->transport = transport;
conn->cid = cid;
@@ -975,7 +976,6 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
spin_lock_irqsave(&connlock, flags);
list_add(&conn->conn_list, &connlist);
- conn->active = 1;
spin_unlock_irqrestore(&connlock, flags);
ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
@@ -1001,7 +1001,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn)
unsigned long flags;
spin_lock_irqsave(&connlock, flags);
- conn->active = 0;
list_del(&conn->conn_list);
spin_unlock_irqrestore(&connlock, flags);
@@ -1430,6 +1429,29 @@ release_host:
return err;
}
+static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
+ u64 ep_handle)
+{
+ struct iscsi_cls_conn *conn;
+ struct iscsi_endpoint *ep;
+
+ if (!transport->ep_disconnect)
+ return -EINVAL;
+
+ ep = iscsi_lookup_endpoint(ep_handle);
+ if (!ep)
+ return -EINVAL;
+ conn = ep->conn;
+ if (conn) {
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = NULL;
+ mutex_unlock(&conn->ep_mutex);
+ }
+
+ transport->ep_disconnect(ep);
+ return 0;
+}
+
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
struct iscsi_uevent *ev, int msg_type)
@@ -1454,14 +1476,8 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
ev->u.ep_poll.timeout_ms);
break;
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
- if (!transport->ep_disconnect)
- return -EINVAL;
-
- ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle);
- if (!ep)
- return -EINVAL;
-
- transport->ep_disconnect(ep);
+ rc = iscsi_if_ep_disconnect(transport,
+ ev->u.ep_disconnect.ep_handle);
break;
}
return rc;
@@ -1609,12 +1625,31 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
session = iscsi_session_lookup(ev->u.b_conn.sid);
conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid);
- if (session && conn)
- ev->r.retcode = transport->bind_conn(session, conn,
- ev->u.b_conn.transport_eph,
- ev->u.b_conn.is_leading);
- else
+ if (conn && conn->ep)
+ iscsi_if_ep_disconnect(transport, conn->ep->id);
+
+ if (!session || !conn) {
err = -EINVAL;
+ break;
+ }
+
+ ev->r.retcode = transport->bind_conn(session, conn,
+ ev->u.b_conn.transport_eph,
+ ev->u.b_conn.is_leading);
+ if (ev->r.retcode || !transport->ep_connect)
+ break;
+
+ ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph);
+ if (ep) {
+ ep->conn = conn;
+
+ mutex_lock(&conn->ep_mutex);
+ conn->ep = ep;
+ mutex_unlock(&conn->ep_mutex);
+ } else
+ iscsi_cls_conn_printk(KERN_ERR, conn,
+ "Could not set ep conn "
+ "binding\n");
break;
case ISCSI_UEVENT_SET_PARAM:
err = iscsi_set_param(transport, ev);
@@ -1747,13 +1782,48 @@ iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN);
iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN);
iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN);
iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT);
-iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT);
iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
-iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS);
iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+#define iscsi_conn_ep_attr_show(param) \
+static ssize_t show_conn_ep_param_##param(struct device *dev, \
+ struct device_attribute *attr,\
+ char *buf) \
+{ \
+ struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \
+ struct iscsi_transport *t = conn->transport; \
+ struct iscsi_endpoint *ep; \
+ ssize_t rc; \
+ \
+ /* \
+ * Need to make sure ep_disconnect does not free the LLD's \
+ * interconnect resources while we are trying to read them. \
+ */ \
+ mutex_lock(&conn->ep_mutex); \
+ ep = conn->ep; \
+ if (!ep && t->ep_connect) { \
+ mutex_unlock(&conn->ep_mutex); \
+ return -ENOTCONN; \
+ } \
+ \
+ if (ep) \
+ rc = t->get_ep_param(ep, param, buf); \
+ else \
+ rc = t->get_conn_param(conn, param, buf); \
+ mutex_unlock(&conn->ep_mutex); \
+ return rc; \
+}
+
+#define iscsi_conn_ep_attr(field, param) \
+ iscsi_conn_ep_attr_show(param) \
+static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \
+ show_conn_ep_param_##param, NULL);
+
+iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
+iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
+
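Aside: ep_mutex exists so a sysfs read of address/port cannot race an ep_disconnect that frees the LLD's interconnect resources -- the disconnect path clears conn->ep under the mutex, and readers fall back to the connection-level parameters. The same discipline in a minimal pthread sketch (names are illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t ep_mutex = PTHREAD_MUTEX_INITIALIZER;
	static struct endpoint { const char *addr; } *ep;

	static void show_address(void)
	{
		pthread_mutex_lock(&ep_mutex);
		if (ep)
			printf("address: %s\n", ep->addr);	/* ep param */
		else
			printf("not connected\n");	/* conn-level fallback */
		pthread_mutex_unlock(&ep_mutex);
	}

	static void disconnect(void)
	{
		pthread_mutex_lock(&ep_mutex);
		ep = NULL;		/* readers now take the fallback path */
		pthread_mutex_unlock(&ep_mutex);
		/* only now is it safe to free the endpoint's resources */
	}

	int main(void)
	{
		static struct endpoint e = { "10.0.0.1:3260" };

		ep = &e;
		show_address();
		disconnect();
		show_address();
		return 0;
	}
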
/*
* iSCSI session attrs
*/
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e56730214c05..3be5db5d6343 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -96,6 +96,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
#define SD_MINORS 0
#endif
+static void sd_config_discard(struct scsi_disk *, unsigned int);
static int sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
@@ -294,7 +295,54 @@ sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
- return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+ return snprintf(buf, 20, "%u\n", sdkp->lbpme);
+}
+
+static const char *lbp_mode[] = {
+ [SD_LBP_FULL] = "full",
+ [SD_LBP_UNMAP] = "unmap",
+ [SD_LBP_WS16] = "writesame_16",
+ [SD_LBP_WS10] = "writesame_10",
+ [SD_LBP_ZERO] = "writesame_zero",
+ [SD_LBP_DISABLE] = "disabled",
+};
+
+static ssize_t
+sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]);
+}
+
+static ssize_t
+sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (sdp->type != TYPE_DISK)
+ return -EINVAL;
+
+ if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20))
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20))
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20))
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20))
+ sd_config_discard(sdkp, SD_LBP_ZERO);
+ else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20))
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ else
+ return -EINVAL;
+
+ return count;
}
static struct device_attribute sd_disk_attrs[] = {
@@ -309,6 +357,8 @@ static struct device_attribute sd_disk_attrs[] = {
__ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
+ __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
+ sd_store_provisioning_mode),
__ATTR_NULL,
};
@@ -433,6 +483,49 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
scsi_set_prot_type(scmd, dif);
}
+static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
+{
+ struct request_queue *q = sdkp->disk->queue;
+ unsigned int logical_block_size = sdkp->device->sector_size;
+ unsigned int max_blocks = 0;
+
+ q->limits.discard_zeroes_data = sdkp->lbprz;
+ q->limits.discard_alignment = sdkp->unmap_alignment;
+ q->limits.discard_granularity =
+ max(sdkp->physical_block_size,
+ sdkp->unmap_granularity * logical_block_size);
+
+ switch (mode) {
+
+ case SD_LBP_DISABLE:
+ q->limits.max_discard_sectors = 0;
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+ return;
+
+ case SD_LBP_UNMAP:
+ max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff);
+ break;
+
+ case SD_LBP_WS16:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff);
+ break;
+
+ case SD_LBP_WS10:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ break;
+
+ case SD_LBP_ZERO:
+ max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+ q->limits.discard_zeroes_data = 1;
+ break;
+ }
+
+ q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+
+ sdkp->provisioning_mode = mode;
+}
+
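Aside: sd_config_discard() converts a device-reported block budget into the block layer's 512-byte-sector limit; the shift assumes logical blocks are a power-of-two multiple of 512 bytes. Worked numbers for a 4 KiB-sector disk capped at 0xffff blocks per WRITE SAME(10):

	#include <stdio.h>

	int main(void)
	{
		unsigned int logical_block_size = 4096;	/* from the device */
		unsigned int max_blocks = 0xffff;	/* WRITE SAME(10) cap */
		unsigned int max_discard_sectors;

		max_discard_sectors = max_blocks * (logical_block_size >> 9);
		printf("%u sectors = %u MiB per discard\n",
		       max_discard_sectors, max_discard_sectors / 2048);
		return 0;	/* 524280 sectors = 255 MiB */
	}
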
/**
* scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
* @sdp: scsi device to operate one
@@ -449,6 +542,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
unsigned int nr_sectors = bio_sectors(bio);
unsigned int len;
int ret;
+ char *buf;
struct page *page;
if (sdkp->device->sector_size == 4096) {
@@ -464,8 +558,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
if (!page)
return BLKPREP_DEFER;
- if (sdkp->unmap) {
- char *buf = page_address(page);
+ switch (sdkp->provisioning_mode) {
+ case SD_LBP_UNMAP:
+ buf = page_address(page);
rq->cmd_len = 10;
rq->cmd[0] = UNMAP;
@@ -477,7 +572,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
put_unaligned_be32(nr_sectors, &buf[16]);
len = 24;
- } else {
+ break;
+
+ case SD_LBP_WS16:
rq->cmd_len = 16;
rq->cmd[0] = WRITE_SAME_16;
rq->cmd[1] = 0x8; /* UNMAP */
@@ -485,11 +582,29 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
put_unaligned_be32(nr_sectors, &rq->cmd[10]);
len = sdkp->device->sector_size;
+ break;
+
+ case SD_LBP_WS10:
+ case SD_LBP_ZERO:
+ rq->cmd_len = 10;
+ rq->cmd[0] = WRITE_SAME;
+ if (sdkp->provisioning_mode == SD_LBP_WS10)
+ rq->cmd[1] = 0x8; /* UNMAP */
+ put_unaligned_be32(sector, &rq->cmd[2]);
+ put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+
+ len = sdkp->device->sector_size;
+ break;
+
+ default:
+ goto out;
}
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->buffer = page_address(page);
+
+out:
if (ret != BLKPREP_OK) {
__free_page(page);
rq->buffer = NULL;
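
Aside: of the three discard flavours built above, only SD_LBP_UNMAP carries a parameter list in the payload page; the WRITE SAME variants merely reuse the page as a zero-filled data-out buffer. A stand-alone sketch of the 24-byte UNMAP parameter list with a single block descriptor (the put_be* helpers approximate the kernel's put_unaligned_be*):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void put_be64(uint64_t v, unsigned char *p)
	{
		for (int i = 0; i < 8; i++)
			p[i] = v >> (56 - 8 * i);
	}

	static void put_be32(uint32_t v, unsigned char *p)
	{
		for (int i = 0; i < 4; i++)
			p[i] = v >> (24 - 8 * i);
	}

	static void put_be16(uint16_t v, unsigned char *p)
	{
		p[0] = v >> 8;
		p[1] = v & 0xff;
	}

	int main(void)
	{
		unsigned char buf[24];

		memset(buf, 0, sizeof(buf));
		put_be16(6 + 16, &buf[0]);	/* UNMAP data length */
		put_be16(16, &buf[2]);		/* block descriptor data length */
		put_be64(0x1000, &buf[8]);	/* starting LBA */
		put_be32(256, &buf[16]);	/* number of blocks */

		for (int i = 0; i < 24; i++)
			printf("%02x%c", buf[i], i % 8 == 7 ? '\n' : ' ');
		return 0;
	}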
@@ -1251,12 +1366,10 @@ static int sd_done(struct scsi_cmnd *SCpnt)
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
int sense_valid = 0;
int sense_deferred = 0;
+ unsigned char op = SCpnt->cmnd[0];
- if (SCpnt->request->cmd_flags & REQ_DISCARD) {
- if (!result)
- scsi_set_resid(SCpnt, 0);
- return good_bytes;
- }
+ if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result)
+ scsi_set_resid(SCpnt, 0);
if (result) {
sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -1295,10 +1408,17 @@ static int sd_done(struct scsi_cmnd *SCpnt)
SCpnt->result = 0;
memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
break;
- case ABORTED_COMMAND: /* DIF: Target detected corruption */
- case ILLEGAL_REQUEST: /* DIX: Host detected corruption */
- if (sshdr.asc == 0x10)
+ case ABORTED_COMMAND:
+ if (sshdr.asc == 0x10) /* DIF: Target detected corruption */
+ good_bytes = sd_completed_bytes(SCpnt);
+ break;
+ case ILLEGAL_REQUEST:
+ if (sshdr.asc == 0x10) /* DIX: Host detected corruption */
good_bytes = sd_completed_bytes(SCpnt);
+ /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+ if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
+ (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME))
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
break;
default:
break;
@@ -1596,17 +1716,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
sd_printk(KERN_NOTICE, sdkp,
"physical block alignment offset: %u\n", alignment);
- if (buffer[14] & 0x80) { /* TPE */
- struct request_queue *q = sdp->request_queue;
+ if (buffer[14] & 0x80) { /* LBPME */
+ sdkp->lbpme = 1;
- sdkp->thin_provisioning = 1;
- q->limits.discard_granularity = sdkp->physical_block_size;
- q->limits.max_discard_sectors = 0xffffffff;
+ if (buffer[14] & 0x40) /* LBPRZ */
+ sdkp->lbprz = 1;
- if (buffer[14] & 0x40) /* TPRZ */
- q->limits.discard_zeroes_data = 1;
-
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ sd_config_discard(sdkp, SD_LBP_WS16);
}
sdkp->capacity = lba + 1;
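
Aside: with the rename from TPE/TPRZ to LBPME/LBPRZ, the two flag bits of READ CAPACITY(16) byte 14 are only recorded here; the queue limits are applied later through sd_config_discard(). The bit positions, for reference:

	#include <stdio.h>

	int main(void)
	{
		unsigned char byte14 = 0xc0;	/* sample response byte */

		int lbpme = (byte14 & 0x80) != 0; /* provisioning enabled */
		int lbprz = (byte14 & 0x40) != 0; /* unmapped reads as zero */

		printf("lbpme=%d lbprz=%d\n", lbpme, lbprz);
		return 0;
	}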
@@ -2091,7 +2207,6 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
*/
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
- struct request_queue *q = sdkp->disk->queue;
unsigned int sector_sz = sdkp->device->sector_size;
const int vpd_len = 64;
unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
@@ -2106,39 +2221,46 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
blk_queue_io_opt(sdkp->disk->queue,
get_unaligned_be32(&buffer[12]) * sector_sz);
- /* Thin provisioning enabled and page length indicates TP support */
- if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
- unsigned int lba_count, desc_count, granularity;
+ if (buffer[3] == 0x3c) {
+ unsigned int lba_count, desc_count;
- lba_count = get_unaligned_be32(&buffer[20]);
- desc_count = get_unaligned_be32(&buffer[24]);
-
- if (lba_count && desc_count) {
- if (sdkp->tpvpd && !sdkp->tpu)
- sdkp->unmap = 0;
- else
- sdkp->unmap = 1;
- }
+ sdkp->max_ws_blocks =
+ (u32) min_not_zero(get_unaligned_be64(&buffer[36]),
+ (u64)0xffffffff);
- if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) {
- sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \
- "enabled but neither TPU, nor TPWS are " \
- "set. Disabling discard!\n");
+ if (!sdkp->lbpme)
goto out;
- }
- if (lba_count)
- q->limits.max_discard_sectors =
- lba_count * sector_sz >> 9;
+ lba_count = get_unaligned_be32(&buffer[20]);
+ desc_count = get_unaligned_be32(&buffer[24]);
- granularity = get_unaligned_be32(&buffer[28]);
+ if (lba_count && desc_count)
+ sdkp->max_unmap_blocks = lba_count;
- if (granularity)
- q->limits.discard_granularity = granularity * sector_sz;
+ sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
if (buffer[32] & 0x80)
- q->limits.discard_alignment =
+ sdkp->unmap_alignment =
get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+
+ if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
+
+ if (sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else
+ sd_config_discard(sdkp, SD_LBP_WS16);
+
+ } else { /* LBP VPD page tells us what to use */
+
+ if (sdkp->lbpu && sdkp->max_unmap_blocks)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
+ sd_config_discard(sdkp, SD_LBP_WS16);
+ else if (sdkp->lbpws10)
+ sd_config_discard(sdkp, SD_LBP_WS10);
+ else
+ sd_config_discard(sdkp, SD_LBP_DISABLE);
+ }
}
out:
@@ -2172,15 +2294,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
}
/**
- * sd_read_thin_provisioning - Query thin provisioning VPD page
+ * sd_read_block_provisioning - Query provisioning VPD page
* @disk: disk to query
*/
-static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
+static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
unsigned char *buffer;
const int vpd_len = 8;
- if (sdkp->thin_provisioning == 0)
+ if (sdkp->lbpme == 0)
return;
buffer = kmalloc(vpd_len, GFP_KERNEL);
@@ -2188,9 +2310,10 @@ static void sd_read_thin_provisioning(struct scsi_disk *sdkp)
if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
goto out;
- sdkp->tpvpd = 1;
- sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */
- sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
+ sdkp->lbpvpd = 1;
+ sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
+ sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
+ sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
out:
kfree(buffer);
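
Aside: together the last two hunks pick the discard mechanism: without a B2h VPD page the block-limits data decides, and with one the advertised LBPU/LBPWS/LBPWS10 bits do, in that order of preference. The decision table, condensed into a stand-alone helper:

	#include <stdio.h>

	enum lbp_mode { LBP_UNMAP, LBP_WS16, LBP_WS10, LBP_DISABLE };

	static enum lbp_mode pick_mode(int have_vpd, int lbpu, int lbpws,
				       int lbpws10, unsigned int max_unmap)
	{
		if (!have_vpd)
			return max_unmap ? LBP_UNMAP : LBP_WS16;
		if (lbpu && max_unmap)
			return LBP_UNMAP;
		if (lbpws)
			return LBP_WS16;
		if (lbpws10)
			return LBP_WS10;
		return LBP_DISABLE;
	}

	int main(void)
	{
		printf("%d\n", pick_mode(1, 1, 1, 0, 256)); /* 0: UNMAP wins */
		printf("%d\n", pick_mode(1, 0, 0, 1, 0));   /* 2: WS10 fallback */
		printf("%d\n", pick_mode(0, 0, 0, 0, 0));   /* 1: WS16 default */
		return 0;
	}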
@@ -2247,7 +2370,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_capacity(sdkp, buffer);
if (sd_try_extended_inquiry(sdp)) {
- sd_read_thin_provisioning(sdkp);
+ sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
sd_read_block_characteristics(sdkp);
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index c9d8f6ca49e2..6ad798bfd52a 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -43,6 +43,15 @@ enum {
SD_MEMPOOL_SIZE = 2, /* CDB pool size */
};
+enum {
+ SD_LBP_FULL = 0, /* Full logical block provisioning */
+ SD_LBP_UNMAP, /* Use UNMAP command */
+ SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */
+ SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */
+ SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */
+ SD_LBP_DISABLE, /* Discard disabled due to failed cmd */
+};
+
struct scsi_disk {
struct scsi_driver *driver; /* always &sd_template */
struct scsi_device *device;
@@ -50,21 +59,27 @@ struct scsi_disk {
struct gendisk *disk;
atomic_t openers;
sector_t capacity; /* size in 512-byte sectors */
+ u32 max_ws_blocks;
+ u32 max_unmap_blocks;
+ u32 unmap_granularity;
+ u32 unmap_alignment;
u32 index;
unsigned int physical_block_size;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
+ u8 provisioning_mode;
unsigned ATO : 1; /* state of disk ATO bit */
unsigned WCE : 1; /* state of disk WCE bit */
unsigned RCD : 1; /* state of disk RCD bit, unused */
unsigned DPOFUA : 1; /* state of disk DPOFUA bit */
unsigned first_scan : 1;
- unsigned thin_provisioning : 1;
- unsigned unmap : 1;
- unsigned tpws : 1;
- unsigned tpu : 1;
- unsigned tpvpd : 1;
+ unsigned lbpme : 1;
+ unsigned lbprz : 1;
+ unsigned lbpu : 1;
+ unsigned lbpws : 1;
+ unsigned lbpws10 : 1;
+ unsigned lbpvpd : 1;
};
#define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 366080baf474..7f19c8b7b84c 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf;
- u32 blocks = dev->transport->get_blocks(dev);
+ unsigned long long blocks_long = dev->transport->get_blocks(dev);
+ u32 blocks;
+
+ if (blocks_long >= 0x00000000ffffffff)
+ blocks = 0xffffffff;
+ else
+ blocks = (u32)blocks_long;
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
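
Aside: READ CAPACITY(10) has only a 32-bit "returned logical block address" field, so devices with 2^32 or more blocks must report all ones and let the initiator retry with READ CAPACITY(16); the hunk above adds that clamp. Worked example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t blocks_long = 1ULL << 33; /* 4 TiB disk at 512 B */
		uint32_t blocks;

		if (blocks_long >= 0xffffffffULL)
			blocks = 0xffffffff;
		else
			blocks = (uint32_t)blocks_long;

		printf("reported: 0x%08x\n", blocks);	/* 0xffffffff */
		return 0;
	}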
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 3740e327f180..c35f1a73bc8b 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -177,6 +177,8 @@ static int __init xen_hvc_init(void)
}
if (xencons_irq < 0)
xencons_irq = 0; /* NO_IRQ */
+ else
+ set_irq_noprobe(xencons_irq);
hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256);
if (IS_ERR(hp))
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 92c91c83edde..eb7958c675a8 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -47,7 +47,6 @@
#include <linux/clk.h>
#include <linux/ctype.h>
#include <linux/err.h>
-#include <linux/list.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
@@ -65,11 +64,8 @@
struct sci_port {
struct uart_port port;
- /* Port type */
- unsigned int type;
-
- /* Port IRQs: ERI, RXI, TXI, BRI (optional) */
- unsigned int irqs[SCIx_NR_IRQS];
+ /* Platform configuration */
+ struct plat_sci_port *cfg;
/* Port enable callback */
void (*enable)(struct uart_port *port);
@@ -81,26 +77,15 @@ struct sci_port {
struct timer_list break_timer;
int break_flag;
- /* SCSCR initialization */
- unsigned int scscr;
-
- /* SCBRR calculation algo */
- unsigned int scbrr_algo_id;
-
/* Interface clock */
struct clk *iclk;
/* Function clock */
struct clk *fclk;
- struct list_head node;
-
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
#ifdef CONFIG_SERIAL_SH_SCI_DMA
- struct device *dma_dev;
- unsigned int slave_tx;
- unsigned int slave_rx;
struct dma_async_tx_descriptor *desc_tx;
struct dma_async_tx_descriptor *desc_rx[2];
dma_cookie_t cookie_tx;
@@ -117,16 +102,14 @@ struct sci_port {
struct timer_list rx_timer;
unsigned int rx_timeout;
#endif
-};
-struct sh_sci_priv {
- spinlock_t lock;
- struct list_head ports;
- struct notifier_block clk_nb;
+ struct notifier_block freq_transition;
};
/* Function prototypes */
+static void sci_start_tx(struct uart_port *port);
static void sci_stop_tx(struct uart_port *port);
+static void sci_start_rx(struct uart_port *port);
#define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -142,12 +125,6 @@ to_sci_port(struct uart_port *uart)
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
#ifdef CONFIG_CONSOLE_POLL
-static inline void handle_error(struct uart_port *port)
-{
- /* Clear error flags */
- sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
-}
-
static int sci_poll_get_char(struct uart_port *port)
{
unsigned short status;
@@ -156,7 +133,7 @@ static int sci_poll_get_char(struct uart_port *port)
do {
status = sci_in(port, SCxSR);
if (status & SCxSR_ERRORS(port)) {
- handle_error(port);
+ sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port));
continue;
}
break;
@@ -475,7 +452,7 @@ static void sci_transmit_chars(struct uart_port *port)
/* On SH3, SCIF may read end-of-break as a space->mark char */
#define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); })
-static inline void sci_receive_chars(struct uart_port *port)
+static void sci_receive_chars(struct uart_port *port)
{
struct sci_port *sci_port = to_sci_port(port);
struct tty_struct *tty = port->state->port.tty;
@@ -566,18 +543,20 @@ static inline void sci_receive_chars(struct uart_port *port)
}
#define SCI_BREAK_JIFFIES (HZ/20)
-/* The sci generates interrupts during the break,
+
+/*
+ * The sci generates interrupts during the break,
* 1 per millisecond or so during the break period, for 9600 baud.
* So dont bother disabling interrupts.
* But dont want more than 1 break event.
* Use a kernel timer to periodically poll the rx line until
* the break is finished.
*/
-static void sci_schedule_break_timer(struct sci_port *port)
+static inline void sci_schedule_break_timer(struct sci_port *port)
{
- port->break_timer.expires = jiffies + SCI_BREAK_JIFFIES;
- add_timer(&port->break_timer);
+ mod_timer(&port->break_timer, jiffies + SCI_BREAK_JIFFIES);
}
+
/* Ensure that two consecutive samples find the break over. */
static void sci_break_timer(unsigned long data)
{
@@ -594,7 +573,7 @@ static void sci_break_timer(unsigned long data)
port->break_flag = 0;
}
-static inline int sci_handle_errors(struct uart_port *port)
+static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
@@ -650,7 +629,7 @@ static inline int sci_handle_errors(struct uart_port *port)
return copied;
}
-static inline int sci_handle_fifo_overrun(struct uart_port *port)
+static int sci_handle_fifo_overrun(struct uart_port *port)
{
struct tty_struct *tty = port->state->port.tty;
int copied = 0;
@@ -671,7 +650,7 @@ static inline int sci_handle_fifo_overrun(struct uart_port *port)
return copied;
}
-static inline int sci_handle_breaks(struct uart_port *port)
+static int sci_handle_breaks(struct uart_port *port)
{
int copied = 0;
unsigned short status = sci_in(port, SCxSR);
@@ -794,7 +773,7 @@ static inline unsigned long port_rx_irq_mask(struct uart_port *port)
* it's unset, it's logically inferred that there's no point in
* testing for it.
*/
- return SCSCR_RIE | (to_sci_port(port)->scscr & SCSCR_REIE);
+ return SCSCR_RIE | (to_sci_port(port)->cfg->scscr & SCSCR_REIE);
}
static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
@@ -839,17 +818,18 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
static int sci_notifier(struct notifier_block *self,
unsigned long phase, void *p)
{
- struct sh_sci_priv *priv = container_of(self,
- struct sh_sci_priv, clk_nb);
struct sci_port *sci_port;
unsigned long flags;
+ sci_port = container_of(self, struct sci_port, freq_transition);
+
if ((phase == CPUFREQ_POSTCHANGE) ||
(phase == CPUFREQ_RESUMECHANGE)) {
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(sci_port, &priv->ports, node)
- sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
- spin_unlock_irqrestore(&priv->lock, flags);
+ struct uart_port *port = &sci_port->port;
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->uartclk = clk_get_rate(sci_port->iclk);
+ spin_unlock_irqrestore(&port->lock, flags);
}
return NOTIFY_OK;
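
Aside: the notifier rework drops the global sh_sci_priv port list; each sci_port now embeds its own notifier_block, and the callback recovers the port with container_of() before refreshing uartclk under the port lock. The container_of() mechanics in a stand-alone sketch (sci_port_like is a made-up stand-in):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct notifier_block { int dummy; };

	struct sci_port_like {
		unsigned long uartclk;
		struct notifier_block freq_transition;
	};

	/* Recover the embedding port from the notifier, as sci_notifier() does. */
	static void on_freq_change(struct notifier_block *self,
				   unsigned long new_rate)
	{
		struct sci_port_like *port =
			container_of(self, struct sci_port_like, freq_transition);
		port->uartclk = new_rate;
	}

	int main(void)
	{
		struct sci_port_like port = { .uartclk = 33333333 };

		on_freq_change(&port.freq_transition, 66666666);
		printf("%lu\n", port.uartclk);	/* 66666666 */
		return 0;
	}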
@@ -882,21 +862,21 @@ static int sci_request_irq(struct sci_port *port)
const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full",
"SCI Transmit Data Empty", "SCI Break" };
- if (port->irqs[0] == port->irqs[1]) {
- if (unlikely(!port->irqs[0]))
+ if (port->cfg->irqs[0] == port->cfg->irqs[1]) {
+ if (unlikely(!port->cfg->irqs[0]))
return -ENODEV;
- if (request_irq(port->irqs[0], sci_mpxed_interrupt,
+ if (request_irq(port->cfg->irqs[0], sci_mpxed_interrupt,
IRQF_DISABLED, "sci", port)) {
dev_err(port->port.dev, "Can't allocate IRQ\n");
return -ENODEV;
}
} else {
for (i = 0; i < ARRAY_SIZE(handlers); i++) {
- if (unlikely(!port->irqs[i]))
+ if (unlikely(!port->cfg->irqs[i]))
continue;
- if (request_irq(port->irqs[i], handlers[i],
+ if (request_irq(port->cfg->irqs[i], handlers[i],
IRQF_DISABLED, desc[i], port)) {
dev_err(port->port.dev, "Can't allocate IRQ\n");
return -ENODEV;
@@ -911,14 +891,14 @@ static void sci_free_irq(struct sci_port *port)
{
int i;
- if (port->irqs[0] == port->irqs[1])
- free_irq(port->irqs[0], port);
+ if (port->cfg->irqs[0] == port->cfg->irqs[1])
+ free_irq(port->cfg->irqs[0], port);
else {
- for (i = 0; i < ARRAY_SIZE(port->irqs); i++) {
- if (!port->irqs[i])
+ for (i = 0; i < ARRAY_SIZE(port->cfg->irqs); i++) {
+ if (!port->cfg->irqs[i])
continue;
- free_irq(port->irqs[i], port);
+ free_irq(port->cfg->irqs[i], port);
}
}
}
@@ -1037,9 +1017,6 @@ static void sci_dma_rx_complete(void *arg)
schedule_work(&s->work_rx);
}
-static void sci_start_rx(struct uart_port *port);
-static void sci_start_tx(struct uart_port *port);
-
static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
{
struct dma_chan *chan = s->chan_rx;
@@ -1325,7 +1302,7 @@ static void rx_timer_fn(unsigned long arg)
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
scr &= ~0x4000;
- enable_irq(s->irqs[1]);
+ enable_irq(s->cfg->irqs[1]);
}
sci_out(port, SCSCR, scr | SCSCR_RIE);
dev_dbg(port->dev, "DMA Rx timed out\n");
@@ -1341,9 +1318,9 @@ static void sci_request_dma(struct uart_port *port)
int nent;
dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
- port->line, s->dma_dev);
+ port->line, s->cfg->dma_dev);
- if (!s->dma_dev)
+ if (!s->cfg->dma_dev)
return;
dma_cap_zero(mask);
@@ -1352,8 +1329,8 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_tx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
- param->slave_id = s->slave_tx;
- param->dma_dev = s->dma_dev;
+ param->slave_id = s->cfg->dma_slave_tx;
+ param->dma_dev = s->cfg->dma_dev;
s->cookie_tx = -EINVAL;
chan = dma_request_channel(mask, filter, param);
@@ -1381,8 +1358,8 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_rx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
- param->slave_id = s->slave_rx;
- param->dma_dev = s->dma_dev;
+ param->slave_id = s->cfg->dma_slave_rx;
+ param->dma_dev = s->cfg->dma_dev;
chan = dma_request_channel(mask, filter, param);
dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
@@ -1427,7 +1404,7 @@ static void sci_free_dma(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
- if (!s->dma_dev)
+ if (!s->cfg->dma_dev)
return;
if (s->chan_tx)
@@ -1435,21 +1412,32 @@ static void sci_free_dma(struct uart_port *port)
if (s->chan_rx)
sci_rx_dma_release(s, false);
}
+#else
+static inline void sci_request_dma(struct uart_port *port)
+{
+}
+
+static inline void sci_free_dma(struct uart_port *port)
+{
+}
#endif
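
The empty #else stubs above are the usual trick for keeping #ifdef out of the
call sites: sci_startup() and sci_shutdown() below can call sci_request_dma()
and sci_free_dma() unconditionally, and the compiler elides the no-ops when
CONFIG_SERIAL_SH_SCI_DMA is disabled. A generic sketch of the idiom, with
hypothetical names:

#ifdef CONFIG_MY_FEATURE
void my_feature_setup(struct my_ctx *ctx);	/* real implementation */
#else
static inline void my_feature_setup(struct my_ctx *ctx)
{
}
#endif

/* Callers read straight through in both configurations: */
static void my_init(struct my_ctx *ctx)
{
	my_feature_setup(ctx);
}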
static int sci_startup(struct uart_port *port)
{
struct sci_port *s = to_sci_port(port);
+ int ret;
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
if (s->enable)
s->enable(port);
- sci_request_irq(s);
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
+ ret = sci_request_irq(s);
+ if (unlikely(ret < 0))
+ return ret;
+
sci_request_dma(port);
-#endif
+
sci_start_tx(port);
sci_start_rx(port);
@@ -1464,9 +1452,8 @@ static void sci_shutdown(struct uart_port *port)
sci_stop_rx(port);
sci_stop_tx(port);
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
+
sci_free_dma(port);
-#endif
sci_free_irq(s);
if (s->disable)
@@ -1491,6 +1478,7 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
/* Warn, but use a safe default */
WARN_ON(1);
+
return ((freq + 16 * bps) / (32 * bps) - 1);
}
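
The fallback above is the standard SCI bit-rate formula, SCBRR =
freq / (32 * bps) - 1, with 16 * bps (half the divisor) added so the integer
division rounds to nearest. A standalone sketch of the arithmetic, with an
assumed peripheral clock:

#include <stdio.h>

int main(void)
{
	unsigned int freq = 33333333;	/* assumed peripheral clock, in Hz */
	unsigned int bps = 115200;
	unsigned int t = (freq + 16 * bps) / (32 * bps) - 1;

	/* t = 8; the achieved rate is freq / (32 * (t + 1)) = ~115740 baud,
	 * roughly 0.5% above the requested 115200. */
	printf("SCBRR = %u\n", t);
	return 0;
}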
@@ -1514,7 +1502,10 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, max_baud);
if (likely(baud && port->uartclk))
- t = sci_scbrr_calc(s->scbrr_algo_id, baud, port->uartclk);
+ t = sci_scbrr_calc(s->cfg->scbrr_algo_id, baud, port->uartclk);
+
+ if (s->enable)
+ s->enable(port);
do {
status = sci_in(port, SCxSR);
@@ -1526,6 +1517,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
smr_val = sci_in(port, SCSMR) & 3;
+
if ((termios->c_cflag & CSIZE) == CS7)
smr_val |= 0x40;
if (termios->c_cflag & PARENB)
@@ -1540,7 +1532,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_out(port, SCSMR, smr_val);
dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
- s->scscr);
+ s->cfg->scscr);
if (t > 0) {
if (t >= 256) {
@@ -1556,7 +1548,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
sci_init_pins(port, termios->c_cflag);
sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
- sci_out(port, SCSCR, s->scscr);
+ sci_out(port, SCSCR, s->cfg->scscr);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
/*
@@ -1582,6 +1574,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) != 0)
sci_start_rx(port);
+
+ if (s->disable)
+ s->disable(port);
}
static const char *sci_type(struct uart_port *port)
@@ -1602,31 +1597,33 @@ static const char *sci_type(struct uart_port *port)
return NULL;
}
-static void sci_release_port(struct uart_port *port)
+static inline unsigned long sci_port_size(struct uart_port *port)
{
- /* Nothing here yet .. */
-}
-
-static int sci_request_port(struct uart_port *port)
-{
- /* Nothing here yet .. */
- return 0;
+ /*
+ * Pick an arbitrary size that encapsulates all of the base
+ * registers by default. This can be optimized later, or derived
+ * from platform resource data at such a time that ports begin to
+ * behave more erratically.
+ */
+ return 64;
}
-static void sci_config_port(struct uart_port *port, int flags)
+static int sci_remap_port(struct uart_port *port)
{
- struct sci_port *s = to_sci_port(port);
-
- port->type = s->type;
+ unsigned long size = sci_port_size(port);
+ /*
+ * Nothing to do if there's already an established membase.
+ */
if (port->membase)
- return;
+ return 0;
if (port->flags & UPF_IOREMAP) {
- port->membase = ioremap_nocache(port->mapbase, 0x40);
-
- if (IS_ERR(port->membase))
+ port->membase = ioremap_nocache(port->mapbase, size);
+ if (unlikely(!port->membase)) {
dev_err(port->dev, "can't remap port#%d\n", port->line);
+ return -ENXIO;
+ }
} else {
/*
* For the simple (and majority of) cases where we don't
@@ -1635,13 +1632,54 @@ static void sci_config_port(struct uart_port *port, int flags)
*/
port->membase = (void __iomem *)port->mapbase;
}
+
+ return 0;
+}
+
+static void sci_release_port(struct uart_port *port)
+{
+ if (port->flags & UPF_IOREMAP) {
+ iounmap(port->membase);
+ port->membase = NULL;
+ }
+
+ release_mem_region(port->mapbase, sci_port_size(port));
+}
+
+static int sci_request_port(struct uart_port *port)
+{
+ unsigned long size = sci_port_size(port);
+ struct resource *res;
+ int ret;
+
+ res = request_mem_region(port->mapbase, size, dev_name(port->dev));
+ if (unlikely(res == NULL))
+ return -EBUSY;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0)) {
+ release_resource(res);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sci_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ struct sci_port *sport = to_sci_port(port);
+
+ port->type = sport->cfg->type;
+ sci_request_port(port);
+ }
}
static int sci_verify_port(struct uart_port *port, struct serial_struct *ser)
{
struct sci_port *s = to_sci_port(port);
- if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
+ if (ser->irq != s->cfg->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs)
return -EINVAL;
if (ser->baud_base < 2400)
/* No paper tape reader for Mitch.. */
@@ -1726,36 +1764,29 @@ static int __devinit sci_init_single(struct platform_device *dev,
sci_port->break_timer.function = sci_break_timer;
init_timer(&sci_port->break_timer);
- port->mapbase = p->mapbase;
- port->membase = p->membase;
+ sci_port->cfg = p;
- port->irq = p->irqs[SCIx_TXI_IRQ];
+ port->mapbase = p->mapbase;
+ port->type = p->type;
port->flags = p->flags;
- sci_port->type = port->type = p->type;
- sci_port->scscr = p->scscr;
- sci_port->scbrr_algo_id = p->scbrr_algo_id;
-#ifdef CONFIG_SERIAL_SH_SCI_DMA
- sci_port->dma_dev = p->dma_dev;
- sci_port->slave_tx = p->dma_slave_tx;
- sci_port->slave_rx = p->dma_slave_rx;
+ /*
+ * The UART port needs an IRQ value, so we peg this to the TX IRQ
+ * for the multi-IRQ ports, which is where we are primarily
+ * concerned with the shutdown path synchronization.
+ *
+ * For the muxed case there's nothing more to do.
+ */
+ port->irq = p->irqs[SCIx_TXI_IRQ];
- dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__,
- p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
-#endif
+ if (p->dma_dev)
+ dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n",
+ p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
- memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
return 0;
}
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
-static struct tty_driver *serial_console_device(struct console *co, int *index)
-{
- struct uart_driver *p = &sci_uart_driver;
- *index = co->index;
- return p->tty_driver;
-}
-
static void serial_console_putchar(struct uart_port *port, int ch)
{
sci_poll_put_char(port, ch);
@@ -1768,8 +1799,8 @@ static void serial_console_putchar(struct uart_port *port, int ch)
static void serial_console_write(struct console *co, const char *s,
unsigned count)
{
- struct uart_port *port = co->data;
- struct sci_port *sci_port = to_sci_port(port);
+ struct sci_port *sci_port = &sci_ports[co->index];
+ struct uart_port *port = &sci_port->port;
unsigned short bits;
if (sci_port->enable)
@@ -1797,32 +1828,17 @@ static int __devinit serial_console_setup(struct console *co, char *options)
int ret;
/*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
- */
- if (co->index >= SCI_NPORTS)
- co->index = 0;
-
- if (co->data) {
- port = co->data;
- sci_port = to_sci_port(port);
- } else {
- sci_port = &sci_ports[co->index];
- port = &sci_port->port;
- co->data = port;
- }
-
- /*
- * Also need to check port->type, we don't actually have any
- * UPIO_PORT ports, but uart_report_port() handily misreports
- * it anyways if we don't have a port available by the time this is
- * called.
+ * Refuse to handle any bogus ports.
*/
- if (!port->type)
+ if (co->index < 0 || co->index >= SCI_NPORTS)
return -ENODEV;
- sci_config_port(port, 0);
+ sci_port = &sci_ports[co->index];
+ port = &sci_port->port;
+
+ ret = sci_remap_port(port);
+ if (unlikely(ret != 0))
+ return ret;
if (sci_port->enable)
sci_port->enable(port);
@@ -1842,11 +1858,12 @@ static int __devinit serial_console_setup(struct console *co, char *options)
static struct console serial_console = {
.name = "ttySC",
- .device = serial_console_device,
+ .device = uart_console_device,
.write = serial_console_write,
.setup = serial_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
+ .data = &sci_uart_driver,
};
static int __init sci_console_init(void)
@@ -1856,14 +1873,39 @@ static int __init sci_console_init(void)
}
console_initcall(sci_console_init);
-static struct sci_port early_serial_port;
static struct console early_serial_console = {
.name = "early_ttySC",
.write = serial_console_write,
.flags = CON_PRINTBUFFER,
+ .index = -1,
};
+
static char early_serial_buf[32];
+static int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ struct plat_sci_port *cfg = pdev->dev.platform_data;
+
+ if (early_serial_console.data)
+ return -EEXIST;
+
+ early_serial_console.index = pdev->id;
+
+ sci_init_single(NULL, &sci_ports[pdev->id], pdev->id, cfg);
+
+ serial_console_setup(&early_serial_console, early_serial_buf);
+
+ if (!strstr(early_serial_buf, "keep"))
+ early_serial_console.flags |= CON_BOOT;
+
+ register_console(&early_serial_console);
+ return 0;
+}
+#else
+static inline int __devinit sci_probe_earlyprintk(struct platform_device *pdev)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
@@ -1885,24 +1927,18 @@ static struct uart_driver sci_uart_driver = {
.cons = SCI_CONSOLE,
};
-
static int sci_remove(struct platform_device *dev)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *port = platform_get_drvdata(dev);
- cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+ cpufreq_unregister_notifier(&port->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node) {
- uart_remove_one_port(&sci_uart_driver, &p->port);
- clk_put(p->iclk);
- clk_put(p->fclk);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_remove_one_port(&sci_uart_driver, &port->port);
+
+ clk_put(port->iclk);
+ clk_put(port->fclk);
- kfree(priv);
return 0;
}
@@ -1911,8 +1947,6 @@ static int __devinit sci_probe_single(struct platform_device *dev,
struct plat_sci_port *p,
struct sci_port *sciport)
{
- struct sh_sci_priv *priv = platform_get_drvdata(dev);
- unsigned long flags;
int ret;
/* Sanity check */
@@ -1929,68 +1963,35 @@ static int __devinit sci_probe_single(struct platform_device *dev,
if (ret)
return ret;
- ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
- if (ret)
- return ret;
-
- INIT_LIST_HEAD(&sciport->node);
-
- spin_lock_irqsave(&priv->lock, flags);
- list_add(&sciport->node, &priv->ports);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
+ return uart_add_one_port(&sci_uart_driver, &sciport->port);
}
-/*
- * Register a set of serial devices attached to a platform device. The
- * list is terminated with a zero flags entry, which means we expect
- * all entries to have at least UPF_BOOT_AUTOCONF set. Platforms that need
- * remapping (such as sh64) should also set UPF_IOREMAP.
- */
static int __devinit sci_probe(struct platform_device *dev)
{
struct plat_sci_port *p = dev->dev.platform_data;
- struct sh_sci_priv *priv;
- int i, ret = -EINVAL;
+ struct sci_port *sp = &sci_ports[dev->id];
+ int ret;
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
- if (is_early_platform_device(dev)) {
- if (dev->id == -1)
- return -ENOTSUPP;
- early_serial_console.index = dev->id;
- early_serial_console.data = &early_serial_port.port;
- sci_init_single(NULL, &early_serial_port, dev->id, p);
- serial_console_setup(&early_serial_console, early_serial_buf);
- if (!strstr(early_serial_buf, "keep"))
- early_serial_console.flags |= CON_BOOT;
- register_console(&early_serial_console);
- return 0;
- }
-#endif
+ /*
+ * If we've come here via earlyprintk initialization, head off to
+ * the special early probe. We don't have sufficient device state
+ * to make it beyond this yet.
+ */
+ if (is_early_platform_device(dev))
+ return sci_probe_earlyprintk(dev);
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ platform_set_drvdata(dev, sp);
- INIT_LIST_HEAD(&priv->ports);
- spin_lock_init(&priv->lock);
- platform_set_drvdata(dev, priv);
+ ret = sci_probe_single(dev, dev->id, p, sp);
+ if (ret)
+ goto err_unreg;
- priv->clk_nb.notifier_call = sci_notifier;
- cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
+ sp->freq_transition.notifier_call = sci_notifier;
- if (dev->id != -1) {
- ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]);
- if (ret)
- goto err_unreg;
- } else {
- for (i = 0; p && p->flags != 0; p++, i++) {
- ret = sci_probe_single(dev, i, p, &sci_ports[i]);
- if (ret)
- goto err_unreg;
- }
- }
+ ret = cpufreq_register_notifier(&sp->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (unlikely(ret < 0))
+ goto err_unreg;
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
@@ -2005,28 +2006,20 @@ err_unreg:
static int sci_suspend(struct device *dev)
{
- struct sh_sci_priv *priv = dev_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *sport = dev_get_drvdata(dev);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
- uart_suspend_port(&sci_uart_driver, &p->port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (sport)
+ uart_suspend_port(&sci_uart_driver, &sport->port);
return 0;
}
static int sci_resume(struct device *dev)
{
- struct sh_sci_priv *priv = dev_get_drvdata(dev);
- struct sci_port *p;
- unsigned long flags;
+ struct sci_port *sport = dev_get_drvdata(dev);
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry(p, &priv->ports, node)
- uart_resume_port(&sci_uart_driver, &p->port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (sport)
+ uart_resume_port(&sci_uart_driver, &sport->port);
return 0;
}
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index b223d6cbf33a..5fefed53fa42 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -54,9 +54,6 @@
# define PBCR 0xa4050102
#elif defined(CONFIG_CPU_SUBTYPE_SH7343)
# define SCSPTR0 0xffe00010 /* 16 bit SCIF */
-# define SCSPTR1 0xffe10010 /* 16 bit SCIF */
-# define SCSPTR2 0xffe20010 /* 16 bit SCIF */
-# define SCSPTR3 0xffe30010 /* 16 bit SCIF */
#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
# define PADR 0xA4050120
# define PSDR 0xA405013e
@@ -69,77 +66,42 @@
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
# define SCSPTR0 0xa4050160
-# define SCSPTR1 0xa405013e
-# define SCSPTR2 0xa4050160
-# define SCSPTR3 0xa405013e
-# define SCSPTR4 0xa4050128
-# define SCSPTR5 0xa4050128
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH4_202)
# define SCSPTR2 0xffe80020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
-#elif defined(CONFIG_CPU_SUBTYPE_SH5_101) || defined(CONFIG_CPU_SUBTYPE_SH5_103)
-# define SCIF_PTR2_OFFS 0x0000020
-# define SCSPTR2 ((port->mapbase)+SCIF_PTR2_OFFS) /* 16 bit SCIF */
#elif defined(CONFIG_H83007) || defined(CONFIG_H83068)
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_H8S2678)
# define H8300_SCI_DR(ch) *(volatile char *)(P1DR + h8300_sci_pins[ch].port)
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
# define SCSPTR0 0xfe4b0020
-# define SCSPTR1 0xfe4b0020
-# define SCSPTR2 0xfe4b0020
# define SCIF_ORER 0x0001
-# define SCIF_ONLY
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCSPTR1 0xffe08024 /* 16 bit SCIF */
-# define SCSPTR2 0xffe10020 /* 16 bit SCIF/IRDA */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7770)
# define SCSPTR0 0xff923020 /* 16 bit SCIF */
-# define SCSPTR1 0xff924020 /* 16 bit SCIF */
-# define SCSPTR2 0xff925020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
# define SCSPTR0 0xffe00024 /* 16 bit SCIF */
-# define SCSPTR1 0xffe10024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7785) || \
defined(CONFIG_CPU_SUBTYPE_SH7786)
# define SCSPTR0 0xffea0024 /* 16 bit SCIF */
-# define SCSPTR1 0xffeb0024 /* 16 bit SCIF */
-# define SCSPTR2 0xffec0024 /* 16 bit SCIF */
-# define SCSPTR3 0xffed0024 /* 16 bit SCIF */
-# define SCSPTR4 0xffee0024 /* 16 bit SCIF */
-# define SCSPTR5 0xffef0024 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SH7201) || \
defined(CONFIG_CPU_SUBTYPE_SH7203) || \
defined(CONFIG_CPU_SUBTYPE_SH7206) || \
defined(CONFIG_CPU_SUBTYPE_SH7263)
# define SCSPTR0 0xfffe8020 /* 16 bit SCIF */
-# define SCSPTR1 0xfffe8820 /* 16 bit SCIF */
-# define SCSPTR2 0xfffe9020 /* 16 bit SCIF */
-# define SCSPTR3 0xfffe9820 /* 16 bit SCIF */
-# if defined(CONFIG_CPU_SUBTYPE_SH7201)
-# define SCSPTR4 0xfffeA020 /* 16 bit SCIF */
-# define SCSPTR5 0xfffeA820 /* 16 bit SCIF */
-# define SCSPTR6 0xfffeB020 /* 16 bit SCIF */
-# define SCSPTR7 0xfffeB820 /* 16 bit SCIF */
-# endif
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
# define SCSPTR0 0xf8400020 /* 16 bit SCIF */
-# define SCSPTR1 0xf8410020 /* 16 bit SCIF */
-# define SCSPTR2 0xf8420020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* overrun error bit */
#elif defined(CONFIG_CPU_SUBTYPE_SHX3)
# define SCSPTR0 0xffc30020 /* 16 bit SCIF */
-# define SCSPTR1 0xffc40020 /* 16 bit SCIF */
-# define SCSPTR2 0xffc50020 /* 16 bit SCIF */
-# define SCSPTR3 0xffc60020 /* 16 bit SCIF */
# define SCIF_ORER 0x0001 /* Overrun error bit */
#else
# error CPU subtype not defined
@@ -411,7 +373,6 @@ SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
SCIF_FNS(SCLSR, 0, 0, 0x28, 16)
#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
SCIF_FNS(SCFDR, 0, 0, 0x1C, 16)
-SCIF_FNS(SCSPTR2, 0, 0, 0x20, 16)
SCIF_FNS(SCTFDR, 0x0e, 16, 0x1C, 16)
SCIF_FNS(SCRFDR, 0x0e, 16, 0x20, 16)
SCIF_FNS(SCSPTR, 0, 0, 0x24, 16)
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index b57bc273b184..2a753f1e9183 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1942,6 +1942,7 @@ config FB_SH_MOBILE_LCDC
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
select FB_DEFERRED_IO
+ select FB_BACKLIGHT
select SH_MIPI_DSI if SH_LCD_MIPI_DSI
---help---
Frame buffer driver for the on-chip SH-Mobile LCD controller.
@@ -2302,6 +2303,17 @@ config FB_JZ4740
help
Framebuffer support for the JZ4740 SoC.
+config FB_PUV3_UNIGFX
+ tristate "PKUnity v3 Unigfx framebuffer support"
+ depends on FB && UNICORE32 && ARCH_PUV3
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ help
+ Choose this option if you want to use the Unigfx device as a
+ framebuffer device. It does not require PCI or AGP support.
+
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 8c8fabdff9d0..b0eb3da24670 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -139,6 +139,7 @@ obj-$(CONFIG_FB_MB862XX) += mb862xx/
obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
+obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
diff --git a/drivers/video/fb-puv3.c b/drivers/video/fb-puv3.c
new file mode 100644
index 000000000000..dbd2dc4745d1
--- /dev/null
+++ b/drivers/video/fb-puv3.c
@@ -0,0 +1,846 @@
+/*
+ * Frame Buffer Driver for PKUnity-v3 Unigfx
+ * Code specific to PKUnity SoC and UniCore ISA
+ *
+ * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
+ * Copyright (C) 2001-2010 Guan Xuetao
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/console.h>
+
+#include <asm/sizes.h>
+#include <mach/hardware.h>
+
+/* Platform_data reserved for unifb registers. */
+#define UNIFB_REGS_NUM 10
+/* RAM reserved for the frame buffer. */
+#define UNIFB_MEMSIZE (SZ_4M) /* 4 MB for 1024*768*32b */
+
+/*
+ * Because UNIGFX does not have EDID,
+ * all of the modes are organized as follows.
+ */
+static const struct fb_videomode unifb_modes[] = {
+ /* 0 640x480-60 VESA */
+ { "640x480@60", 60, 640, 480, 25175000, 48, 16, 34, 10, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 1 640x480-75 VESA */
+ { "640x480@75", 75, 640, 480, 31500000, 120, 16, 18, 1, 64, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 2 800x600-60 VESA */
+ { "800x600@60", 60, 800, 600, 40000000, 88, 40, 26, 1, 128, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 3 800x600-75 VESA */
+ { "800x600@75", 75, 800, 600, 49500000, 160, 16, 23, 1, 80, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 4 1024x768-60 VESA */
+ { "1024x768@60", 60, 1024, 768, 65000000, 160, 24, 34, 3, 136, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 5 1024x768-75 VESA */
+ { "1024x768@75", 75, 1024, 768, 78750000, 176, 16, 30, 1, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 6 1280x960-60 VESA */
+ { "1280x960@60", 60, 1280, 960, 108000000, 312, 96, 38, 1, 112, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 7 1440x900-60 VESA */
+ { "1440x900@60", 60, 1440, 900, 106500000, 232, 80, 30, 3, 152, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 8 1024x600-60 VESA (FIXME: untested) */
+ { "1024x600@60", 60, 1024, 600, 50650000, 160, 24, 26, 1, 136, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 9 1024x600-75 VESA (FIXME: untested) */
+ { "1024x600@75", 75, 1024, 600, 61500000, 176, 16, 23, 1, 96, 1,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+ /* 10 1366x768-60 VESA (FIXME: untested) */
+ { "1366x768@60", 60, 1366, 768, 85500000, 256, 58, 18, 1, 112, 3,
+ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
+};
+
+static struct fb_var_screeninfo unifb_default = {
+ .xres = 640,
+ .yres = 480,
+ .xres_virtual = 640,
+ .yres_virtual = 480,
+ .bits_per_pixel = 16,
+ .red = { 11, 5, 0 },
+ .green = { 5, 6, 0 },
+ .blue = { 0, 5, 0 },
+ .activate = FB_ACTIVATE_NOW,
+ .height = -1,
+ .width = -1,
+ .pixclock = 25175000,
+ .left_margin = 48,
+ .right_margin = 16,
+ .upper_margin = 33,
+ .lower_margin = 10,
+ .hsync_len = 96,
+ .vsync_len = 2,
+ .vmode = FB_VMODE_NONINTERLACED,
+};
+
+static struct fb_fix_screeninfo unifb_fix = {
+ .id = "UNIGFX FB",
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_TRUECOLOR,
+ .xpanstep = 1,
+ .ypanstep = 1,
+ .ywrapstep = 1,
+ .accel = FB_ACCEL_NONE,
+};
+
+static void unifb_sync(struct fb_info *info)
+{
+ /* TODO: this polling loop may be replaceable by an interrupt */
+ int cnt;
+
+ for (cnt = 0; cnt < 0x10000000; cnt++) {
+ if (readl(UGE_COMMAND) & 0x1000000)
+ return;
+ }
+
+ /* Falling out of the loop means cnt == 0x10000000, i.e. a timeout. */
+ if (cnt > 0x8000000)
+ dev_warn(info->device, "Warning: UniGFX GE time out ...\n");
+}
+
+static void unifb_prim_fillrect(struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ int awidth = region->width;
+ int aheight = region->height;
+ int m_iBpp = info->var.bits_per_pixel;
+ int screen_width = info->var.xres;
+ int src_sel = 1; /* from fg_color */
+ int pat_sel = 1;
+ int src_x0 = 0;
+ int dst_x0 = region->dx;
+ int src_y0 = 0;
+ int dst_y0 = region->dy;
+ int rop_alpha_sel = 0;
+ int rop_alpha_code = 0xCC;
+ int x_dir = 1;
+ int y_dir = 1;
+ int alpha_r = 0;
+ int alpha_sel = 0;
+ int dst_pitch = screen_width * (m_iBpp / 8);
+ int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
+ int src_pitch = screen_width * (m_iBpp / 8);
+ int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
+ unsigned int command = 0;
+ int clip_region = 0;
+ int clip_en = 0;
+ int tp_en = 0;
+ int fg_color = 0;
+ int bottom = info->var.yres - 1;
+ int right = info->var.xres - 1;
+ int top = 0;
+
+ bottom = (bottom << 16) | right;
+ command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16)
+ | (x_dir << 20) | (y_dir << 21) | (command << 24)
+ | (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
+ src_pitch = (dst_pitch << 16) | src_pitch;
+ awidth = awidth | (aheight << 16);
+ alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff)
+ | (alpha_sel << 16);
+ src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
+ dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
+ fg_color = region->color;
+
+ unifb_sync(info);
+
+ writel(((u32 *)(info->pseudo_palette))[fg_color], UGE_FCOLOR);
+ writel(0, UGE_BCOLOR);
+ writel(src_pitch, UGE_PITCH);
+ writel(src_offset, UGE_SRCSTART);
+ writel(dst_offset, UGE_DSTSTART);
+ writel(awidth, UGE_WIDHEIGHT);
+ writel(top, UGE_CLIP0);
+ writel(bottom, UGE_CLIP1);
+ writel(alpha_r, UGE_ROPALPHA);
+ writel(src_x0, UGE_SRCXY);
+ writel(dst_x0, UGE_DSTXY);
+ writel(command, UGE_COMMAND);
+}
+
+static void unifb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *region)
+{
+ struct fb_fillrect modded;
+ int vxres, vyres;
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ sys_fillrect(info, region);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ memcpy(&modded, region, sizeof(struct fb_fillrect));
+
+ if (!modded.width || !modded.height ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if (modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if (modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ unifb_prim_fillrect(info, &modded);
+}
+
+static void unifb_prim_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ int awidth = area->width;
+ int aheight = area->height;
+ int m_iBpp = info->var.bits_per_pixel;
+ int screen_width = info->var.xres;
+ int src_sel = 2; /* from mem */
+ int pat_sel = 0;
+ int src_x0 = area->sx;
+ int dst_x0 = area->dx;
+ int src_y0 = area->sy;
+ int dst_y0 = area->dy;
+
+ int rop_alpha_sel = 0;
+ int rop_alpha_code = 0xCC;
+ int x_dir = 1;
+ int y_dir = 1;
+
+ int alpha_r = 0;
+ int alpha_sel = 0;
+ int dst_pitch = screen_width * (m_iBpp / 8);
+ int dst_offset = dst_y0 * dst_pitch + dst_x0 * (m_iBpp / 8);
+ int src_pitch = screen_width * (m_iBpp / 8);
+ int src_offset = src_y0 * src_pitch + src_x0 * (m_iBpp / 8);
+ unsigned int command = 0;
+ int clip_region = 0;
+ int clip_en = 1;
+ int tp_en = 0;
+ int top = 0;
+ int bottom = info->var.yres;
+ int right = info->var.xres;
+ int fg_color = 0;
+ int bg_color = 0;
+
+ if (src_x0 < 0)
+ src_x0 = 0;
+ if (src_y0 < 0)
+ src_y0 = 0;
+
+ if (src_y0 - dst_y0 > 0) {
+ y_dir = 1;
+ } else {
+ y_dir = 0;
+ src_offset = (src_y0 + aheight) * src_pitch +
+ src_x0 * (m_iBpp / 8);
+ dst_offset = (dst_y0 + aheight) * dst_pitch +
+ dst_x0 * (m_iBpp / 8);
+ src_y0 += aheight;
+ dst_y0 += aheight;
+ }
+
+ command = (rop_alpha_sel << 26) | (pat_sel << 18) | (src_sel << 16) |
+ (x_dir << 20) | (y_dir << 21) | (command << 24) |
+ (clip_region << 23) | (clip_en << 22) | (tp_en << 27);
+ src_pitch = (dst_pitch << 16) | src_pitch;
+ awidth = awidth | (aheight << 16);
+ alpha_r = ((rop_alpha_code & 0xff) << 8) | (alpha_r & 0xff) |
+ (alpha_sel << 16);
+ src_x0 = (src_x0 & 0x1fff) | ((src_y0 & 0x1fff) << 16);
+ dst_x0 = (dst_x0 & 0x1fff) | ((dst_y0 & 0x1fff) << 16);
+ bottom = (bottom << 16) | right;
+
+ unifb_sync(info);
+
+ writel(src_pitch, UGE_PITCH);
+ writel(src_offset, UGE_SRCSTART);
+ writel(dst_offset, UGE_DSTSTART);
+ writel(awidth, UGE_WIDHEIGHT);
+ writel(top, UGE_CLIP0);
+ writel(bottom, UGE_CLIP1);
+ writel(bg_color, UGE_BCOLOR);
+ writel(fg_color, UGE_FCOLOR);
+ writel(alpha_r, UGE_ROPALPHA);
+ writel(src_x0, UGE_SRCXY);
+ writel(dst_x0, UGE_DSTXY);
+ writel(command, UGE_COMMAND);
+}
+
+static void unifb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
+{
+ struct fb_copyarea modded;
+ u32 vxres, vyres;
+ modded.sx = area->sx;
+ modded.sy = area->sy;
+ modded.dx = area->dx;
+ modded.dy = area->dy;
+ modded.width = area->width;
+ modded.height = area->height;
+
+ if (info->flags & FBINFO_HWACCEL_DISABLED) {
+ sys_copyarea(info, area);
+ return;
+ }
+
+ vxres = info->var.xres_virtual;
+ vyres = info->var.yres_virtual;
+
+ if (!modded.width || !modded.height ||
+ modded.sx >= vxres || modded.sy >= vyres ||
+ modded.dx >= vxres || modded.dy >= vyres)
+ return;
+
+ if (modded.sx + modded.width > vxres)
+ modded.width = vxres - modded.sx;
+ if (modded.dx + modded.width > vxres)
+ modded.width = vxres - modded.dx;
+ if (modded.sy + modded.height > vyres)
+ modded.height = vyres - modded.sy;
+ if (modded.dy + modded.height > vyres)
+ modded.height = vyres - modded.dy;
+
+ unifb_prim_copyarea(info, &modded);
+}
+
+static void unifb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ sys_imageblit(info, image);
+}
+
+static u_long get_line_length(int xres_virtual, int bpp)
+{
+ u_long length;
+
+ length = xres_virtual * bpp;
+ length = (length + 31) & ~31;
+ length >>= 3;
+ return length;
+}
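
get_line_length() pads each scanline to a 32-bit boundary before converting
bits to bytes, which is what keeps line_length consistent for the odd pixel
formats. A standalone sketch of the arithmetic:

#include <stdio.h>

static unsigned long line_length(int xres_virtual, int bpp)
{
	unsigned long bits = (unsigned long)xres_virtual * bpp;

	/* Round up to a multiple of 32 bits, then convert to bytes. */
	return ((bits + 31) & ~31UL) >> 3;
}

int main(void)
{
	printf("%lu\n", line_length(1366, 24));	/* 32784 -> 32800 bits = 4100 bytes */
	printf("%lu\n", line_length(1024, 16));	/* already aligned = 2048 bytes */
	return 0;
}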
+
+/*
+ * Setting the video mode has been split into two parts.
+ * The first part, xxxfb_check_var, must not write anything
+ * to the hardware; it should only verify and adjust var.
+ * This means it doesn't alter par, though it may use hardware
+ * data from par to check this var.
+ */
+static int unifb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ u_long line_length;
+
+ /*
+ * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal,
+ * as FB_VMODE_SMOOTH_XPAN is only used internally.
+ */
+
+ if (var->vmode & FB_VMODE_CONUPDATE) {
+ var->vmode |= FB_VMODE_YWRAP;
+ var->xoffset = info->var.xoffset;
+ var->yoffset = info->var.yoffset;
+ }
+
+ /*
+ * Some very basic checks
+ */
+ if (!var->xres)
+ var->xres = 1;
+ if (!var->yres)
+ var->yres = 1;
+ if (var->xres > var->xres_virtual)
+ var->xres_virtual = var->xres;
+ if (var->yres > var->yres_virtual)
+ var->yres_virtual = var->yres;
+ if (var->bits_per_pixel <= 1)
+ var->bits_per_pixel = 1;
+ else if (var->bits_per_pixel <= 8)
+ var->bits_per_pixel = 8;
+ else if (var->bits_per_pixel <= 16)
+ var->bits_per_pixel = 16;
+ else if (var->bits_per_pixel <= 24)
+ var->bits_per_pixel = 24;
+ else if (var->bits_per_pixel <= 32)
+ var->bits_per_pixel = 32;
+ else
+ return -EINVAL;
+
+ if (var->xres_virtual < var->xoffset + var->xres)
+ var->xres_virtual = var->xoffset + var->xres;
+ if (var->yres_virtual < var->yoffset + var->yres)
+ var->yres_virtual = var->yoffset + var->yres;
+
+ /*
+ * Memory limit
+ */
+ line_length =
+ get_line_length(var->xres_virtual, var->bits_per_pixel);
+ if (line_length * var->yres_virtual > UNIFB_MEMSIZE)
+ return -ENOMEM;
+
+ /*
+ * Now that we've checked it, we alter var. The reason is that the
+ * video mode passed in might not work, but slight changes to it
+ * might. This way we let the user know what is acceptable.
+ */
+ switch (var->bits_per_pixel) {
+ case 1:
+ case 8:
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 0;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 16: /* RGBA 5551 */
+ if (var->transp.length) {
+ var->red.offset = 0;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 5;
+ var->blue.offset = 10;
+ var->blue.length = 5;
+ var->transp.offset = 15;
+ var->transp.length = 1;
+ } else { /* RGB 565 */
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ }
+ break;
+ case 24: /* RGB 888 */
+ var->red.offset = 0;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 16;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ break;
+ case 32: /* RGBA 8888 */
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ break;
+ }
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+
+ return 0;
+}
+
+/*
+ * This routine actually sets the video mode. It's here that we set
+ * the hardware state, info->par, and fix, which can be affected by
+ * the change in par. For this driver it doesn't do much.
+ */
+static int unifb_set_par(struct fb_info *info)
+{
+ int hTotal, vTotal, hSyncStart, hSyncEnd, vSyncStart, vSyncEnd;
+ int format;
+
+#ifdef CONFIG_PUV3_PM
+ struct clk *clk_vga;
+ u32 pixclk = 0;
+ int i;
+
+ for (i = 0; i <= 10; i++) {
+ if (info->var.xres == unifb_modes[i].xres
+ && info->var.yres == unifb_modes[i].yres
+ && info->var.upper_margin == unifb_modes[i].upper_margin
+ && info->var.lower_margin == unifb_modes[i].lower_margin
+ && info->var.left_margin == unifb_modes[i].left_margin
+ && info->var.right_margin == unifb_modes[i].right_margin
+ && info->var.hsync_len == unifb_modes[i].hsync_len
+ && info->var.vsync_len == unifb_modes[i].vsync_len) {
+ pixclk = unifb_modes[i].pixclock;
+ break;
+ }
+ }
+
+ /* set clock rate */
+ clk_vga = clk_get(info->device, "VGA_CLK");
+ if (clk_vga == ERR_PTR(-ENOENT))
+ return -ENOENT;
+
+ if (pixclk != 0) {
+ if (clk_set_rate(clk_vga, pixclk)) { /* set clock failed */
+ info->fix = unifb_fix;
+ info->var = unifb_default;
+ if (clk_set_rate(clk_vga, unifb_default.pixclock))
+ return -EINVAL;
+ }
+ }
+#endif
+
+ info->fix.line_length = get_line_length(info->var.xres_virtual,
+ info->var.bits_per_pixel);
+
+ hSyncStart = info->var.xres + info->var.right_margin;
+ hSyncEnd = hSyncStart + info->var.hsync_len;
+ hTotal = hSyncEnd + info->var.left_margin;
+
+ vSyncStart = info->var.yres + info->var.lower_margin;
+ vSyncEnd = vSyncStart + info->var.vsync_len;
+ vTotal = vSyncEnd + info->var.upper_margin;
+
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ format = UDE_CFG_DST8;
+ break;
+ case 16:
+ format = UDE_CFG_DST16;
+ break;
+ case 24:
+ format = UDE_CFG_DST24;
+ break;
+ case 32:
+ format = UDE_CFG_DST32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel(PKUNITY_UNIGFX_MMAP_BASE, UDE_FSA);
+ writel(info->var.yres, UDE_LS);
+ writel(get_line_length(info->var.xres,
+ info->var.bits_per_pixel) >> 3, UDE_PS);
+ /* The hardware requires the value shifted right by 3. */
+ writel((hTotal << 16) | (info->var.xres), UDE_HAT);
+ writel(((hTotal - 1) << 16) | (info->var.xres - 1), UDE_HBT);
+ writel(((hSyncEnd - 1) << 16) | (hSyncStart - 1), UDE_HST);
+ writel((vTotal << 16) | (info->var.yres), UDE_VAT);
+ writel(((vTotal - 1) << 16) | (info->var.yres - 1), UDE_VBT);
+ writel(((vSyncEnd - 1) << 16) | (vSyncStart - 1), UDE_VST);
+ writel(UDE_CFG_GDEN_ENABLE | UDE_CFG_TIMEUP_ENABLE
+ | format | 0xC0000001, UDE_CFG);
+
+ return 0;
+}
+
+/*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+ * entries in the var structure). Return != 0 for invalid regno.
+ */
+static int unifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ if (regno >= 256) /* no. of hw registers */
+ return 1;
+
+ /* grayscale works only partially under directcolor */
+ if (info->var.grayscale) {
+ /* grayscale = 0.30*R + 0.59*G + 0.11*B */
+ red = green = blue =
+ (red * 77 + green * 151 + blue * 28) >> 8;
+ }
+
+#define CNVT_TOHW(val, width) ((((val)<<(width))+0x7FFF-(val))>>16)
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ case FB_VISUAL_PSEUDOCOLOR:
+ red = CNVT_TOHW(red, info->var.red.length);
+ green = CNVT_TOHW(green, info->var.green.length);
+ blue = CNVT_TOHW(blue, info->var.blue.length);
+ transp = CNVT_TOHW(transp, info->var.transp.length);
+ break;
+ case FB_VISUAL_DIRECTCOLOR:
+ red = CNVT_TOHW(red, 8); /* expect 8 bit DAC */
+ green = CNVT_TOHW(green, 8);
+ blue = CNVT_TOHW(blue, 8);
+ /* hey, there is a bug in transp handling... */
+ transp = CNVT_TOHW(transp, 8);
+ break;
+ }
+#undef CNVT_TOHW
+ /* Truecolor has hardware independent palette */
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 v;
+
+ if (regno >= 16)
+ return 1;
+
+ v = (red << info->var.red.offset) |
+ (green << info->var.green.offset) |
+ (blue << info->var.blue.offset) |
+ (transp << info->var.transp.offset);
+ switch (info->var.bits_per_pixel) {
+ case 8:
+ break;
+ case 16:
+ case 24:
+ case 32:
+ ((u32 *) (info->pseudo_palette))[regno] = v;
+ break;
+ default:
+ return 1;
+ }
+ return 0;
+ }
+ return 0;
+}
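
The CNVT_TOHW macro above rescales the 16-bit components fbdev supplies down
to the field widths declared in var, approximately computing
round(val * ((1 << width) - 1) / 0xFFFF). A standalone check of the
arithmetic:

#include <stdio.h>

#define CNVT_TOHW(val, width) ((((val) << (width)) + 0x7FFF - (val)) >> 16)

int main(void)
{
	/* Full-scale 16-bit red into a 5-bit field -> 31. */
	printf("%u\n", CNVT_TOHW(0xFFFFu, 5));
	/* Mid-scale green into a 6-bit field -> 31 (of 63). */
	printf("%u\n", CNVT_TOHW(0x8000u, 6));
	return 0;
}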
+
+/*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ */
+static int unifb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if (var->vmode & FB_VMODE_YWRAP) {
+ if (var->yoffset < 0
+ || var->yoffset >= info->var.yres_virtual
+ || var->xoffset)
+ return -EINVAL;
+ } else {
+ if (var->xoffset + var->xres > info->var.xres_virtual ||
+ var->yoffset + var->yres > info->var.yres_virtual)
+ return -EINVAL;
+ }
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ info->var.vmode |= FB_VMODE_YWRAP;
+ else
+ info->var.vmode &= ~FB_VMODE_YWRAP;
+ return 0;
+}
+
+static int unifb_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+ unsigned long pos = info->fix.smem_start + offset;
+
+ if (offset + size > info->fix.smem_len)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, pos >> PAGE_SHIFT, size,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */
+ return 0;
+}
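
For illustration, a hedged userspace counterpart that exercises unifb_mmap()
through the generic fbdev interface; the /dev/fb0 node name is an assumption
that depends on framebuffer registration order:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>

int main(void)
{
	struct fb_fix_screeninfo fix;
	void *fb;
	int fd = open("/dev/fb0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
		return 1;

	fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
		  MAP_SHARED, fd, 0);
	if (fb == MAP_FAILED)
		return 1;

	memset(fb, 0xff, fix.smem_len);	/* paint the screen */
	munmap(fb, fix.smem_len);
	close(fd);
	return 0;
}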
+
+static struct fb_ops unifb_ops = {
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_check_var = unifb_check_var,
+ .fb_set_par = unifb_set_par,
+ .fb_setcolreg = unifb_setcolreg,
+ .fb_pan_display = unifb_pan_display,
+ .fb_fillrect = unifb_fillrect,
+ .fb_copyarea = unifb_copyarea,
+ .fb_imageblit = unifb_imageblit,
+ .fb_mmap = unifb_mmap,
+};
+
+/*
+ * Initialisation
+ */
+static int unifb_probe(struct platform_device *dev)
+{
+ struct fb_info *info;
+ u32 unifb_regs[UNIFB_REGS_NUM];
+ int retval = -ENOMEM;
+ struct resource *iomem, *mapmem;
+
+ info = framebuffer_alloc(sizeof(u32)*256, &dev->dev);
+ if (!info)
+ goto err;
+
+ info->screen_base = (char __iomem *)KUSER_UNIGFX_BASE;
+ info->fbops = &unifb_ops;
+
+ retval = fb_find_mode(&info->var, info, NULL,
+ unifb_modes, 10, &unifb_modes[0], 16);
+
+ if (!retval || (retval == 4))
+ info->var = unifb_default;
+
+ iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ unifb_fix.mmio_start = iomem->start;
+
+ mapmem = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ unifb_fix.smem_start = mapmem->start;
+ unifb_fix.smem_len = UNIFB_MEMSIZE;
+
+ info->fix = unifb_fix;
+ info->pseudo_palette = info->par;
+ info->par = NULL;
+ info->flags = FBINFO_FLAG_DEFAULT;
+#ifdef FB_ACCEL_PUV3_UNIGFX
+ info->fix.accel = FB_ACCEL_PUV3_UNIGFX;
+#endif
+
+ retval = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (retval < 0)
+ goto err1;
+
+ retval = register_framebuffer(info);
+ if (retval < 0)
+ goto err2;
+ platform_set_drvdata(dev, info);
+ platform_device_add_data(dev, unifb_regs, sizeof(u32) * UNIFB_REGS_NUM);
+
+ printk(KERN_INFO
+ "fb%d: Virtual frame buffer device, using %dM of video memory\n",
+ info->node, UNIFB_MEMSIZE >> 20);
+ return 0;
+err2:
+ fb_dealloc_cmap(&info->cmap);
+err1:
+ framebuffer_release(info);
+err:
+ return retval;
+}
+
+static int unifb_remove(struct platform_device *dev)
+{
+ struct fb_info *info = platform_get_drvdata(dev);
+
+ if (info) {
+ unregister_framebuffer(info);
+ fb_dealloc_cmap(&info->cmap);
+ framebuffer_release(info);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int unifb_resume(struct platform_device *dev)
+{
+ int rc = 0;
+ u32 *unifb_regs = dev->dev.platform_data;
+
+ if (dev->dev.power.power_state.event == PM_EVENT_ON)
+ return 0;
+
+ console_lock();
+
+ if (dev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
+ writel(unifb_regs[0], UDE_FSA);
+ writel(unifb_regs[1], UDE_LS);
+ writel(unifb_regs[2], UDE_PS);
+ writel(unifb_regs[3], UDE_HAT);
+ writel(unifb_regs[4], UDE_HBT);
+ writel(unifb_regs[5], UDE_HST);
+ writel(unifb_regs[6], UDE_VAT);
+ writel(unifb_regs[7], UDE_VBT);
+ writel(unifb_regs[8], UDE_VST);
+ writel(unifb_regs[9], UDE_CFG);
+ }
+ dev->dev.power.power_state = PMSG_ON;
+
+ console_unlock();
+
+ return rc;
+}
+
+static int unifb_suspend(struct platform_device *dev, pm_message_t mesg)
+{
+ u32 *unifb_regs = dev->dev.platform_data;
+
+ unifb_regs[0] = readl(UDE_FSA);
+ unifb_regs[1] = readl(UDE_LS);
+ unifb_regs[2] = readl(UDE_PS);
+ unifb_regs[3] = readl(UDE_HAT);
+ unifb_regs[4] = readl(UDE_HBT);
+ unifb_regs[5] = readl(UDE_HST);
+ unifb_regs[6] = readl(UDE_VAT);
+ unifb_regs[7] = readl(UDE_VBT);
+ unifb_regs[8] = readl(UDE_VST);
+ unifb_regs[9] = readl(UDE_CFG);
+
+ if (mesg.event == dev->dev.power.power_state.event)
+ return 0;
+
+ switch (mesg.event) {
+ case PM_EVENT_FREEZE: /* about to take snapshot */
+ case PM_EVENT_PRETHAW: /* before restoring snapshot */
+ goto done;
+ }
+
+ console_lock();
+
+ /* do nothing... */
+
+ console_unlock();
+
+done:
+ dev->dev.power.power_state = mesg;
+
+ return 0;
+}
+#else
+#define unifb_resume NULL
+#define unifb_suspend NULL
+#endif
+
+static struct platform_driver unifb_driver = {
+ .probe = unifb_probe,
+ .remove = unifb_remove,
+ .resume = unifb_resume,
+ .suspend = unifb_suspend,
+ .driver = {
+ .name = "PKUnity-v3-UNIGFX",
+ },
+};
+
+static int __init unifb_init(void)
+{
+#ifndef MODULE
+ if (fb_get_options("unifb", NULL))
+ return -ENODEV;
+#endif
+
+ return platform_driver_register(&unifb_driver);
+}
+
+module_init(unifb_init);
+
+static void __exit unifb_exit(void)
+{
+ platform_driver_unregister(&unifb_driver);
+}
+
+module_exit(unifb_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index bf12e53aed5c..bf2629f83f40 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -21,6 +21,8 @@
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/console.h>
+#include <linux/backlight.h>
+#include <linux/gpio.h>
#include <video/sh_mobile_lcdc.h>
#include <asm/atomic.h>
@@ -67,6 +69,7 @@ static unsigned long lcdc_offs_mainlcd[NR_CH_REGS] = {
[LDSM1R] = 0x428,
[LDSM2R] = 0x42c,
[LDSA1R] = 0x430,
+ [LDSA2R] = 0x434,
[LDMLSR] = 0x438,
[LDHCNR] = 0x448,
[LDHSYNR] = 0x44c,
@@ -151,6 +154,7 @@ static bool banked(int reg_nr)
case LDDFR:
case LDSM1R:
case LDSA1R:
+ case LDSA2R:
case LDMLSR:
case LDHCNR:
case LDHSYNR:
@@ -463,6 +467,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
struct sh_mobile_lcdc_board_cfg *board_cfg;
unsigned long tmp;
int bpp = 0;
+ unsigned long ldddsr;
int k, m;
int ret = 0;
@@ -541,16 +546,21 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
}
/* word and long word swap */
- switch (bpp) {
- case 16:
- lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 6);
- break;
- case 24:
- lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 7);
- break;
- case 32:
- lcdc_write(priv, _LDDDSR, lcdc_read(priv, _LDDDSR) | 4);
- break;
+ ldddsr = lcdc_read(priv, _LDDDSR);
+ if (priv->ch[0].info->var.nonstd)
+ lcdc_write(priv, _LDDDSR, ldddsr | 7);
+ else {
+ switch (bpp) {
+ case 16:
+ lcdc_write(priv, _LDDDSR, ldddsr | 6);
+ break;
+ case 24:
+ lcdc_write(priv, _LDDDSR, ldddsr | 7);
+ break;
+ case 32:
+ lcdc_write(priv, _LDDDSR, ldddsr | 4);
+ break;
+ }
}
for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -561,21 +571,40 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
/* set bpp format in PKF[4:0] */
tmp = lcdc_read_chan(ch, LDDFR);
- tmp &= ~0x0001001f;
- switch (ch->info->var.bits_per_pixel) {
- case 16:
- tmp |= 0x03;
- break;
- case 24:
- tmp |= 0x0b;
- break;
- case 32:
- break;
+ tmp &= ~0x0003031f;
+ if (ch->info->var.nonstd) {
+ tmp |= (ch->info->var.nonstd << 16);
+ switch (ch->info->var.bits_per_pixel) {
+ case 12:
+ break;
+ case 16:
+ tmp |= (0x1 << 8);
+ break;
+ case 24:
+ tmp |= (0x2 << 8);
+ break;
+ }
+ } else {
+ switch (ch->info->var.bits_per_pixel) {
+ case 16:
+ tmp |= 0x03;
+ break;
+ case 24:
+ tmp |= 0x0b;
+ break;
+ case 32:
+ break;
+ }
}
lcdc_write_chan(ch, LDDFR, tmp);
/* point out our frame buffer */
lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start);
+ if (ch->info->var.nonstd)
+ lcdc_write_chan(ch, LDSA2R,
+ ch->info->fix.smem_start +
+ ch->info->var.xres *
+ ch->info->var.yres_virtual);
/* set line size */
lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length);
@@ -618,6 +647,11 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
board_cfg->display_on(board_cfg->board_data, ch->info);
module_put(board_cfg->owner);
}
+
+ if (ch->bl) {
+ ch->bl->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(ch->bl);
+ }
}
return 0;
@@ -648,6 +682,11 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv)
sh_mobile_lcdc_clk_on(priv);
}
+ if (ch->bl) {
+ ch->bl->props.power = FB_BLANK_POWERDOWN;
+ backlight_update_status(ch->bl);
+ }
+
board_cfg = &ch->cfg.board_cfg;
if (try_module_get(board_cfg->owner) && board_cfg->display_off) {
board_cfg->display_off(board_cfg->board_data);
@@ -804,9 +843,15 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
struct sh_mobile_lcdc_priv *priv = ch->lcdc;
unsigned long ldrcntr;
unsigned long new_pan_offset;
+ unsigned long base_addr_y, base_addr_c;
+ unsigned long c_offset;
- new_pan_offset = (var->yoffset * info->fix.line_length) +
- (var->xoffset * (info->var.bits_per_pixel / 8));
+ if (!var->nonstd)
+ new_pan_offset = (var->yoffset * info->fix.line_length) +
+ (var->xoffset * (info->var.bits_per_pixel / 8));
+ else
+ new_pan_offset = (var->yoffset * info->fix.line_length) +
+ (var->xoffset);
if (new_pan_offset == ch->pan_offset)
return 0; /* No change, do nothing */
@@ -814,7 +859,26 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
ldrcntr = lcdc_read(priv, _LDRCNTR);
/* Set the source address for the next refresh */
- lcdc_write_chan_mirror(ch, LDSA1R, ch->dma_handle + new_pan_offset);
+ base_addr_y = ch->dma_handle + new_pan_offset;
+ if (var->nonstd) {
+ /* Set y offset */
+ c_offset = (var->yoffset *
+ info->fix.line_length *
+ (info->var.bits_per_pixel - 8)) / 8;
+ base_addr_c = ch->dma_handle + var->xres * var->yres_virtual +
+ c_offset;
+ /* Set x offset */
+ if (info->var.bits_per_pixel == 24)
+ base_addr_c += 2 * var->xoffset;
+ else
+ base_addr_c += var->xoffset;
+ } else
+ base_addr_c = 0;
+
+ lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
+ if (base_addr_c)
+ lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
+
if (lcdc_chan_is_sublcd(ch))
lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS);
else
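
To make the NV12 branch above concrete: the CbCr plane sits immediately after
the full virtual-size Y plane, and with bpp = 12 the factor (bpp - 8) / 8 = 1/2
reflects chroma occupying half as many bytes as luma for the same pan. A
standalone sketch with assumed mode numbers (1280x720, double-buffered,
xoffset = 0):

#include <stdio.h>

int main(void)
{
	unsigned long dma_handle = 0x40000000;	/* assumed DMA base */
	unsigned long xres = 1280, yres_virtual = 1440;	/* 2 x 720 buffers */
	unsigned long line_length = xres;	/* nonstd: one byte per Y pixel */
	unsigned long yoffset = 720;		/* pan to the second buffer */
	unsigned long bpp = 12;			/* NV12 */

	unsigned long base_addr_y = dma_handle + yoffset * line_length;
	unsigned long c_offset = (yoffset * line_length * (bpp - 8)) / 8;
	unsigned long base_addr_c = dma_handle + xres * yres_virtual + c_offset;

	printf("Y base %#lx, CbCr base %#lx\n", base_addr_y, base_addr_c);
	return 0;
}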
@@ -885,7 +949,10 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
/* Couldn't reconfigure, hopefully, can continue as before */
return;
- info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8);
+ if (info->var.nonstd)
+ info->fix.line_length = mode1.xres;
+ else
+ info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8);
/*
* fb_set_var() calls the notifier change internally, only if
@@ -980,8 +1047,80 @@ static struct fb_ops sh_mobile_lcdc_ops = {
.fb_check_var = sh_mobile_check_var,
};
-static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp)
+static int sh_mobile_lcdc_update_bl(struct backlight_device *bdev)
+{
+ struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
+ struct sh_mobile_lcdc_board_cfg *cfg = &ch->cfg.board_cfg;
+ int brightness = bdev->props.brightness;
+
+ if (bdev->props.power != FB_BLANK_UNBLANK ||
+ bdev->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
+ return cfg->set_brightness(cfg->board_data, brightness);
+}
+
+static int sh_mobile_lcdc_get_brightness(struct backlight_device *bdev)
+{
+ struct sh_mobile_lcdc_chan *ch = bl_get_data(bdev);
+ struct sh_mobile_lcdc_board_cfg *cfg = &ch->cfg.board_cfg;
+
+ return cfg->get_brightness(cfg->board_data);
+}
+
+static int sh_mobile_lcdc_check_fb(struct backlight_device *bdev,
+ struct fb_info *info)
+{
+ return (info->bl_dev == bdev);
+}
+
+static struct backlight_ops sh_mobile_lcdc_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = sh_mobile_lcdc_update_bl,
+ .get_brightness = sh_mobile_lcdc_get_brightness,
+ .check_fb = sh_mobile_lcdc_check_fb,
+};
+
+static struct backlight_device *sh_mobile_lcdc_bl_probe(struct device *parent,
+ struct sh_mobile_lcdc_chan *ch)
+{
+ struct backlight_device *bl;
+
+ bl = backlight_device_register(ch->cfg.bl_info.name, parent, ch,
+ &sh_mobile_lcdc_bl_ops, NULL);
+ /* backlight_device_register() reports failure via ERR_PTR */
+ if (IS_ERR(bl)) {
+ dev_err(parent, "unable to register backlight device\n");
+ return NULL;
+ }
+
+ bl->props.max_brightness = ch->cfg.bl_info.max_brightness;
+ bl->props.brightness = bl->props.max_brightness;
+ backlight_update_status(bl);
+
+ return bl;
+}
+
+static void sh_mobile_lcdc_bl_remove(struct backlight_device *bdev)
+{
+ backlight_device_unregister(bdev);
+}
+
+static int sh_mobile_lcdc_set_bpp(struct fb_var_screeninfo *var, int bpp,
+ int nonstd)
{
+ if (nonstd) {
+ switch (bpp) {
+ case 12:
+ case 16:
+ case 24:
+ var->bits_per_pixel = bpp;
+ var->nonstd = nonstd;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
switch (bpp) {
case 16: /* PKF[4:0] = 00011 - RGB 565 */
var->red.offset = 11;
@@ -1198,6 +1337,10 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
init_completion(&ch->vsync_completion);
ch->pan_offset = 0;
+ /* probe the backlight if there is one defined */
+ if (ch->cfg.bl_info.max_brightness)
+ ch->bl = sh_mobile_lcdc_bl_probe(&pdev->dev, ch);
+
switch (pdata->ch[i].chan) {
case LCDC_CHAN_MAINLCD:
ch->enabled = 1 << 1;
@@ -1260,6 +1403,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
k < cfg->num_cfg && lcd_cfg;
k++, lcd_cfg++) {
unsigned long size = lcd_cfg->yres * lcd_cfg->xres;
+ /* NV12 buffers must have even number of lines */
+ if ((cfg->nonstd) && cfg->bpp == 12 &&
+ (lcd_cfg->yres & 0x1)) {
+ dev_err(&pdev->dev, "yres must be a multiple of 2"
+ " for YCbCr420 mode.\n");
+ error = -EINVAL;
+ goto err1;
+ }
if (size > max_size) {
max_cfg = lcd_cfg;
@@ -1274,7 +1425,11 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
max_cfg->xres, max_cfg->yres);
info->fix = sh_mobile_lcdc_fix;
- info->fix.smem_len = max_size * (cfg->bpp / 8) * 2;
+ info->fix.smem_len = max_size * 2 * cfg->bpp / 8;
+
+ /* Only pan in 2 line steps for NV12 */
+ if (cfg->nonstd && cfg->bpp == 12)
+ info->fix.ypanstep = 2;
if (!mode) {
mode = &default_720p;
@@ -1292,7 +1447,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
var->yres_virtual = var->yres * 2;
var->activate = FB_ACTIVATE_NOW;
- error = sh_mobile_lcdc_set_bpp(var, cfg->bpp);
+ error = sh_mobile_lcdc_set_bpp(var, cfg->bpp, cfg->nonstd);
if (error)
break;
@@ -1316,7 +1471,11 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
}
info->fix.smem_start = ch->dma_handle;
- info->fix.line_length = var->xres * (cfg->bpp / 8);
+ if (var->nonstd)
+ info->fix.line_length = var->xres;
+ else
+ info->fix.line_length = var->xres * (cfg->bpp / 8);
+
info->screen_base = buf;
info->device = &pdev->dev;
ch->display_var = *var;
@@ -1345,6 +1504,8 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
}
}
+ info->bl_dev = ch->bl;
+
error = register_framebuffer(info);
if (error < 0)
goto err1;
@@ -1404,6 +1565,11 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev)
framebuffer_release(info);
}
+ for (i = 0; i < ARRAY_SIZE(priv->ch); i++) {
+ if (priv->ch[i].bl)
+ sh_mobile_lcdc_bl_remove(priv->ch[i].bl);
+ }
+
if (priv->dot_clk)
clk_put(priv->dot_clk);
diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h
index 9ecee2fba1d7..4635eed63eee 100644
--- a/drivers/video/sh_mobile_lcdcfb.h
+++ b/drivers/video/sh_mobile_lcdcfb.h
@@ -8,7 +8,7 @@
/* per-channel registers */
enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R,
- LDSM2R, LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR,
+ LDSM2R, LDSA1R, LDSA2R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR,
LDHAJR,
NR_CH_REGS };
@@ -16,6 +16,7 @@ enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R,
struct sh_mobile_lcdc_priv;
struct fb_info;
+struct backlight_device;
struct sh_mobile_lcdc_chan {
struct sh_mobile_lcdc_priv *lcdc;
@@ -26,6 +27,7 @@ struct sh_mobile_lcdc_chan {
u32 pseudo_palette[PALETTE_NR];
unsigned long saved_ch_regs[NR_CH_REGS];
struct fb_info *info;
+ struct backlight_device *bl;
dma_addr_t dma_handle;
struct fb_deferred_io defio;
struct scatterlist *sglist;
diff --git a/drivers/video/via/chip.h b/drivers/video/via/chip.h
index 48f1342897bd..781f3aa66b42 100644
--- a/drivers/video/via/chip.h
+++ b/drivers/video/via/chip.h
@@ -110,16 +110,13 @@
struct tmds_chip_information {
int tmds_chip_name;
int tmds_chip_slave_addr;
- int data_mode;
int output_interface;
int i2c_port;
- int device_type;
};
struct lvds_chip_information {
int lvds_chip_name;
int lvds_chip_slave_addr;
- int data_mode;
int output_interface;
int i2c_port;
};
@@ -142,9 +139,6 @@ struct chip_information {
struct crt_setting_information {
int iga_path;
- int h_active;
- int v_active;
- int bpp;
int refresh_rate;
};
@@ -162,8 +156,6 @@ struct lvds_setting_information {
int h_active;
int v_active;
int bpp;
- int refresh_rate;
- int lcd_panel_id;
int lcd_panel_hres;
int lcd_panel_vres;
int display_method;
@@ -188,7 +180,6 @@ struct GFX_DPA_SETTING {
};
struct VT1636_DPA_SETTING {
- int PanelSizeID;
u8 CLK_SEL_ST1;
u8 CLK_SEL_ST2;
};
diff --git a/drivers/video/via/dvi.c b/drivers/video/via/dvi.c
index 84e21b39dd0b..41ca198b5098 100644
--- a/drivers/video/via/dvi.c
+++ b/drivers/video/via/dvi.c
@@ -195,7 +195,9 @@ void viafb_dvi_set_mode(struct VideoModeTable *mode, int mode_bpp,
struct crt_mode_table *pDviTiming;
unsigned long desirePixelClock, maxPixelClock;
pDviTiming = mode->crtc;
- desirePixelClock = pDviTiming->clk / 1000000;
+ desirePixelClock = pDviTiming->refresh_rate
+ * pDviTiming->crtc.hor_total * pDviTiming->crtc.ver_total
+ / 1000000;
maxPixelClock = (unsigned long)viaparinfo->
tmds_setting_info->max_pixel_clock;
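The dvi.c hunk above derives the desired pixel clock from the mode timing itself (refresh rate times horizontal and vertical totals) now that crt_mode_table no longer carries a precomputed clk field. A standalone check of that arithmetic, using the 1024x768@60 totals from the viamode.c tables later in this patch:

#include <stdio.h>

/* Pixel clock from timing: refresh (Hz) * horizontal total * vertical total. */
int main(void)
{
	unsigned long refresh = 60, hor_total = 1344, ver_total = 806;
	unsigned long pix_hz = refresh * hor_total * ver_total;

	/* 60 * 1344 * 806 = 64995840 Hz, i.e. the familiar ~65 MHz dot clock */
	printf("pixel clock = %lu Hz (%lu MHz)\n", pix_hz, pix_hz / 1000000);
	return 0;
}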
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 36d73f940d8b..5728fd76bc11 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -22,342 +22,290 @@
#include <linux/via-core.h>
#include "global.h"
-static struct pll_map pll_value[] = {
- {25175000,
- {99, 7, 3},
- {85, 3, 4}, /* ignoring bit difference: 0x00008000 */
- {141, 5, 4},
- {141, 5, 4} },
- {29581000,
- {33, 4, 2},
- {66, 2, 4}, /* ignoring bit difference: 0x00808000 */
- {166, 5, 4}, /* ignoring bit difference: 0x00008000 */
- {165, 5, 4} },
- {26880000,
- {15, 4, 1},
- {30, 2, 3}, /* ignoring bit difference: 0x00808000 */
- {150, 5, 4},
- {150, 5, 4} },
- {31500000,
- {53, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {141, 4, 4}, /* ignoring bit difference: 0x00008000 */
- {176, 5, 4},
- {176, 5, 4} },
- {31728000,
- {31, 7, 1},
- {177, 5, 4}, /* ignoring bit difference: 0x00008000 */
- {177, 5, 4},
- {142, 4, 4} },
- {32688000,
- {73, 4, 3},
- {146, 4, 4}, /* ignoring bit difference: 0x00008000 */
- {183, 5, 4},
- {146, 4, 4} },
- {36000000,
- {101, 5, 3}, /* ignoring bit difference: 0x00008000 */
- {161, 4, 4}, /* ignoring bit difference: 0x00008000 */
- {202, 5, 4},
- {161, 4, 4} },
- {40000000,
- {89, 4, 3},
- {89, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {112, 5, 3},
- {112, 5, 3} },
- {41291000,
- {23, 4, 1},
- {69, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {115, 5, 3},
- {115, 5, 3} },
- {43163000,
- {121, 5, 3},
- {121, 5, 3}, /* ignoring bit difference: 0x00008000 */
- {121, 5, 3},
- {121, 5, 3} },
- {45250000,
- {127, 5, 3},
- {127, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {127, 5, 3},
- {127, 5, 3} },
- {46000000,
- {90, 7, 2},
- {103, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {129, 5, 3},
- {103, 4, 3} },
- {46996000,
- {105, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {131, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {131, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {105, 4, 3} },
- {48000000,
- {67, 20, 0},
- {134, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {134, 5, 3},
- {134, 5, 3} },
- {48875000,
- {99, 29, 0},
- {82, 3, 3}, /* ignoring bit difference: 0x00808000 */
- {82, 3, 3}, /* ignoring bit difference: 0x00808000 */
- {137, 5, 3} },
- {49500000,
- {83, 6, 2},
- {83, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {138, 5, 3},
- {83, 3, 3} },
- {52406000,
- {117, 4, 3},
- {117, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {117, 4, 3},
- {88, 3, 3} },
- {52977000,
- {37, 5, 1},
- {148, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {148, 5, 3},
- {148, 5, 3} },
- {56250000,
- {55, 7, 1}, /* ignoring bit difference: 0x00008000 */
- {126, 4, 3}, /* ignoring bit difference: 0x00008000 */
- {157, 5, 3},
- {157, 5, 3} },
- {57275000,
- {0, 0, 0},
- {2, 2, 0},
- {2, 2, 0},
- {157, 5, 3} }, /* ignoring bit difference: 0x00808000 */
- {60466000,
- {76, 9, 1},
- {169, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {169, 5, 3}, /* FIXED: old = {72, 2, 3} */
- {169, 5, 3} },
- {61500000,
- {86, 20, 0},
- {172, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {172, 5, 3},
- {172, 5, 3} },
- {65000000,
- {109, 6, 2}, /* ignoring bit difference: 0x00008000 */
- {109, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {109, 3, 3},
- {109, 3, 3} },
- {65178000,
- {91, 5, 2},
- {182, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {109, 3, 3},
- {182, 5, 3} },
- {66750000,
- {75, 4, 2},
- {150, 4, 3}, /* ignoring bit difference: 0x00808000 */
- {150, 4, 3},
- {112, 3, 3} },
- {68179000,
- {19, 4, 0},
- {114, 3, 3}, /* ignoring bit difference: 0x00008000 */
- {190, 5, 3},
- {191, 5, 3} },
- {69924000,
- {83, 17, 0},
- {195, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {195, 5, 3},
- {195, 5, 3} },
- {70159000,
- {98, 20, 0},
- {196, 5, 3}, /* ignoring bit difference: 0x00808000 */
- {196, 5, 3},
- {195, 5, 3} },
- {72000000,
- {121, 24, 0},
- {161, 4, 3}, /* ignoring bit difference: 0x00808000 */
- {161, 4, 3},
- {161, 4, 3} },
- {78750000,
- {33, 3, 1},
- {66, 3, 2}, /* ignoring bit difference: 0x00008000 */
- {110, 5, 2},
- {110, 5, 2} },
- {80136000,
- {28, 5, 0},
- {68, 3, 2}, /* ignoring bit difference: 0x00008000 */
- {112, 5, 2},
- {112, 5, 2} },
- {83375000,
- {93, 2, 3},
- {93, 4, 2}, /* ignoring bit difference: 0x00800000 */
- {93, 4, 2}, /* ignoring bit difference: 0x00800000 */
- {117, 5, 2} },
- {83950000,
- {41, 7, 0},
- {117, 5, 2}, /* ignoring bit difference: 0x00008000 */
- {117, 5, 2},
- {117, 5, 2} },
- {84750000,
- {118, 5, 2},
- {118, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {118, 5, 2},
- {118, 5, 2} },
- {85860000,
- {84, 7, 1},
- {120, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {120, 5, 2},
- {118, 5, 2} },
- {88750000,
- {31, 5, 0},
- {124, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {174, 7, 2}, /* ignoring bit difference: 0x00808000 */
- {124, 5, 2} },
- {94500000,
- {33, 5, 0},
- {132, 5, 2}, /* ignoring bit difference: 0x00008000 */
- {132, 5, 2},
- {132, 5, 2} },
- {97750000,
- {82, 6, 1},
- {137, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {137, 5, 2},
- {137, 5, 2} },
- {101000000,
- {127, 9, 1},
- {141, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {141, 5, 2},
- {141, 5, 2} },
- {106500000,
- {119, 4, 2},
- {119, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {119, 4, 2},
- {149, 5, 2} },
- {108000000,
- {121, 4, 2},
- {121, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {151, 5, 2},
- {151, 5, 2} },
- {113309000,
- {95, 12, 0},
- {95, 3, 2}, /* ignoring bit difference: 0x00808000 */
- {95, 3, 2},
- {159, 5, 2} },
- {118840000,
- {83, 5, 1},
- {166, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {166, 5, 2},
- {166, 5, 2} },
- {119000000,
- {108, 13, 0},
- {133, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {133, 4, 2},
- {167, 5, 2} },
- {121750000,
- {85, 5, 1},
- {170, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {68, 2, 2},
- {0, 0, 0} },
- {125104000,
- {53, 6, 0}, /* ignoring bit difference: 0x00008000 */
- {106, 3, 2}, /* ignoring bit difference: 0x00008000 */
- {175, 5, 2},
- {0, 0, 0} },
- {135000000,
- {94, 5, 1},
- {28, 3, 0}, /* ignoring bit difference: 0x00804000 */
- {151, 4, 2},
- {189, 5, 2} },
- {136700000,
- {115, 12, 0},
- {191, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {191, 5, 2},
- {191, 5, 2} },
- {138400000,
- {87, 9, 0},
- {116, 3, 2}, /* ignoring bit difference: 0x00808000 */
- {116, 3, 2},
- {194, 5, 2} },
- {146760000,
- {103, 5, 1},
- {206, 5, 2}, /* ignoring bit difference: 0x00808000 */
- {206, 5, 2},
- {206, 5, 2} },
- {153920000,
- {86, 8, 0},
- {86, 4, 1}, /* ignoring bit difference: 0x00808000 */
- {86, 4, 1},
- {86, 4, 1} }, /* FIXED: old = {84, 2, 1} */
- {156000000,
- {109, 5, 1},
- {109, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {109, 5, 1},
- {108, 5, 1} },
- {157500000,
- {55, 5, 0}, /* ignoring bit difference: 0x00008000 */
- {22, 2, 0}, /* ignoring bit difference: 0x00802000 */
- {110, 5, 1},
- {110, 5, 1} },
- {162000000,
- {113, 5, 1},
- {113, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {113, 5, 1},
- {113, 5, 1} },
- {187000000,
- {118, 9, 0},
- {131, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {131, 5, 1},
- {131, 5, 1} },
- {193295000,
- {108, 8, 0},
- {81, 3, 1}, /* ignoring bit difference: 0x00808000 */
- {135, 5, 1},
- {135, 5, 1} },
- {202500000,
- {99, 7, 0},
- {85, 3, 1}, /* ignoring bit difference: 0x00808000 */
- {142, 5, 1},
- {142, 5, 1} },
- {204000000,
- {100, 7, 0},
- {143, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {143, 5, 1},
- {143, 5, 1} },
- {218500000,
- {92, 6, 0},
- {153, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {153, 5, 1},
- {153, 5, 1} },
- {234000000,
- {98, 6, 0},
- {98, 3, 1}, /* ignoring bit difference: 0x00008000 */
- {98, 3, 1},
- {164, 5, 1} },
- {267250000,
- {112, 6, 0},
- {112, 3, 1}, /* ignoring bit difference: 0x00808000 */
- {187, 5, 1},
- {187, 5, 1} },
- {297500000,
- {102, 5, 0}, /* ignoring bit difference: 0x00008000 */
- {166, 4, 1}, /* ignoring bit difference: 0x00008000 */
- {208, 5, 1},
- {208, 5, 1} },
- {74481000,
- {26, 5, 0},
- {125, 3, 3}, /* ignoring bit difference: 0x00808000 */
- {208, 5, 3},
- {209, 5, 3} },
- {172798000,
- {121, 5, 1},
- {121, 5, 1}, /* ignoring bit difference: 0x00808000 */
- {121, 5, 1},
- {121, 5, 1} },
- {122614000,
- {60, 7, 0},
- {137, 4, 2}, /* ignoring bit difference: 0x00808000 */
- {137, 4, 2},
- {172, 5, 2} },
- {74270000,
- {83, 8, 1},
- {208, 5, 3},
- {208, 5, 3},
- {0, 0, 0} },
- {148500000,
- {83, 8, 0},
- {208, 5, 2},
- {166, 4, 2},
- {208, 5, 2} }
+static struct pll_config cle266_pll_config[] = {
+ {19, 4, 0},
+ {26, 5, 0},
+ {28, 5, 0},
+ {31, 5, 0},
+ {33, 5, 0},
+ {55, 5, 0},
+ {102, 5, 0},
+ {53, 6, 0},
+ {92, 6, 0},
+ {98, 6, 0},
+ {112, 6, 0},
+ {41, 7, 0},
+ {60, 7, 0},
+ {99, 7, 0},
+ {100, 7, 0},
+ {83, 8, 0},
+ {86, 8, 0},
+ {108, 8, 0},
+ {87, 9, 0},
+ {118, 9, 0},
+ {95, 12, 0},
+ {115, 12, 0},
+ {108, 13, 0},
+ {83, 17, 0},
+ {67, 20, 0},
+ {86, 20, 0},
+ {98, 20, 0},
+ {121, 24, 0},
+ {99, 29, 0},
+ {33, 3, 1},
+ {15, 4, 1},
+ {23, 4, 1},
+ {37, 5, 1},
+ {83, 5, 1},
+ {85, 5, 1},
+ {94, 5, 1},
+ {103, 5, 1},
+ {109, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {82, 6, 1},
+ {31, 7, 1},
+ {55, 7, 1},
+ {84, 7, 1},
+ {83, 8, 1},
+ {76, 9, 1},
+ {127, 9, 1},
+ {33, 4, 2},
+ {75, 4, 2},
+ {119, 4, 2},
+ {121, 4, 2},
+ {91, 5, 2},
+ {118, 5, 2},
+ {83, 6, 2},
+ {109, 6, 2},
+ {90, 7, 2},
+ {93, 2, 3},
+ {53, 3, 3},
+ {73, 4, 3},
+ {89, 4, 3},
+ {105, 4, 3},
+ {117, 4, 3},
+ {101, 5, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {99, 7, 3}
+};
+
+static struct pll_config k800_pll_config[] = {
+ {22, 2, 0},
+ {28, 3, 0},
+ {81, 3, 1},
+ {85, 3, 1},
+ {98, 3, 1},
+ {112, 3, 1},
+ {86, 4, 1},
+ {166, 4, 1},
+ {109, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {131, 5, 1},
+ {143, 5, 1},
+ {153, 5, 1},
+ {66, 3, 2},
+ {68, 3, 2},
+ {95, 3, 2},
+ {106, 3, 2},
+ {116, 3, 2},
+ {93, 4, 2},
+ {119, 4, 2},
+ {121, 4, 2},
+ {133, 4, 2},
+ {137, 4, 2},
+ {117, 5, 2},
+ {118, 5, 2},
+ {120, 5, 2},
+ {124, 5, 2},
+ {132, 5, 2},
+ {137, 5, 2},
+ {141, 5, 2},
+ {166, 5, 2},
+ {170, 5, 2},
+ {191, 5, 2},
+ {206, 5, 2},
+ {208, 5, 2},
+ {30, 2, 3},
+ {69, 3, 3},
+ {82, 3, 3},
+ {83, 3, 3},
+ {109, 3, 3},
+ {114, 3, 3},
+ {125, 3, 3},
+ {89, 4, 3},
+ {103, 4, 3},
+ {117, 4, 3},
+ {126, 4, 3},
+ {150, 4, 3},
+ {161, 4, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {131, 5, 3},
+ {134, 5, 3},
+ {148, 5, 3},
+ {169, 5, 3},
+ {172, 5, 3},
+ {182, 5, 3},
+ {195, 5, 3},
+ {196, 5, 3},
+ {208, 5, 3},
+ {66, 2, 4},
+ {85, 3, 4},
+ {141, 4, 4},
+ {146, 4, 4},
+ {161, 4, 4},
+ {177, 5, 4}
+};
+
+static struct pll_config cx700_pll_config[] = {
+ {98, 3, 1},
+ {86, 4, 1},
+ {109, 5, 1},
+ {110, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {131, 5, 1},
+ {135, 5, 1},
+ {142, 5, 1},
+ {143, 5, 1},
+ {153, 5, 1},
+ {187, 5, 1},
+ {208, 5, 1},
+ {68, 2, 2},
+ {95, 3, 2},
+ {116, 3, 2},
+ {93, 4, 2},
+ {119, 4, 2},
+ {133, 4, 2},
+ {137, 4, 2},
+ {151, 4, 2},
+ {166, 4, 2},
+ {110, 5, 2},
+ {112, 5, 2},
+ {117, 5, 2},
+ {118, 5, 2},
+ {120, 5, 2},
+ {132, 5, 2},
+ {137, 5, 2},
+ {141, 5, 2},
+ {151, 5, 2},
+ {166, 5, 2},
+ {175, 5, 2},
+ {191, 5, 2},
+ {206, 5, 2},
+ {174, 7, 2},
+ {82, 3, 3},
+ {109, 3, 3},
+ {117, 4, 3},
+ {150, 4, 3},
+ {161, 4, 3},
+ {112, 5, 3},
+ {115, 5, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {129, 5, 3},
+ {131, 5, 3},
+ {134, 5, 3},
+ {138, 5, 3},
+ {148, 5, 3},
+ {157, 5, 3},
+ {169, 5, 3},
+ {172, 5, 3},
+ {190, 5, 3},
+ {195, 5, 3},
+ {196, 5, 3},
+ {208, 5, 3},
+ {141, 5, 4},
+ {150, 5, 4},
+ {166, 5, 4},
+ {176, 5, 4},
+ {177, 5, 4},
+ {183, 5, 4},
+ {202, 5, 4}
+};
+
+static struct pll_config vx855_pll_config[] = {
+ {86, 4, 1},
+ {108, 5, 1},
+ {110, 5, 1},
+ {113, 5, 1},
+ {121, 5, 1},
+ {131, 5, 1},
+ {135, 5, 1},
+ {142, 5, 1},
+ {143, 5, 1},
+ {153, 5, 1},
+ {164, 5, 1},
+ {187, 5, 1},
+ {208, 5, 1},
+ {110, 5, 2},
+ {112, 5, 2},
+ {117, 5, 2},
+ {118, 5, 2},
+ {124, 5, 2},
+ {132, 5, 2},
+ {137, 5, 2},
+ {141, 5, 2},
+ {149, 5, 2},
+ {151, 5, 2},
+ {159, 5, 2},
+ {166, 5, 2},
+ {167, 5, 2},
+ {172, 5, 2},
+ {189, 5, 2},
+ {191, 5, 2},
+ {194, 5, 2},
+ {206, 5, 2},
+ {208, 5, 2},
+ {83, 3, 3},
+ {88, 3, 3},
+ {109, 3, 3},
+ {112, 3, 3},
+ {103, 4, 3},
+ {105, 4, 3},
+ {161, 4, 3},
+ {112, 5, 3},
+ {115, 5, 3},
+ {121, 5, 3},
+ {127, 5, 3},
+ {134, 5, 3},
+ {137, 5, 3},
+ {148, 5, 3},
+ {157, 5, 3},
+ {169, 5, 3},
+ {172, 5, 3},
+ {182, 5, 3},
+ {191, 5, 3},
+ {195, 5, 3},
+ {209, 5, 3},
+ {142, 4, 4},
+ {146, 4, 4},
+ {161, 4, 4},
+ {141, 5, 4},
+ {150, 5, 4},
+ {165, 5, 4},
+ {176, 5, 4}
+};
+
+/* According to VIA Technologies, these values are based on experiments */
+static struct io_reg scaling_parameters[] = {
+ {VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
+ {VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
+ {VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
+ {VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
+ {VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
+ {VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
+ {VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
+ {VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
+ {VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
+ {VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
+ {VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
+ {VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
+ {VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
+ {VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
};
static struct fifo_depth_select display_fifo_depth_reg = {
@@ -751,7 +699,7 @@ void viafb_unlock_crt(void)
viafb_write_reg_mask(CR47, VIACR, 0, BIT0);
}
-void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
+static void write_dac_reg(u8 index, u8 r, u8 g, u8 b)
{
outb(index, LUT_INDEX_WRITE);
outb(r, LUT_DATA);
@@ -1674,43 +1622,63 @@ static u32 vx855_encode_pll(struct pll_config pll)
| pll.multiplier;
}
-u32 viafb_get_clk_value(int clk)
+static inline u32 get_pll_internal_frequency(u32 ref_freq,
+ struct pll_config pll)
{
- u32 value = 0;
- int i = 0;
+ return ref_freq / pll.divisor * pll.multiplier;
+}
- while (i < NUM_TOTAL_PLL_TABLE && clk != pll_value[i].clk)
- i++;
+static inline u32 get_pll_output_frequency(u32 ref_freq, struct pll_config pll)
+{
+	return get_pll_internal_frequency(ref_freq, pll) >> pll.rshift;
+}
- if (i == NUM_TOTAL_PLL_TABLE) {
- printk(KERN_WARNING "viafb_get_clk_value: PLL lookup failed!");
- } else {
- switch (viaparinfo->chip_info->gfx_chip_name) {
- case UNICHROME_CLE266:
- case UNICHROME_K400:
- value = cle266_encode_pll(pll_value[i].cle266_pll);
- break;
+static struct pll_config get_pll_config(struct pll_config *config, int size,
+ int clk)
+{
+ struct pll_config best = config[0];
+ const u32 f0 = 14318180; /* X1 frequency */
+ int i;
- case UNICHROME_K800:
- case UNICHROME_PM800:
- case UNICHROME_CN700:
- value = k800_encode_pll(pll_value[i].k800_pll);
- break;
+ for (i = 1; i < size; i++) {
+ if (abs(get_pll_output_frequency(f0, config[i]) - clk)
+ < abs(get_pll_output_frequency(f0, best) - clk))
+ best = config[i];
+ }
- case UNICHROME_CX700:
- case UNICHROME_CN750:
- case UNICHROME_K8M890:
- case UNICHROME_P4M890:
- case UNICHROME_P4M900:
- case UNICHROME_VX800:
- value = k800_encode_pll(pll_value[i].cx700_pll);
- break;
+ return best;
+}
- case UNICHROME_VX855:
- case UNICHROME_VX900:
- value = vx855_encode_pll(pll_value[i].vx855_pll);
- break;
- }
+u32 viafb_get_clk_value(int clk)
+{
+ u32 value = 0;
+
+ switch (viaparinfo->chip_info->gfx_chip_name) {
+ case UNICHROME_CLE266:
+ case UNICHROME_K400:
+ value = cle266_encode_pll(get_pll_config(cle266_pll_config,
+ ARRAY_SIZE(cle266_pll_config), clk));
+ break;
+ case UNICHROME_K800:
+ case UNICHROME_PM800:
+ case UNICHROME_CN700:
+ value = k800_encode_pll(get_pll_config(k800_pll_config,
+ ARRAY_SIZE(k800_pll_config), clk));
+ break;
+ case UNICHROME_CX700:
+ case UNICHROME_CN750:
+ case UNICHROME_K8M890:
+ case UNICHROME_P4M890:
+ case UNICHROME_P4M900:
+ case UNICHROME_VX800:
+ value = k800_encode_pll(get_pll_config(cx700_pll_config,
+ ARRAY_SIZE(cx700_pll_config), clk));
+ break;
+ case UNICHROME_VX855:
+ case UNICHROME_VX900:
+ value = vx855_encode_pll(get_pll_config(vx855_pll_config,
+ ARRAY_SIZE(vx855_pll_config), clk));
+ break;
}
return value;
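The rewritten lookup above no longer requires an exact clock match: get_pll_config() scans a per-chipset table of {multiplier, divisor, rshift} triples and returns the entry whose output frequency, f_ref / divisor * multiplier >> rshift with f_ref = 14318180 Hz, is closest to the requested clock. A self-contained sketch of the same search (trimmed table borrowed from k800_pll_config above; not the kernel code):

#include <stdio.h>
#include <stdlib.h>

struct pll_config { unsigned int multiplier, divisor, rshift; };

static const unsigned long f_ref = 14318180;	/* X1 reference, Hz */

static unsigned long pll_output(struct pll_config p)
{
	/* divide first, as the driver does, to keep the product in range */
	return (f_ref / p.divisor * p.multiplier) >> p.rshift;
}

/* Return the entry whose output frequency is closest to the target. */
static struct pll_config pick_pll(const struct pll_config *tbl, int n,
				  unsigned long clk)
{
	struct pll_config best = tbl[0];
	int i;

	for (i = 1; i < n; i++)
		if (labs((long)(pll_output(tbl[i]) - clk))
		    < labs((long)(pll_output(best) - clk)))
			best = tbl[i];
	return best;
}

int main(void)
{
	static const struct pll_config tbl[] = {
		{109, 3, 3}, {121, 5, 3}, {134, 5, 3}, {148, 5, 3},
	};
	struct pll_config p = pick_pll(tbl, 4, 65000000);

	printf("chose {%u, %u, %u} -> %lu Hz\n",
	       p.multiplier, p.divisor, p.rshift, pll_output(p));	/* 65028391 */
	return 0;
}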
@@ -2034,7 +2002,7 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
int i;
int index = 0;
int h_addr, v_addr;
- u32 pll_D_N;
+ u32 pll_D_N, clock;
for (i = 0; i < video_mode->mode_array; i++) {
index = i;
@@ -2087,7 +2055,9 @@ void viafb_fill_crtc_timing(struct crt_mode_table *crt_table,
&& (viaparinfo->chip_info->gfx_chip_name != UNICHROME_K400))
viafb_load_FIFO_reg(set_iga, h_addr, v_addr);
- pll_D_N = viafb_get_clk_value(crt_table[index].clk);
+ clock = crt_reg.hor_total * crt_reg.ver_total
+ * crt_table[index].refresh_rate;
+ pll_D_N = viafb_get_clk_value(clock);
DEBUG_MSG(KERN_INFO "PLL=%x", pll_D_N);
viafb_set_vclock(pll_D_N, set_iga);
@@ -2117,9 +2087,6 @@ void viafb_update_device_setting(int hres, int vres,
int bpp, int vmode_refresh, int flag)
{
if (flag == 0) {
- viaparinfo->crt_setting_info->h_active = hres;
- viaparinfo->crt_setting_info->v_active = vres;
- viaparinfo->crt_setting_info->bpp = bpp;
viaparinfo->crt_setting_info->refresh_rate =
vmode_refresh;
@@ -2129,13 +2096,9 @@ void viafb_update_device_setting(int hres, int vres,
viaparinfo->lvds_setting_info->h_active = hres;
viaparinfo->lvds_setting_info->v_active = vres;
viaparinfo->lvds_setting_info->bpp = bpp;
- viaparinfo->lvds_setting_info->refresh_rate =
- vmode_refresh;
viaparinfo->lvds_setting_info2->h_active = hres;
viaparinfo->lvds_setting_info2->v_active = vres;
viaparinfo->lvds_setting_info2->bpp = bpp;
- viaparinfo->lvds_setting_info2->refresh_rate =
- vmode_refresh;
} else {
if (viaparinfo->tmds_setting_info->iga_path == IGA2) {
@@ -2147,15 +2110,11 @@ void viafb_update_device_setting(int hres, int vres,
viaparinfo->lvds_setting_info->h_active = hres;
viaparinfo->lvds_setting_info->v_active = vres;
viaparinfo->lvds_setting_info->bpp = bpp;
- viaparinfo->lvds_setting_info->refresh_rate =
- vmode_refresh;
}
if (IGA2 == viaparinfo->lvds_setting_info2->iga_path) {
viaparinfo->lvds_setting_info2->h_active = hres;
viaparinfo->lvds_setting_info2->v_active = vres;
viaparinfo->lvds_setting_info2->bpp = bpp;
- viaparinfo->lvds_setting_info2->refresh_rate =
- vmode_refresh;
}
}
}
@@ -2430,6 +2389,7 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
break;
}
+ viafb_write_regx(scaling_parameters, ARRAY_SIZE(scaling_parameters));
device_off();
via_set_state(devices, VIA_STATE_OFF);
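The consolidated scaling_parameters table is now applied through viafb_write_regx() at mode-set time instead of being duplicated in every chipset's ModeXregs list (see viamode.c below). The helper applies a masked read-modify-write per {port, index, mask, value} entry; a minimal sketch of that semantics, with an in-memory register file standing in for the real indexed I/O port:

#include <stdio.h>

/* Mirror of the driver's io_reg tuple: indexed port, index, mask, value. */
struct io_reg { int port; unsigned char index, mask, value; };

/* Hypothetical register file standing in for the hardware I/O access. */
static unsigned char regs[256];

static void write_reg_mask(const struct io_reg *r)
{
	/* read-modify-write: clear the masked bits, then set the new value */
	regs[r->index] = (regs[r->index] & ~r->mask) | (r->value & r->mask);
}

int main(void)
{
	/* e.g. LCD Scaling Parameter 1: whole byte (mask 0xFF) set to 0x01 */
	struct io_reg cr7a = { 0 /* VIACR */, 0x7A, 0xFF, 0x01 };

	regs[0x7A] = 0xAB;			/* pretend stale hardware state */
	write_reg_mask(&cr7a);
	printf("CR7A = 0x%02X\n", regs[0x7A]);	/* -> 0x01 */
	return 0;
}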
@@ -2608,35 +2568,43 @@ int viafb_setmode(struct VideoModeTable *vmode_tbl, int video_bpp,
int viafb_get_pixclock(int hres, int vres, int vmode_refresh)
{
int i;
+ struct crt_mode_table *best;
+ struct VideoModeTable *vmode = viafb_get_mode(hres, vres);
+
+ if (!vmode)
+ return RES_640X480_60HZ_PIXCLOCK;
- for (i = 0; i < NUM_TOTAL_RES_MAP_REFRESH; i++) {
- if ((hres == res_map_refresh_tbl[i].hres)
- && (vres == res_map_refresh_tbl[i].vres)
- && (vmode_refresh == res_map_refresh_tbl[i].vmode_refresh))
- return res_map_refresh_tbl[i].pixclock;
+ best = &vmode->crtc[0];
+ for (i = 1; i < vmode->mode_array; i++) {
+ if (abs(vmode->crtc[i].refresh_rate - vmode_refresh)
+ < abs(best->refresh_rate - vmode_refresh))
+ best = &vmode->crtc[i];
}
- return RES_640X480_60HZ_PIXCLOCK;
+ return 1000000000 / (best->crtc.hor_total * best->crtc.ver_total)
+ * 1000 / best->refresh_rate;
}
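viafb_get_pixclock() now derives the fbdev pixclock (picoseconds per pixel) from the closest-matching timing instead of the dropped res_map_refresh_tbl: 10^12 / (htotal * vtotal * refresh), written as 10^9 / (ht * vt) * 1000 / refresh, presumably to keep the intermediates within 32-bit arithmetic. The viafb_get_refresh() rewrite just below applies the same nearest-match idea, falling back to 60 Hz when the best candidate is more than 3 Hz off. A quick standalone check of the pixclock split:

#include <stdio.h>

int main(void)
{
	/* 640x480@60 from the viamode.c tables: 800x525 total */
	unsigned long ht = 800, vt = 525, refresh = 60;

	/* the naive 10^12 / (ht * vt * refresh) would overflow 32 bits */
	unsigned long pixclock = 1000000000 / (ht * vt) * 1000 / refresh;

	/* prints 39666 ps; the legacy table hard-coded 39722 (25.175 MHz) */
	printf("pixclock = %lu ps\n", pixclock);
	return 0;
}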
int viafb_get_refresh(int hres, int vres, u32 long_refresh)
{
-#define REFRESH_TOLERANCE 3
- int i, nearest = -1, diff = REFRESH_TOLERANCE;
- for (i = 0; i < NUM_TOTAL_RES_MAP_REFRESH; i++) {
- if ((hres == res_map_refresh_tbl[i].hres)
- && (vres == res_map_refresh_tbl[i].vres)
- && (diff > (abs(long_refresh -
- res_map_refresh_tbl[i].vmode_refresh)))) {
- diff = abs(long_refresh - res_map_refresh_tbl[i].
- vmode_refresh);
- nearest = i;
- }
+ int i;
+ struct crt_mode_table *best;
+ struct VideoModeTable *vmode = viafb_get_mode(hres, vres);
+
+ if (!vmode)
+ return 60;
+
+ best = &vmode->crtc[0];
+ for (i = 1; i < vmode->mode_array; i++) {
+ if (abs(vmode->crtc[i].refresh_rate - long_refresh)
+ < abs(best->refresh_rate - long_refresh))
+ best = &vmode->crtc[i];
}
-#undef REFRESH_TOLERANCE
- if (nearest > 0)
- return res_map_refresh_tbl[nearest].vmode_refresh;
- return 60;
+
+ if (abs(best->refresh_rate - long_refresh) > 3)
+ return 60;
+
+ return best->refresh_rate;
}
static void device_off(void)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index 668d534542ef..7295263299f7 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -893,8 +893,6 @@ struct iga2_crtc_timing {
/* VT3410 chipset*/
#define VX900_FUNCTION3 0x3410
-#define NUM_TOTAL_PLL_TABLE ARRAY_SIZE(pll_value)
-
struct IODATA {
u8 Index;
u8 Mask;
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 3425c3969806..64bc7e763103 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -26,10 +26,12 @@
/* CLE266 Software Power Sequence */
/* {Mask}, {Data}, {Delay} */
-int PowerSequenceOn[3][3] = { {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06},
- {0x19, 0x1FE, 0x01} };
-int PowerSequenceOff[3][3] = { {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00},
- {0xD2, 0x19, 0x01} };
+static const int PowerSequenceOn[3][3] = {
+ {0x10, 0x08, 0x06}, {0x10, 0x08, 0x06}, {0x19, 0x1FE, 0x01}
+};
+static const int PowerSequenceOff[3][3] = {
+ {0x06, 0x08, 0x10}, {0x00, 0x00, 0x00}, {0xD2, 0x19, 0x01}
+};
static struct _lcd_scaling_factor lcd_scaling_factor = {
/* LCD Horizontal Scaling Factor Register */
@@ -95,8 +97,6 @@ void __devinit viafb_init_lcd_size(void)
DEBUG_MSG(KERN_INFO "viafb_init_lcd_size()\n");
fp_id_to_vindex(viafb_lcd_panel_id);
- viaparinfo->lvds_setting_info2->lcd_panel_id =
- viaparinfo->lvds_setting_info->lcd_panel_id;
viaparinfo->lvds_setting_info2->lcd_panel_hres =
viaparinfo->lvds_setting_info->lcd_panel_hres;
viaparinfo->lvds_setting_info2->lcd_panel_vres =
@@ -203,176 +203,132 @@ static void __devinit fp_id_to_vindex(int panel_id)
case 0x0:
viaparinfo->lvds_setting_info->lcd_panel_hres = 640;
viaparinfo->lvds_setting_info->lcd_panel_vres = 480;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID0_640X480;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x1:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID1_800X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x2:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x3:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x4:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1024;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID4_1280X1024;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x5:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1400;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1050;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID5_1400X1050;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x6:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1600;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1200;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID6_1600X1200;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x8:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 480;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDA_800X480;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x9:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0xA:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xB:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID2_1024X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xC:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xD:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1024;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID4_1280X1024;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xE:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1400;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1050;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID5_1400X1050;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0xF:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1600;
viaparinfo->lvds_setting_info->lcd_panel_vres = 1200;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID6_1600X1200;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x10:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1366;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID7_1366X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x11:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1024;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID8_1024X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x12:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x13:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 800;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID9_1280X800;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
case 0x14:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1360;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDB_1360X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x15:
viaparinfo->lvds_setting_info->lcd_panel_hres = 1280;
viaparinfo->lvds_setting_info->lcd_panel_vres = 768;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID3_1280X768;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 1;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
case 0x16:
viaparinfo->lvds_setting_info->lcd_panel_hres = 480;
viaparinfo->lvds_setting_info->lcd_panel_vres = 640;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDC_480X640;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
break;
@@ -380,16 +336,12 @@ static void __devinit fp_id_to_vindex(int panel_id)
/* OLPC XO-1.5 panel */
viaparinfo->lvds_setting_info->lcd_panel_hres = 1200;
viaparinfo->lvds_setting_info->lcd_panel_vres = 900;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_IDD_1200X900;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 0;
break;
default:
viaparinfo->lvds_setting_info->lcd_panel_hres = 800;
viaparinfo->lvds_setting_info->lcd_panel_vres = 600;
- viaparinfo->lvds_setting_info->lcd_panel_id =
- LCD_PANEL_ID1_800X600;
viaparinfo->lvds_setting_info->device_lcd_dualedge = 0;
viaparinfo->lvds_setting_info->LCDDithering = 1;
}
@@ -610,7 +562,7 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
int set_vres = plvds_setting_info->v_active;
int panel_hres = plvds_setting_info->lcd_panel_hres;
int panel_vres = plvds_setting_info->lcd_panel_vres;
- u32 pll_D_N;
+ u32 pll_D_N, clock;
struct display_timing mode_crt_reg, panel_crt_reg;
struct crt_mode_table *panel_crt_table = NULL;
struct VideoModeTable *vmode_tbl = viafb_get_mode(panel_hres,
@@ -625,7 +577,9 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
DEBUG_MSG(KERN_INFO "below viafb_lcd_set_mode!!\n");
if (VT1636_LVDS == plvds_chip_info->lvds_chip_name)
viafb_init_lvds_vt1636(plvds_setting_info, plvds_chip_info);
- plvds_setting_info->vclk = panel_crt_table->clk;
+ clock = panel_crt_reg.hor_total * panel_crt_reg.ver_total
+ * panel_crt_table->refresh_rate;
+ plvds_setting_info->vclk = clock;
if (set_iga == IGA1) {
/* IGA1 doesn't have LCD scaling, so set it as centering. */
viafb_load_crtc_timing(lcd_centering_timging
@@ -660,7 +614,7 @@ void viafb_lcd_set_mode(struct crt_mode_table *mode_crt_table,
fill_lcd_format();
- pll_D_N = viafb_get_clk_value(panel_crt_table[0].clk);
+ pll_D_N = viafb_get_clk_value(clock);
DEBUG_MSG(KERN_INFO "PLL=0x%x", pll_D_N);
viafb_set_vclock(pll_D_N, set_iga);
lcd_patch_skew(plvds_setting_info, plvds_chip_info);
@@ -1064,34 +1018,33 @@ static struct display_timing lcd_centering_timging(struct display_timing
bool viafb_lcd_get_mobile_state(bool *mobile)
{
- unsigned char *romptr, *tableptr;
+ unsigned char __iomem *romptr, *tableptr, *biosptr;
u8 core_base;
- unsigned char *biosptr;
/* Rom address */
- u32 romaddr = 0x000C0000;
- u16 start_pattern = 0;
+ const u32 romaddr = 0x000C0000;
+ u16 start_pattern;
biosptr = ioremap(romaddr, 0x10000);
+ start_pattern = readw(biosptr);
- memcpy(&start_pattern, biosptr, 2);
/* Compare pattern */
if (start_pattern == 0xAA55) {
/* Get the start of Table */
/* 0x1B means BIOS offset position */
romptr = biosptr + 0x1B;
- tableptr = biosptr + *((u16 *) romptr);
+ tableptr = biosptr + readw(romptr);
/* Get the start of biosver structure */
/* 18 means BIOS version position. */
romptr = tableptr + 18;
- romptr = biosptr + *((u16 *) romptr);
+ romptr = biosptr + readw(romptr);
/* The offset should be 44, but the
actual image is three chars short. */
/* pRom += 44; */
romptr += 41;
- core_base = *romptr++;
+ core_base = readb(romptr);
if (core_base & 0x8)
*mobile = false;
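The ROM-probe rewrite above replaces plain pointer dereferences of the ioremap()ed BIOS with readw()/readb() accessors, as required for __iomem mappings, and walks signature -> table pointer (offset 0x1B) -> version structure (offset 18) -> core_base (offset 41). A user-space sketch of the same little-endian walk over an in-memory image (read16/read8 stand in for readw/readb; the offsets are the driver's, the image contents are made up):

#include <stdio.h>

static unsigned short read16(const unsigned char *p) { return p[0] | p[1] << 8; }
static unsigned char read8(const unsigned char *p) { return p[0]; }

int main(void)
{
	static unsigned char bios[0x100];	/* fabricated ROM image */
	const unsigned char *table, *ver;
	unsigned char core_base;

	bios[0x00] = 0x55; bios[0x01] = 0xAA;	/* option-ROM signature */
	bios[0x1B] = 0x40;			/* table pointer -> 0x0040 */
	bios[0x40 + 18] = 0x60;			/* biosver pointer -> 0x0060 */
	bios[0x60 + 41] = 0x08;			/* core_base, bit 3 set */

	if (read16(bios) != 0xAA55)
		return 1;
	table = bios + read16(bios + 0x1B);
	ver = bios + read16(table + 18);
	core_base = read8(ver + 41);
	printf("mobile = %s\n", (core_base & 0x8) ? "false" : "true");
	return 0;
}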
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 2cbe1031b421..4b7831f0d012 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -627,77 +627,6 @@
#define M2048x1536_R60_HSP NEGATIVE
#define M2048x1536_R60_VSP POSITIVE
-/* define PLL index: */
-#define CLK_25_175M 25175000
-#define CLK_26_880M 26880000
-#define CLK_29_581M 29581000
-#define CLK_31_500M 31500000
-#define CLK_31_728M 31728000
-#define CLK_32_668M 32688000
-#define CLK_36_000M 36000000
-#define CLK_40_000M 40000000
-#define CLK_41_291M 41291000
-#define CLK_43_163M 43163000
-#define CLK_45_250M 45250000 /* 45.46MHz */
-#define CLK_46_000M 46000000
-#define CLK_46_996M 46996000
-#define CLK_48_000M 48000000
-#define CLK_48_875M 48875000
-#define CLK_49_500M 49500000
-#define CLK_52_406M 52406000
-#define CLK_52_977M 52977000
-#define CLK_56_250M 56250000
-#define CLK_57_275M 57275000
-#define CLK_60_466M 60466000
-#define CLK_61_500M 61500000
-#define CLK_65_000M 65000000
-#define CLK_65_178M 65178000
-#define CLK_66_750M 66750000 /* 67.116MHz */
-#define CLK_68_179M 68179000
-#define CLK_69_924M 69924000
-#define CLK_70_159M 70159000
-#define CLK_72_000M 72000000
-#define CLK_74_270M 74270000
-#define CLK_78_750M 78750000
-#define CLK_80_136M 80136000
-#define CLK_83_375M 83375000
-#define CLK_83_950M 83950000
-#define CLK_84_750M 84750000 /* 84.537Mhz */
-#define CLK_85_860M 85860000
-#define CLK_88_750M 88750000
-#define CLK_94_500M 94500000
-#define CLK_97_750M 97750000
-#define CLK_101_000M 101000000
-#define CLK_106_500M 106500000
-#define CLK_108_000M 108000000
-#define CLK_113_309M 113309000
-#define CLK_118_840M 118840000
-#define CLK_119_000M 119000000
-#define CLK_121_750M 121750000 /* 121.704MHz */
-#define CLK_125_104M 125104000
-#define CLK_135_000M 135000000
-#define CLK_136_700M 136700000
-#define CLK_138_400M 138400000
-#define CLK_146_760M 146760000
-#define CLK_148_500M 148500000
-
-#define CLK_153_920M 153920000
-#define CLK_156_000M 156000000
-#define CLK_157_500M 157500000
-#define CLK_162_000M 162000000
-#define CLK_187_000M 187000000
-#define CLK_193_295M 193295000
-#define CLK_202_500M 202500000
-#define CLK_204_000M 204000000
-#define CLK_218_500M 218500000
-#define CLK_234_000M 234000000
-#define CLK_267_250M 267250000
-#define CLK_297_500M 297500000
-#define CLK_74_481M 74481000
-#define CLK_172_798M 172798000
-#define CLK_122_614M 122614000
-
-
/* Definition CRTC Timing Index */
#define H_TOTAL_INDEX 0
#define H_ADDR_INDEX 1
@@ -722,76 +651,7 @@
/* Definition Video Mode Pixel Clock (picoseconds)
*/
-#define RES_480X640_60HZ_PIXCLOCK 39722
#define RES_640X480_60HZ_PIXCLOCK 39722
-#define RES_640X480_75HZ_PIXCLOCK 31747
-#define RES_640X480_85HZ_PIXCLOCK 27777
-#define RES_640X480_100HZ_PIXCLOCK 23168
-#define RES_640X480_120HZ_PIXCLOCK 19081
-#define RES_720X480_60HZ_PIXCLOCK 37020
-#define RES_720X576_60HZ_PIXCLOCK 30611
-#define RES_800X600_60HZ_PIXCLOCK 25000
-#define RES_800X600_75HZ_PIXCLOCK 20203
-#define RES_800X600_85HZ_PIXCLOCK 17777
-#define RES_800X600_100HZ_PIXCLOCK 14667
-#define RES_800X600_120HZ_PIXCLOCK 11912
-#define RES_800X480_60HZ_PIXCLOCK 33805
-#define RES_848X480_60HZ_PIXCLOCK 31756
-#define RES_856X480_60HZ_PIXCLOCK 31518
-#define RES_1024X512_60HZ_PIXCLOCK 24218
-#define RES_1024X600_60HZ_PIXCLOCK 20460
-#define RES_1024X768_60HZ_PIXCLOCK 15385
-#define RES_1024X768_75HZ_PIXCLOCK 12699
-#define RES_1024X768_85HZ_PIXCLOCK 10582
-#define RES_1024X768_100HZ_PIXCLOCK 8825
-#define RES_1152X864_75HZ_PIXCLOCK 9259
-#define RES_1280X768_60HZ_PIXCLOCK 12480
-#define RES_1280X800_60HZ_PIXCLOCK 11994
-#define RES_1280X960_60HZ_PIXCLOCK 9259
-#define RES_1280X1024_60HZ_PIXCLOCK 9260
-#define RES_1280X1024_75HZ_PIXCLOCK 7408
-#define RES_1280X768_85HZ_PIXCLOCK 6349
-#define RES_1440X1050_60HZ_PIXCLOCK 7993
-#define RES_1600X1200_60HZ_PIXCLOCK 6172
-#define RES_1600X1200_75HZ_PIXCLOCK 4938
-#define RES_1280X720_60HZ_PIXCLOCK 13426
-#define RES_1200X900_60HZ_PIXCLOCK 17459
-#define RES_1920X1080_60HZ_PIXCLOCK 5787
-#define RES_1400X1050_60HZ_PIXCLOCK 8214
-#define RES_1400X1050_75HZ_PIXCLOCK 6410
-#define RES_1368X768_60HZ_PIXCLOCK 11647
-#define RES_960X600_60HZ_PIXCLOCK 22099
-#define RES_1000X600_60HZ_PIXCLOCK 20834
-#define RES_1024X576_60HZ_PIXCLOCK 21278
-#define RES_1088X612_60HZ_PIXCLOCK 18877
-#define RES_1152X720_60HZ_PIXCLOCK 14981
-#define RES_1200X720_60HZ_PIXCLOCK 14253
-#define RES_1280X600_60HZ_PIXCLOCK 16260
-#define RES_1280X720_50HZ_PIXCLOCK 16538
-#define RES_1280X768_50HZ_PIXCLOCK 15342
-#define RES_1366X768_50HZ_PIXCLOCK 14301
-#define RES_1366X768_60HZ_PIXCLOCK 11646
-#define RES_1360X768_60HZ_PIXCLOCK 11799
-#define RES_1440X900_60HZ_PIXCLOCK 9390
-#define RES_1440X900_75HZ_PIXCLOCK 7315
-#define RES_1600X900_60HZ_PIXCLOCK 8415
-#define RES_1600X1024_60HZ_PIXCLOCK 7315
-#define RES_1680X1050_60HZ_PIXCLOCK 6814
-#define RES_1680X1050_75HZ_PIXCLOCK 5348
-#define RES_1792X1344_60HZ_PIXCLOCK 4902
-#define RES_1856X1392_60HZ_PIXCLOCK 4577
-#define RES_1920X1200_60HZ_PIXCLOCK 5173
-#define RES_1920X1440_60HZ_PIXCLOCK 4274
-#define RES_1920X1440_75HZ_PIXCLOCK 3367
-#define RES_2048X1536_60HZ_PIXCLOCK 3742
-
-#define RES_1360X768_RB_60HZ_PIXCLOCK 13889
-#define RES_1400X1050_RB_60HZ_PIXCLOCK 9901
-#define RES_1440X900_RB_60HZ_PIXCLOCK 11268
-#define RES_1600X900_RB_60HZ_PIXCLOCK 10230
-#define RES_1680X1050_RB_60HZ_PIXCLOCK 8403
-#define RES_1920X1080_RB_60HZ_PIXCLOCK 7225
-#define RES_1920X1200_RB_60HZ_PIXCLOCK 6497
/* LCD display method
*/
@@ -822,7 +682,6 @@ struct display_timing {
struct crt_mode_table {
int refresh_rate;
- unsigned long clk;
int h_sync_polarity;
int v_sync_polarity;
struct display_timing crtc;
diff --git a/drivers/video/via/tblDPASetting.c b/drivers/video/via/tblDPASetting.c
index 0c4c8cc712f4..73bb554e7c1e 100644
--- a/drivers/video/via/tblDPASetting.c
+++ b/drivers/video/via/tblDPASetting.c
@@ -20,17 +20,6 @@
*/
#include "global.h"
-/* For VT3324: */
-struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3324[] = {
- /* Panel ID, CLK_SEL_ST1[09], CLK_SEL_ST2[08] */
- {LCD_PANEL_ID0_640X480, 0x00, 0x00}, /* For 640x480 */
- {LCD_PANEL_ID1_800X600, 0x00, 0x00}, /* For 800x600 */
- {LCD_PANEL_ID2_1024X768, 0x00, 0x00}, /* For 1024x768 */
- {LCD_PANEL_ID3_1280X768, 0x00, 0x00}, /* For 1280x768 */
- {LCD_PANEL_ID4_1280X1024, 0x00, 0x00}, /* For 1280x1024 */
- {LCD_PANEL_ID5_1400X1050, 0x00, 0x00}, /* For 1400x1050 */
- {LCD_PANEL_ID6_1600X1200, 0x0B, 0x03} /* For 1600x1200 */
-};
struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3324[] = {
/* ClkRange, DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
@@ -57,18 +46,6 @@ struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3324[] = {
0x00},
};
-/* For VT3327: */
-struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3327[] = {
- /* Panel ID, CLK_SEL_ST1[09], CLK_SEL_ST2[08] */
- {LCD_PANEL_ID0_640X480, 0x00, 0x00}, /* For 640x480 */
- {LCD_PANEL_ID1_800X600, 0x00, 0x00}, /* For 800x600 */
- {LCD_PANEL_ID2_1024X768, 0x00, 0x00}, /* For 1024x768 */
- {LCD_PANEL_ID3_1280X768, 0x00, 0x00}, /* For 1280x768 */
- {LCD_PANEL_ID4_1280X1024, 0x00, 0x00}, /* For 1280x1024 */
- {LCD_PANEL_ID5_1400X1050, 0x00, 0x00}, /* For 1400x1050 */
- {LCD_PANEL_ID6_1600X1200, 0x00, 0x00} /* For 1600x1200 */
-};
-
struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3327[] = {
/* ClkRange,DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1,
DVP1Driving, DFPHigh, DFPLow */
diff --git a/drivers/video/via/tblDPASetting.h b/drivers/video/via/tblDPASetting.h
index b065a83481d3..6db61519cb5d 100644
--- a/drivers/video/via/tblDPASetting.h
+++ b/drivers/video/via/tblDPASetting.h
@@ -38,9 +38,7 @@ enum DPA_RANGE {
DPA_CLK_RANGE_150M
};
-extern struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3324[7];
extern struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3324[6];
-extern struct VT1636_DPA_SETTING VT1636_DPA_SETTING_TBL_VT3327[7];
extern struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3327[];
extern struct GFX_DPA_SETTING GFX_DPA_SETTING_TBL_VT3364[6];
diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c
index 3844b558b7bd..78f1405dbab7 100644
--- a/drivers/video/via/via_i2c.c
+++ b/drivers/video/via/via_i2c.c
@@ -32,7 +32,7 @@
*/
#define VIAFB_NUM_I2C 5
static struct via_i2c_stuff via_i2c_par[VIAFB_NUM_I2C];
-struct viafb_dev *i2c_vdev; /* Passed in from core */
+static struct viafb_dev *i2c_vdev; /* Passed in from core */
static void via_i2c_setscl(void *data, int state)
{
@@ -209,7 +209,6 @@ static int create_i2c_bus(struct i2c_adapter *adapter,
sprintf(adapter->name, "viafb i2c io_port idx 0x%02x",
adap_cfg->ioport_index);
adapter->owner = THIS_MODULE;
- adapter->id = 0x01FFFF;
adapter->class = I2C_CLASS_DDC;
adapter->algo_data = algo;
if (pdev)
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 4e66349e4366..f555b891cc72 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -43,11 +43,11 @@ static int viafb_second_size;
static int viafb_accel = 1;
/* Added for specifying active devices.*/
-char *viafb_active_dev;
+static char *viafb_active_dev;
/*Added for specify lcd output port*/
-char *viafb_lcd_port = "";
-char *viafb_dvi_port = "";
+static char *viafb_lcd_port = "";
+static char *viafb_dvi_port = "";
static void retrieve_device_setting(struct viafb_ioctl_setting
*setting_info);
diff --git a/drivers/video/via/viamode.c b/drivers/video/via/viamode.c
index 2dbad3c0f679..8c5bc41ff6a4 100644
--- a/drivers/video/via/viamode.c
+++ b/drivers/video/via/viamode.c
@@ -21,72 +21,6 @@
#include <linux/via-core.h>
#include "global.h"
-struct res_map_refresh res_map_refresh_tbl[] = {
-/*hres, vres, vclock, vmode_refresh*/
- {480, 640, RES_480X640_60HZ_PIXCLOCK, 60},
- {640, 480, RES_640X480_60HZ_PIXCLOCK, 60},
- {640, 480, RES_640X480_75HZ_PIXCLOCK, 75},
- {640, 480, RES_640X480_85HZ_PIXCLOCK, 85},
- {640, 480, RES_640X480_100HZ_PIXCLOCK, 100},
- {640, 480, RES_640X480_120HZ_PIXCLOCK, 120},
- {720, 480, RES_720X480_60HZ_PIXCLOCK, 60},
- {720, 576, RES_720X576_60HZ_PIXCLOCK, 60},
- {800, 480, RES_800X480_60HZ_PIXCLOCK, 60},
- {800, 600, RES_800X600_60HZ_PIXCLOCK, 60},
- {800, 600, RES_800X600_75HZ_PIXCLOCK, 75},
- {800, 600, RES_800X600_85HZ_PIXCLOCK, 85},
- {800, 600, RES_800X600_100HZ_PIXCLOCK, 100},
- {800, 600, RES_800X600_120HZ_PIXCLOCK, 120},
- {848, 480, RES_848X480_60HZ_PIXCLOCK, 60},
- {856, 480, RES_856X480_60HZ_PIXCLOCK, 60},
- {1024, 512, RES_1024X512_60HZ_PIXCLOCK, 60},
- {1024, 600, RES_1024X600_60HZ_PIXCLOCK, 60},
- {1024, 768, RES_1024X768_60HZ_PIXCLOCK, 60},
- {1024, 768, RES_1024X768_75HZ_PIXCLOCK, 75},
- {1024, 768, RES_1024X768_85HZ_PIXCLOCK, 85},
- {1024, 768, RES_1024X768_100HZ_PIXCLOCK, 100},
-/* {1152,864, RES_1152X864_70HZ_PIXCLOCK, 70},*/
- {1152, 864, RES_1152X864_75HZ_PIXCLOCK, 75},
- {1280, 768, RES_1280X768_60HZ_PIXCLOCK, 60},
- {1280, 800, RES_1280X800_60HZ_PIXCLOCK, 60},
- {1280, 960, RES_1280X960_60HZ_PIXCLOCK, 60},
- {1280, 1024, RES_1280X1024_60HZ_PIXCLOCK, 60},
- {1280, 1024, RES_1280X1024_75HZ_PIXCLOCK, 75},
- {1280, 1024, RES_1280X768_85HZ_PIXCLOCK, 85},
- {1440, 1050, RES_1440X1050_60HZ_PIXCLOCK, 60},
- {1600, 1200, RES_1600X1200_60HZ_PIXCLOCK, 60},
- {1600, 1200, RES_1600X1200_75HZ_PIXCLOCK, 75},
- {1280, 720, RES_1280X720_60HZ_PIXCLOCK, 60},
- {1920, 1080, RES_1920X1080_60HZ_PIXCLOCK, 60},
- {1400, 1050, RES_1400X1050_60HZ_PIXCLOCK, 60},
- {1400, 1050, RES_1400X1050_75HZ_PIXCLOCK, 75},
- {1368, 768, RES_1368X768_60HZ_PIXCLOCK, 60},
- {960, 600, RES_960X600_60HZ_PIXCLOCK, 60},
- {1000, 600, RES_1000X600_60HZ_PIXCLOCK, 60},
- {1024, 576, RES_1024X576_60HZ_PIXCLOCK, 60},
- {1088, 612, RES_1088X612_60HZ_PIXCLOCK, 60},
- {1152, 720, RES_1152X720_60HZ_PIXCLOCK, 60},
- {1200, 720, RES_1200X720_60HZ_PIXCLOCK, 60},
- {1200, 900, RES_1200X900_60HZ_PIXCLOCK, 60},
- {1280, 600, RES_1280X600_60HZ_PIXCLOCK, 60},
- {1280, 720, RES_1280X720_50HZ_PIXCLOCK, 50},
- {1280, 768, RES_1280X768_50HZ_PIXCLOCK, 50},
- {1360, 768, RES_1360X768_60HZ_PIXCLOCK, 60},
- {1366, 768, RES_1366X768_50HZ_PIXCLOCK, 50},
- {1366, 768, RES_1366X768_60HZ_PIXCLOCK, 60},
- {1440, 900, RES_1440X900_60HZ_PIXCLOCK, 60},
- {1440, 900, RES_1440X900_75HZ_PIXCLOCK, 75},
- {1600, 900, RES_1600X900_60HZ_PIXCLOCK, 60},
- {1600, 1024, RES_1600X1024_60HZ_PIXCLOCK, 60},
- {1680, 1050, RES_1680X1050_60HZ_PIXCLOCK, 60},
- {1680, 1050, RES_1680X1050_75HZ_PIXCLOCK, 75},
- {1792, 1344, RES_1792X1344_60HZ_PIXCLOCK, 60},
- {1856, 1392, RES_1856X1392_60HZ_PIXCLOCK, 60},
- {1920, 1200, RES_1920X1200_60HZ_PIXCLOCK, 60},
- {1920, 1440, RES_1920X1440_60HZ_PIXCLOCK, 60},
- {1920, 1440, RES_1920X1440_75HZ_PIXCLOCK, 75},
- {2048, 1536, RES_2048X1536_60HZ_PIXCLOCK, 60}
-};
struct io_reg CN400_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIASR, SR15, 0x02, 0x02},
@@ -108,20 +42,6 @@ struct io_reg CN400_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR6A, 0xFF, 0x40},
{VIACR, CR6B, 0xFF, 0x00},
{VIACR, CR6C, 0xFF, 0x00},
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -172,20 +92,6 @@ struct io_reg CN700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR78, 0xFF, 0x00}, /* LCD scaling Factor */
{VIACR, CR79, 0xFF, 0x00}, /* LCD scaling Factor */
{VIACR, CR9F, 0x03, 0x00}, /* LCD scaling Factor */
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -229,20 +135,6 @@ struct io_reg KM400_ModeXregs[] = {
{VIACR, CR36, 0xFF, 0x01}, /* Power Mangement 3 */
{VIACR, CR68, 0xFF, 0x67}, /* Default FIFO For IGA2 */
{VIACR, CR6A, 0x20, 0x20}, /* Extended FIFO On */
- {VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
- {VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
- {VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
- {VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
- {VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
- {VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
- {VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
- {VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
- {VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
- {VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
- {VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
- {VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
- {VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
- {VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -283,20 +175,6 @@ struct io_reg CX700_ModeXregs[] = { {VIASR, SR10, 0xFF, 0x01},
{VIACR, CR6A, 0xFF, 0x40},
{VIACR, CR6B, 0xFF, 0x00},
{VIACR, CR6C, 0xFF, 0x00},
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -342,20 +220,6 @@ struct io_reg VX855_ModeXregs[] = {
{VIACR, CR6A, 0xFD, 0x60},
{VIACR, CR6B, 0xFF, 0x00},
{VIACR, CR6C, 0xFF, 0x00},
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Scaling Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Scaling Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Scaling Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Scaling Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Scaling Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Scaling Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Scaling Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Scaling Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Scaling Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Scaling Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Scaling Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Scaling Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Scaling Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Scaling Parameter 14 */
{VIACR, CR88, 0xFF, 0x40}, /* LCD Panel Type */
{VIACR, CR89, 0xFF, 0x00}, /* LCD Timing Control 0 */
{VIACR, CR8A, 0xFF, 0x88}, /* LCD Timing Control 1 */
@@ -390,21 +254,6 @@ struct io_reg CLE266_ModeXregs[] = { {VIASR, SR1E, 0xF0, 0x00},
{VIAGR, GR20, 0xFF, 0x00},
{VIAGR, GR21, 0xFF, 0x00},
{VIAGR, GR22, 0xFF, 0x00},
- /* LCD Parameters */
-{VIACR, CR7A, 0xFF, 0x01}, /* LCD Parameter 1 */
-{VIACR, CR7B, 0xFF, 0x02}, /* LCD Parameter 2 */
-{VIACR, CR7C, 0xFF, 0x03}, /* LCD Parameter 3 */
-{VIACR, CR7D, 0xFF, 0x04}, /* LCD Parameter 4 */
-{VIACR, CR7E, 0xFF, 0x07}, /* LCD Parameter 5 */
-{VIACR, CR7F, 0xFF, 0x0A}, /* LCD Parameter 6 */
-{VIACR, CR80, 0xFF, 0x0D}, /* LCD Parameter 7 */
-{VIACR, CR81, 0xFF, 0x13}, /* LCD Parameter 8 */
-{VIACR, CR82, 0xFF, 0x16}, /* LCD Parameter 9 */
-{VIACR, CR83, 0xFF, 0x19}, /* LCD Parameter 10 */
-{VIACR, CR84, 0xFF, 0x1C}, /* LCD Parameter 11 */
-{VIACR, CR85, 0xFF, 0x1D}, /* LCD Parameter 12 */
-{VIACR, CR86, 0xFF, 0x1E}, /* LCD Parameter 13 */
-{VIACR, CR87, 0xFF, 0x1F}, /* LCD Parameter 14 */
};
@@ -443,328 +292,321 @@ struct VPITTable VPIT = {
/********************/
/* 480x640 */
-struct crt_mode_table CRTM480x640[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM480x640[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_25_175M, M480X640_R60_HSP, M480X640_R60_VSP,
+ {REFRESH_60, M480X640_R60_HSP, M480X640_R60_VSP,
{624, 480, 480, 144, 504, 48, 663, 640, 640, 23, 641, 3} } /* GTF*/
};
/* 640x480*/
-struct crt_mode_table CRTM640x480[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM640x480[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_25_175M, M640X480_R60_HSP, M640X480_R60_VSP,
+ {REFRESH_60, M640X480_R60_HSP, M640X480_R60_VSP,
{800, 640, 648, 144, 656, 96, 525, 480, 480, 45, 490, 2} },
- {REFRESH_75, CLK_31_500M, M640X480_R75_HSP, M640X480_R75_VSP,
+ {REFRESH_75, M640X480_R75_HSP, M640X480_R75_VSP,
{840, 640, 640, 200, 656, 64, 500, 480, 480, 20, 481, 3} },
- {REFRESH_85, CLK_36_000M, M640X480_R85_HSP, M640X480_R85_VSP,
+ {REFRESH_85, M640X480_R85_HSP, M640X480_R85_VSP,
{832, 640, 640, 192, 696, 56, 509, 480, 480, 29, 481, 3} },
- {REFRESH_100, CLK_43_163M, M640X480_R100_HSP, M640X480_R100_VSP,
+ {REFRESH_100, M640X480_R100_HSP, M640X480_R100_VSP,
{848, 640, 640, 208, 680, 64, 509, 480, 480, 29, 481, 3} }, /*GTF*/
- {REFRESH_120, CLK_52_406M, M640X480_R120_HSP,
- M640X480_R120_VSP,
- {848, 640, 640, 208, 680, 64, 515, 480, 480, 35, 481,
- 3} } /*GTF*/
+ {REFRESH_120, M640X480_R120_HSP, M640X480_R120_VSP,
+ {848, 640, 640, 208, 680, 64, 515, 480, 480, 35, 481, 3} } /*GTF*/
};
/*720x480 (GTF)*/
-struct crt_mode_table CRTM720x480[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM720x480[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_26_880M, M720X480_R60_HSP, M720X480_R60_VSP,
+ {REFRESH_60, M720X480_R60_HSP, M720X480_R60_VSP,
{896, 720, 720, 176, 736, 72, 497, 480, 480, 17, 481, 3} }
};
/*720x576 (GTF)*/
-struct crt_mode_table CRTM720x576[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM720x576[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_32_668M, M720X576_R60_HSP, M720X576_R60_VSP,
+ {REFRESH_60, M720X576_R60_HSP, M720X576_R60_VSP,
{912, 720, 720, 192, 744, 72, 597, 576, 576, 21, 577, 3} }
};
/* 800x480 (CVT) */
-struct crt_mode_table CRTM800x480[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM800x480[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_29_581M, M800X480_R60_HSP, M800X480_R60_VSP,
+ {REFRESH_60, M800X480_R60_HSP, M800X480_R60_VSP,
{992, 800, 800, 192, 824, 72, 500, 480, 480, 20, 483, 7} }
};
/* 800x600*/
-struct crt_mode_table CRTM800x600[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM800x600[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_40_000M, M800X600_R60_HSP, M800X600_R60_VSP,
+ {REFRESH_60, M800X600_R60_HSP, M800X600_R60_VSP,
{1056, 800, 800, 256, 840, 128, 628, 600, 600, 28, 601, 4} },
- {REFRESH_75, CLK_49_500M, M800X600_R75_HSP, M800X600_R75_VSP,
+ {REFRESH_75, M800X600_R75_HSP, M800X600_R75_VSP,
{1056, 800, 800, 256, 816, 80, 625, 600, 600, 25, 601, 3} },
- {REFRESH_85, CLK_56_250M, M800X600_R85_HSP, M800X600_R85_VSP,
+ {REFRESH_85, M800X600_R85_HSP, M800X600_R85_VSP,
{1048, 800, 800, 248, 832, 64, 631, 600, 600, 31, 601, 3} },
- {REFRESH_100, CLK_68_179M, M800X600_R100_HSP, M800X600_R100_VSP,
+ {REFRESH_100, M800X600_R100_HSP, M800X600_R100_VSP,
{1072, 800, 800, 272, 848, 88, 636, 600, 600, 36, 601, 3} },
- {REFRESH_120, CLK_83_950M, M800X600_R120_HSP,
- M800X600_R120_VSP,
- {1088, 800, 800, 288, 856, 88, 643, 600, 600, 43, 601,
- 3} }
+ {REFRESH_120, M800X600_R120_HSP, M800X600_R120_VSP,
+ {1088, 800, 800, 288, 856, 88, 643, 600, 600, 43, 601, 3} }
};
/* 848x480 (CVT) */
-struct crt_mode_table CRTM848x480[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM848x480[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_31_500M, M848X480_R60_HSP, M848X480_R60_VSP,
+ {REFRESH_60, M848X480_R60_HSP, M848X480_R60_VSP,
{1056, 848, 848, 208, 872, 80, 500, 480, 480, 20, 483, 5} }
};
/*856x480 (GTF) convert to 852x480*/
-struct crt_mode_table CRTM852x480[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM852x480[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_31_728M, M852X480_R60_HSP, M852X480_R60_VSP,
+ {REFRESH_60, M852X480_R60_HSP, M852X480_R60_VSP,
{1064, 856, 856, 208, 872, 88, 497, 480, 480, 17, 481, 3} }
};
/*1024x512 (GTF)*/
-struct crt_mode_table CRTM1024x512[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1024x512[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_41_291M, M1024X512_R60_HSP, M1024X512_R60_VSP,
+ {REFRESH_60, M1024X512_R60_HSP, M1024X512_R60_VSP,
{1296, 1024, 1024, 272, 1056, 104, 531, 512, 512, 19, 513, 3} }
};
/* 1024x600*/
-struct crt_mode_table CRTM1024x600[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1024x600[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_48_875M, M1024X600_R60_HSP, M1024X600_R60_VSP,
+ {REFRESH_60, M1024X600_R60_HSP, M1024X600_R60_VSP,
{1312, 1024, 1024, 288, 1064, 104, 622, 600, 600, 22, 601, 3} },
};
/* 1024x768*/
-struct crt_mode_table CRTM1024x768[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1024x768[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_65_000M, M1024X768_R60_HSP, M1024X768_R60_VSP,
+ {REFRESH_60, M1024X768_R60_HSP, M1024X768_R60_VSP,
{1344, 1024, 1024, 320, 1048, 136, 806, 768, 768, 38, 771, 6} },
- {REFRESH_75, CLK_78_750M, M1024X768_R75_HSP, M1024X768_R75_VSP,
+ {REFRESH_75, M1024X768_R75_HSP, M1024X768_R75_VSP,
{1312, 1024, 1024, 288, 1040, 96, 800, 768, 768, 32, 769, 3} },
- {REFRESH_85, CLK_94_500M, M1024X768_R85_HSP, M1024X768_R85_VSP,
+ {REFRESH_85, M1024X768_R85_HSP, M1024X768_R85_VSP,
{1376, 1024, 1024, 352, 1072, 96, 808, 768, 768, 40, 769, 3} },
- {REFRESH_100, CLK_113_309M, M1024X768_R100_HSP, M1024X768_R100_VSP,
+ {REFRESH_100, M1024X768_R100_HSP, M1024X768_R100_VSP,
{1392, 1024, 1024, 368, 1096, 112, 814, 768, 768, 46, 769, 3} }
};
/* 1152x864*/
-struct crt_mode_table CRTM1152x864[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1152x864[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_75, CLK_108_000M, M1152X864_R75_HSP, M1152X864_R75_VSP,
+ {REFRESH_75, M1152X864_R75_HSP, M1152X864_R75_VSP,
{1600, 1152, 1152, 448, 1216, 128, 900, 864, 864, 36, 865, 3} }
};
/* 1280x720 (HDMI 720P)*/
-struct crt_mode_table CRTM1280x720[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1280x720[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_74_481M, M1280X720_R60_HSP, M1280X720_R60_VSP,
+ {REFRESH_60, M1280X720_R60_HSP, M1280X720_R60_VSP,
{1648, 1280, 1280, 368, 1392, 40, 750, 720, 720, 30, 725, 5} },
- {REFRESH_50, CLK_60_466M, M1280X720_R50_HSP, M1280X720_R50_VSP,
+ {REFRESH_50, M1280X720_R50_HSP, M1280X720_R50_VSP,
{1632, 1280, 1280, 352, 1328, 128, 741, 720, 720, 21, 721, 3} }
};
/*1280x768 (GTF)*/
-struct crt_mode_table CRTM1280x768[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1280x768[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_80_136M, M1280X768_R60_HSP, M1280X768_R60_VSP,
+ {REFRESH_60, M1280X768_R60_HSP, M1280X768_R60_VSP,
{1680, 1280, 1280, 400, 1344, 136, 795, 768, 768, 27, 769, 3} },
- {REFRESH_50, CLK_65_178M, M1280X768_R50_HSP, M1280X768_R50_VSP,
+ {REFRESH_50, M1280X768_R50_HSP, M1280X768_R50_VSP,
{1648, 1280, 1280, 368, 1336, 128, 791, 768, 768, 23, 769, 3} }
};
/* 1280x800 (CVT) */
-struct crt_mode_table CRTM1280x800[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1280x800[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_83_375M, M1280X800_R60_HSP, M1280X800_R60_VSP,
+ {REFRESH_60, M1280X800_R60_HSP, M1280X800_R60_VSP,
{1680, 1280, 1280, 400, 1352, 128, 831, 800, 800, 31, 803, 6} }
};
/*1280x960*/
-struct crt_mode_table CRTM1280x960[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1280x960[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_108_000M, M1280X960_R60_HSP, M1280X960_R60_VSP,
+ {REFRESH_60, M1280X960_R60_HSP, M1280X960_R60_VSP,
{1800, 1280, 1280, 520, 1376, 112, 1000, 960, 960, 40, 961, 3} }
};
/* 1280x1024*/
-struct crt_mode_table CRTM1280x1024[] = {
- /*r_rate,vclk,,hsp,vsp */
+static struct crt_mode_table CRTM1280x1024[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_108_000M, M1280X1024_R60_HSP, M1280X1024_R60_VSP,
+ {REFRESH_60, M1280X1024_R60_HSP, M1280X1024_R60_VSP,
{1688, 1280, 1280, 408, 1328, 112, 1066, 1024, 1024, 42, 1025,
3} },
- {REFRESH_75, CLK_135_000M, M1280X1024_R75_HSP, M1280X1024_R75_VSP,
+ {REFRESH_75, M1280X1024_R75_HSP, M1280X1024_R75_VSP,
{1688, 1280, 1280, 408, 1296, 144, 1066, 1024, 1024, 42, 1025,
3} },
- {REFRESH_85, CLK_157_500M, M1280X1024_R85_HSP, M1280X1024_R85_VSP,
+ {REFRESH_85, M1280X1024_R85_HSP, M1280X1024_R85_VSP,
{1728, 1280, 1280, 448, 1344, 160, 1072, 1024, 1024, 48, 1025, 3} }
};
/* 1368x768 (GTF) */
-struct crt_mode_table CRTM1368x768[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1368x768[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_85_860M, M1368X768_R60_HSP, M1368X768_R60_VSP,
+ {REFRESH_60, M1368X768_R60_HSP, M1368X768_R60_VSP,
{1800, 1368, 1368, 432, 1440, 144, 795, 768, 768, 27, 769, 3} }
};
/*1440x1050 (GTF)*/
-struct crt_mode_table CRTM1440x1050[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1440x1050[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_125_104M, M1440X1050_R60_HSP, M1440X1050_R60_VSP,
+ {REFRESH_60, M1440X1050_R60_HSP, M1440X1050_R60_VSP,
{1936, 1440, 1440, 496, 1536, 152, 1077, 1040, 1040, 37, 1041, 3} }
};
/* 1600x1200*/
-struct crt_mode_table CRTM1600x1200[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1600x1200[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_162_000M, M1600X1200_R60_HSP, M1600X1200_R60_VSP,
+ {REFRESH_60, M1600X1200_R60_HSP, M1600X1200_R60_VSP,
{2160, 1600, 1600, 560, 1664, 192, 1250, 1200, 1200, 50, 1201,
3} },
- {REFRESH_75, CLK_202_500M, M1600X1200_R75_HSP, M1600X1200_R75_VSP,
+ {REFRESH_75, M1600X1200_R75_HSP, M1600X1200_R75_VSP,
{2160, 1600, 1600, 560, 1664, 192, 1250, 1200, 1200, 50, 1201, 3} }
};
/* 1680x1050 (CVT) */
-struct crt_mode_table CRTM1680x1050[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1680x1050[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_146_760M, M1680x1050_R60_HSP, M1680x1050_R60_VSP,
+ {REFRESH_60, M1680x1050_R60_HSP, M1680x1050_R60_VSP,
{2240, 1680, 1680, 560, 1784, 176, 1089, 1050, 1050, 39, 1053,
6} },
- {REFRESH_75, CLK_187_000M, M1680x1050_R75_HSP, M1680x1050_R75_VSP,
+ {REFRESH_75, M1680x1050_R75_HSP, M1680x1050_R75_VSP,
{2272, 1680, 1680, 592, 1800, 176, 1099, 1050, 1050, 49, 1053, 6} }
};
/* 1680x1050 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1680x1050_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1680x1050_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_119_000M, M1680x1050_RB_R60_HSP,
- M1680x1050_RB_R60_VSP,
+ {REFRESH_60, M1680x1050_RB_R60_HSP, M1680x1050_RB_R60_VSP,
{1840, 1680, 1680, 160, 1728, 32, 1080, 1050, 1050, 30, 1053, 6} }
};
/* 1920x1080 (CVT)*/
-struct crt_mode_table CRTM1920x1080[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1920x1080[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_172_798M, M1920X1080_R60_HSP, M1920X1080_R60_VSP,
+ {REFRESH_60, M1920X1080_R60_HSP, M1920X1080_R60_VSP,
{2576, 1920, 1920, 656, 2048, 200, 1120, 1080, 1080, 40, 1083, 5} }
};
/* 1920x1080 (CVT with Reduce Blanking) */
-struct crt_mode_table CRTM1920x1080_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1920x1080_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_138_400M, M1920X1080_RB_R60_HSP,
- M1920X1080_RB_R60_VSP,
+ {REFRESH_60, M1920X1080_RB_R60_HSP, M1920X1080_RB_R60_VSP,
{2080, 1920, 1920, 160, 1968, 32, 1111, 1080, 1080, 31, 1083, 5} }
};
/* 1920x1440*/
-struct crt_mode_table CRTM1920x1440[] = {
- /*r_rate,vclk,hsp,vsp */
+static struct crt_mode_table CRTM1920x1440[] = {
+ /*r_rate,hsp,vsp */
/*HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_234_000M, M1920X1440_R60_HSP, M1920X1440_R60_VSP,
+ {REFRESH_60, M1920X1440_R60_HSP, M1920X1440_R60_VSP,
{2600, 1920, 1920, 680, 2048, 208, 1500, 1440, 1440, 60, 1441,
3} },
- {REFRESH_75, CLK_297_500M, M1920X1440_R75_HSP, M1920X1440_R75_VSP,
+ {REFRESH_75, M1920X1440_R75_HSP, M1920X1440_R75_VSP,
{2640, 1920, 1920, 720, 2064, 224, 1500, 1440, 1440, 60, 1441, 3} }
};
/* 1400x1050 (CVT) */
-struct crt_mode_table CRTM1400x1050[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1400x1050[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_121_750M, M1400X1050_R60_HSP, M1400X1050_R60_VSP,
+ {REFRESH_60, M1400X1050_R60_HSP, M1400X1050_R60_VSP,
{1864, 1400, 1400, 464, 1488, 144, 1089, 1050, 1050, 39, 1053,
4} },
- {REFRESH_75, CLK_156_000M, M1400X1050_R75_HSP, M1400X1050_R75_VSP,
+ {REFRESH_75, M1400X1050_R75_HSP, M1400X1050_R75_VSP,
{1896, 1400, 1400, 496, 1504, 144, 1099, 1050, 1050, 49, 1053, 4} }
};
/* 1400x1050 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1400x1050_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1400x1050_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_101_000M, M1400X1050_RB_R60_HSP,
- M1400X1050_RB_R60_VSP,
+ {REFRESH_60, M1400X1050_RB_R60_HSP, M1400X1050_RB_R60_VSP,
{1560, 1400, 1400, 160, 1448, 32, 1080, 1050, 1050, 30, 1053, 4} }
};
/* 960x600 (CVT) */
-struct crt_mode_table CRTM960x600[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM960x600[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_45_250M, M960X600_R60_HSP, M960X600_R60_VSP,
+ {REFRESH_60, M960X600_R60_HSP, M960X600_R60_VSP,
{1216, 960, 960, 256, 992, 96, 624, 600, 600, 24, 603, 6} }
};
/* 1000x600 (GTF) */
-struct crt_mode_table CRTM1000x600[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1000x600[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_48_000M, M1000X600_R60_HSP, M1000X600_R60_VSP,
+ {REFRESH_60, M1000X600_R60_HSP, M1000X600_R60_VSP,
{1288, 1000, 1000, 288, 1040, 104, 622, 600, 600, 22, 601, 3} }
};
/* 1024x576 (GTF) */
-struct crt_mode_table CRTM1024x576[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1024x576[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_46_996M, M1024X576_R60_HSP, M1024X576_R60_VSP,
+ {REFRESH_60, M1024X576_R60_HSP, M1024X576_R60_VSP,
{1312, 1024, 1024, 288, 1064, 104, 597, 576, 576, 21, 577, 3} }
};
/* 1088x612 (CVT) */
-struct crt_mode_table CRTM1088x612[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1088x612[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_52_977M, M1088X612_R60_HSP, M1088X612_R60_VSP,
+ {REFRESH_60, M1088X612_R60_HSP, M1088X612_R60_VSP,
{1392, 1088, 1088, 304, 1136, 104, 636, 612, 612, 24, 615, 5} }
};
/* 1152x720 (CVT) */
-struct crt_mode_table CRTM1152x720[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1152x720[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_66_750M, M1152X720_R60_HSP, M1152X720_R60_VSP,
+ {REFRESH_60, M1152X720_R60_HSP, M1152X720_R60_VSP,
{1488, 1152, 1152, 336, 1208, 112, 748, 720, 720, 28, 723, 6} }
};
/* 1200x720 (GTF) */
-struct crt_mode_table CRTM1200x720[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1200x720[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_70_159M, M1200X720_R60_HSP, M1200X720_R60_VSP,
+ {REFRESH_60, M1200X720_R60_HSP, M1200X720_R60_VSP,
{1568, 1200, 1200, 368, 1256, 128, 746, 720, 720, 26, 721, 3} }
};
/* 1200x900 (DCON) */
-struct crt_mode_table DCON1200x900[] = {
- /* r_rate, vclk, hsp, vsp */
- {REFRESH_60, CLK_57_275M, M1200X900_R60_HSP, M1200X900_R60_VSP,
+static struct crt_mode_table DCON1200x900[] = {
+ /* r_rate, hsp, vsp */
+ {REFRESH_60, M1200X900_R60_HSP, M1200X900_R60_VSP,
/* The correct htotal is 1240, but this doesn't raster on VX855. */
/* Via suggested changing to a multiple of 16, hence 1264. */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
@@ -772,126 +614,122 @@ struct crt_mode_table DCON1200x900[] = {
};
/* 1280x600 (GTF) */
-struct crt_mode_table CRTM1280x600[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1280x600[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_61_500M, M1280x600_R60_HSP, M1280x600_R60_VSP,
+ {REFRESH_60, M1280x600_R60_HSP, M1280x600_R60_VSP,
{1648, 1280, 1280, 368, 1336, 128, 622, 600, 600, 22, 601, 3} }
};
/* 1360x768 (CVT) */
-struct crt_mode_table CRTM1360x768[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1360x768[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_84_750M, M1360X768_R60_HSP, M1360X768_R60_VSP,
+ {REFRESH_60, M1360X768_R60_HSP, M1360X768_R60_VSP,
{1776, 1360, 1360, 416, 1432, 136, 798, 768, 768, 30, 771, 5} }
};
/* 1360x768 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1360x768_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1360x768_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_72_000M, M1360X768_RB_R60_HSP,
- M1360X768_RB_R60_VSP,
+ {REFRESH_60, M1360X768_RB_R60_HSP, M1360X768_RB_R60_VSP,
{1520, 1360, 1360, 160, 1408, 32, 790, 768, 768, 22, 771, 5} }
};
/* 1366x768 (GTF) */
-struct crt_mode_table CRTM1366x768[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1366x768[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_85_860M, M1368X768_R60_HSP, M1368X768_R60_VSP,
+ {REFRESH_60, M1368X768_R60_HSP, M1368X768_R60_VSP,
{1800, 1368, 1368, 432, 1440, 144, 795, 768, 768, 27, 769, 3} },
- {REFRESH_50, CLK_69_924M, M1368X768_R50_HSP, M1368X768_R50_VSP,
+ {REFRESH_50, M1368X768_R50_HSP, M1368X768_R50_VSP,
{1768, 1368, 1368, 400, 1424, 144, 791, 768, 768, 23, 769, 3} }
};
/* 1440x900 (CVT) */
-struct crt_mode_table CRTM1440x900[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1440x900[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_106_500M, M1440X900_R60_HSP, M1440X900_R60_VSP,
+ {REFRESH_60, M1440X900_R60_HSP, M1440X900_R60_VSP,
{1904, 1440, 1440, 464, 1520, 152, 934, 900, 900, 34, 903, 6} },
- {REFRESH_75, CLK_136_700M, M1440X900_R75_HSP, M1440X900_R75_VSP,
+ {REFRESH_75, M1440X900_R75_HSP, M1440X900_R75_VSP,
{1936, 1440, 1440, 496, 1536, 152, 942, 900, 900, 42, 903, 6} }
};
/* 1440x900 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1440x900_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1440x900_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_88_750M, M1440X900_RB_R60_HSP,
- M1440X900_RB_R60_VSP,
+ {REFRESH_60, M1440X900_RB_R60_HSP, M1440X900_RB_R60_VSP,
{1600, 1440, 1440, 160, 1488, 32, 926, 900, 900, 26, 903, 6} }
};
/* 1600x900 (CVT) */
-struct crt_mode_table CRTM1600x900[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1600x900[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_118_840M, M1600X900_R60_HSP, M1600X900_R60_VSP,
+ {REFRESH_60, M1600X900_R60_HSP, M1600X900_R60_VSP,
{2112, 1600, 1600, 512, 1688, 168, 934, 900, 900, 34, 903, 5} }
};
/* 1600x900 (CVT Reduce Blanking) */
-struct crt_mode_table CRTM1600x900_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1600x900_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_97_750M, M1600X900_RB_R60_HSP,
- M1600X900_RB_R60_VSP,
+ {REFRESH_60, M1600X900_RB_R60_HSP, M1600X900_RB_R60_VSP,
{1760, 1600, 1600, 160, 1648, 32, 926, 900, 900, 26, 903, 5} }
};
/* 1600x1024 (GTF) */
-struct crt_mode_table CRTM1600x1024[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1600x1024[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_136_700M, M1600X1024_R60_HSP, M1600X1024_R60_VSP,
+ {REFRESH_60, M1600X1024_R60_HSP, M1600X1024_R60_VSP,
{2144, 1600, 1600, 544, 1704, 168, 1060, 1024, 1024, 36, 1025, 3} }
};
/* 1792x1344 (DMT) */
-struct crt_mode_table CRTM1792x1344[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1792x1344[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_204_000M, M1792x1344_R60_HSP, M1792x1344_R60_VSP,
+ {REFRESH_60, M1792x1344_R60_HSP, M1792x1344_R60_VSP,
{2448, 1792, 1792, 656, 1920, 200, 1394, 1344, 1344, 50, 1345, 3} }
};
/* 1856x1392 (DMT) */
-struct crt_mode_table CRTM1856x1392[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1856x1392[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_218_500M, M1856x1392_R60_HSP, M1856x1392_R60_VSP,
+ {REFRESH_60, M1856x1392_R60_HSP, M1856x1392_R60_VSP,
{2528, 1856, 1856, 672, 1952, 224, 1439, 1392, 1392, 47, 1393, 3} }
};
/* 1920x1200 (CVT) */
-struct crt_mode_table CRTM1920x1200[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1920x1200[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_193_295M, M1920X1200_R60_HSP, M1920X1200_R60_VSP,
+ {REFRESH_60, M1920X1200_R60_HSP, M1920X1200_R60_VSP,
{2592, 1920, 1920, 672, 2056, 200, 1245, 1200, 1200, 45, 1203, 6} }
};
/* 1920x1200 (CVT with Reduce Blanking) */
-struct crt_mode_table CRTM1920x1200_RB[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM1920x1200_RB[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_153_920M, M1920X1200_RB_R60_HSP,
- M1920X1200_RB_R60_VSP,
+ {REFRESH_60, M1920X1200_RB_R60_HSP, M1920X1200_RB_R60_VSP,
{2080, 1920, 1920, 160, 1968, 32, 1235, 1200, 1200, 35, 1203, 6} }
};
/* 2048x1536 (CVT) */
-struct crt_mode_table CRTM2048x1536[] = {
- /* r_rate, vclk, hsp, vsp */
+static struct crt_mode_table CRTM2048x1536[] = {
+ /* r_rate, hsp, vsp */
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
- {REFRESH_60, CLK_267_250M, M2048x1536_R60_HSP, M2048x1536_R60_VSP,
+ {REFRESH_60, M2048x1536_R60_HSP, M2048x1536_R60_VSP,
{2800, 2048, 2048, 752, 2200, 224, 1592, 1536, 1536, 56, 1539, 4} }
};
-struct VideoModeTable viafb_modes[] = {
+static struct VideoModeTable viafb_modes[] = {
/* Display : 480x640 (GTF) */
{CRTM480x640, ARRAY_SIZE(CRTM480x640)},
@@ -1016,7 +854,7 @@ struct VideoModeTable viafb_modes[] = {
{CRTM1400x1050, ARRAY_SIZE(CRTM1400x1050)}
};
-struct VideoModeTable viafb_rb_modes[] = {
+static struct VideoModeTable viafb_rb_modes[] = {
/* Display : 1360x768 (CVT Reduce Blanking) */
{CRTM1360x768_RB, ARRAY_SIZE(CRTM1360x768_RB)},
@@ -1040,14 +878,12 @@ struct VideoModeTable viafb_rb_modes[] = {
};
struct crt_mode_table CEAM1280x720[] = {
- {REFRESH_60, CLK_74_270M, M1280X720_CEA_R60_HSP,
- M1280X720_CEA_R60_VSP,
+ {REFRESH_60, M1280X720_CEA_R60_HSP, M1280X720_CEA_R60_VSP,
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
{1650, 1280, 1280, 370, 1390, 40, 750, 720, 720, 30, 725, 5} }
};
struct crt_mode_table CEAM1920x1080[] = {
- {REFRESH_60, CLK_148_500M, M1920X1080_CEA_R60_HSP,
- M1920X1080_CEA_R60_VSP,
+ {REFRESH_60, M1920X1080_CEA_R60_HSP, M1920X1080_CEA_R60_VSP,
/* HT, HA, HBS, HBE, HSS, HSE, VT, VA, VBS, VBE, VSS, VSE */
{2200, 1920, 1920, 300, 2008, 44, 1125, 1080, 1080, 45, 1084, 5} }
};
@@ -1057,7 +893,6 @@ struct VideoModeTable CEA_HDMI_Modes[] = {
{CEAM1920x1080, ARRAY_SIZE(CEAM1920x1080)}
};
-int NUM_TOTAL_RES_MAP_REFRESH = ARRAY_SIZE(res_map_refresh_tbl);
int NUM_TOTAL_CEA_MODES = ARRAY_SIZE(CEA_HDMI_Modes);
int NUM_TOTAL_CN400_ModeXregs = ARRAY_SIZE(CN400_ModeXregs);
int NUM_TOTAL_CN700_ModeXregs = ARRAY_SIZE(CN700_ModeXregs);
diff --git a/drivers/video/via/viamode.h b/drivers/video/via/viamode.h
index 5b1ced86514b..8a67ea1b5ef0 100644
--- a/drivers/video/via/viamode.h
+++ b/drivers/video/via/viamode.h
@@ -41,14 +41,6 @@ struct patch_table {
struct io_reg *io_reg_table;
};
-struct res_map_refresh {
- int hres;
- int vres;
- int pixclock;
- int vmode_refresh;
-};
-
-extern int NUM_TOTAL_RES_MAP_REFRESH;
extern int NUM_TOTAL_CEA_MODES;
extern int NUM_TOTAL_CN400_ModeXregs;
extern int NUM_TOTAL_CN700_ModeXregs;
@@ -66,7 +58,6 @@ extern struct crt_mode_table CEAM1280x720[];
extern struct crt_mode_table CEAM1920x1080[];
extern struct VideoModeTable CEA_HDMI_Modes[];
-extern struct res_map_refresh res_map_refresh_tbl[];
extern struct io_reg CN400_ModeXregs[];
extern struct io_reg CN700_ModeXregs[];
extern struct io_reg KM400_ModeXregs[];
diff --git a/drivers/video/via/vt1636.c b/drivers/video/via/vt1636.c
index 60e4192c2b34..ee2903b472cf 100644
--- a/drivers/video/via/vt1636.c
+++ b/drivers/video/via/vt1636.c
@@ -167,22 +167,6 @@ static int get_clk_range_index(u32 Clk)
return DPA_CLK_RANGE_150M;
}
-static int get_lvds_dpa_setting_index(int panel_size_id,
- struct VT1636_DPA_SETTING *p_vt1636_dpasetting_tbl,
- int tbl_size)
-{
- int i;
-
- for (i = 0; i < tbl_size; i++) {
- if (panel_size_id == p_vt1636_dpasetting_tbl->PanelSizeID)
- return i;
-
- p_vt1636_dpasetting_tbl++;
- }
-
- return 0;
-}
-
static void set_dpa_vt1636(struct lvds_setting_information
*plvds_setting_info, struct lvds_chip_information *plvds_chip_info,
struct VT1636_DPA_SETTING *p_vt1636_dpa_setting)
@@ -206,7 +190,9 @@ void viafb_vt1636_patch_skew_on_vt3324(
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
- int index, size;
+ struct VT1636_DPA_SETTING dpa = {0x00, 0x00}, dpa_16x12 = {0x0B, 0x03},
+ *pdpa;
+ int index;
DEBUG_MSG(KERN_INFO "viafb_vt1636_patch_skew_on_vt3324.\n");
@@ -216,19 +202,21 @@ void viafb_vt1636_patch_skew_on_vt3324(
&GFX_DPA_SETTING_TBL_VT3324[index]);
/* LVDS Transmitter DPA settings: */
- size = ARRAY_SIZE(VT1636_DPA_SETTING_TBL_VT3324);
- index =
- get_lvds_dpa_setting_index(plvds_setting_info->lcd_panel_id,
- VT1636_DPA_SETTING_TBL_VT3324, size);
- set_dpa_vt1636(plvds_setting_info, plvds_chip_info,
- &VT1636_DPA_SETTING_TBL_VT3324[index]);
+ if (plvds_setting_info->lcd_panel_hres == 1600 &&
+ plvds_setting_info->lcd_panel_vres == 1200)
+ pdpa = &dpa_16x12;
+ else
+ pdpa = &dpa;
+
+ set_dpa_vt1636(plvds_setting_info, plvds_chip_info, pdpa);
}
void viafb_vt1636_patch_skew_on_vt3327(
struct lvds_setting_information *plvds_setting_info,
struct lvds_chip_information *plvds_chip_info)
{
- int index, size;
+ struct VT1636_DPA_SETTING dpa = {0x00, 0x00};
+ int index;
DEBUG_MSG(KERN_INFO "viafb_vt1636_patch_skew_on_vt3327.\n");
@@ -238,12 +226,7 @@ void viafb_vt1636_patch_skew_on_vt3327(
&GFX_DPA_SETTING_TBL_VT3327[index]);
/* LVDS Transmitter DPA settings: */
- size = ARRAY_SIZE(VT1636_DPA_SETTING_TBL_VT3327);
- index =
- get_lvds_dpa_setting_index(plvds_setting_info->lcd_panel_id,
- VT1636_DPA_SETTING_TBL_VT3327, size);
- set_dpa_vt1636(plvds_setting_info, plvds_chip_info,
- &VT1636_DPA_SETTING_TBL_VT3327[index]);
+ set_dpa_vt1636(plvds_setting_info, plvds_chip_info, &dpa);
}
void viafb_vt1636_patch_skew_on_vt3364(
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 31649b7b672f..b69d71482554 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -533,6 +533,16 @@ config I6300ESB_WDT
To compile this driver as a module, choose M here: the
module will be called i6300esb.
+config INTEL_SCU_WATCHDOG
+ bool "Intel SCU Watchdog for Mobile Platforms"
+ depends on WATCHDOG
+ depends on INTEL_SCU_IPC
+ ---help---
+	  Hardware driver for the watchdog timer built into the Intel SCU
+ for Intel Mobile Platforms.
+
+	  The driver is currently built-in only; the option is a bool, so
+	  it cannot be built as a module.
+
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
@@ -580,7 +590,7 @@ config IT87_WDT
depends on X86 && EXPERIMENTAL
---help---
This is the driver for the hardware watchdog on the ITE IT8702,
- IT8712, IT8716, IT8718, IT8720, IT8726, IT8712 Super I/O chips.
+ IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 Super I/O chips.
This watchdog simply watches your kernel to make sure it doesn't
freeze, and if it does, it reboots your computer after a certain
amount of time.
@@ -589,18 +599,20 @@ config IT87_WDT
be called it87_wdt.
config HP_WATCHDOG
- tristate "HP Proliant iLO2+ Hardware Watchdog Timer"
+ tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on X86
+ default m
help
A software monitoring watchdog and NMI sourcing driver. This driver
will detect lockups and provide a stack trace. This is a driver that
- will only load on a HP ProLiant system with a minimum of iLO2 support.
+ will only load on an HP ProLiant system with a minimum of iLO2 support.
To compile this driver as a module, choose M here: the module will be
called hpwdt.
config HPWDT_NMI_DECODING
bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
depends on HP_WATCHDOG
+ default y
help
When an NMI occurs this feature will make the necessary BIOS calls to
log the cause of the NMI.
@@ -903,6 +915,12 @@ config INDYDOG
timer expired and no process has written to /dev/watchdog during
that time.
+config JZ4740_WDT
+ tristate "Ingenic jz4740 SoC hardware watchdog"
+ depends on MACH_JZ4740
+ help
+ Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
+
config WDT_MTX1
tristate "MTX-1 Hardware Watchdog"
depends on MIPS_MTX1
@@ -1083,14 +1101,6 @@ config SH_WDT
To compile this driver as a module, choose M here: the
module will be called shwdt.
-config SH_WDT_MMAP
- bool "Allow mmap of SH WDT"
- default n
- depends on SH_WDT
- help
- If you say Y here, user applications will be able to mmap the
- WDT/CPG registers.
-
# SPARC Architecture
# SPARC64 Architecture
@@ -1119,6 +1129,16 @@ config WATCHDOG_RIO
# XTENSA Architecture
+# Xen Architecture
+
+config XEN_WDT
+ tristate "Xen Watchdog support"
+ depends on XEN
+ help
+ Say Y here to support the hypervisor watchdog capability provided
+ by Xen 4.0 and newer. The watchdog timeout period is normally one
+ minute but can be changed with a boot-time parameter.
+
#
# ISA-based Watchdog Cards
#
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 20e44c4782b3..d520bf9c3355 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o
obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o
obj-$(CONFIG_MACHZ_WDT) += machzwd.o
obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
+obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o
# M32R Architecture
@@ -114,6 +115,7 @@ obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
+obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
@@ -148,6 +150,9 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
# XTENSA Architecture
+# Xen
+obj-$(CONFIG_XEN_WDT) += xen_wdt.o
+
# Architecture Independent
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index fa4d36033552..f16dcbd475fb 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this,
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id ali_pci_tbl[] __used = {
+static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = {
{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
{ 0, },
@@ -362,12 +362,12 @@ static int __init ali_find_watchdog(void)
*/
static const struct file_operations ali_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
.write = ali_write,
.unlocked_ioctl = ali_ioctl,
- .open = ali_open,
- .release = ali_release,
+ .open = ali_open,
+ .release = ali_release,
};
static struct miscdevice ali_miscdev = {
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index 4b7a2b4138ed..46f4b85b46de 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@ err_out:
module_init(alim7101_wdt_init);
module_exit(alim7101_wdt_unload);
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
+static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ }
diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c
index 5f245522397b..bd44417c84d4 100644
--- a/drivers/watchdog/bcm47xx_wdt.c
+++ b/drivers/watchdog/bcm47xx_wdt.c
@@ -150,8 +150,8 @@ static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data,
}
static const struct watchdog_info bcm47xx_wdt_info = {
- .identity = DRV_NAME,
- .options = WDIOF_SETTIMEOUT |
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
WDIOF_KEEPALIVEPING |
WDIOF_MAGICCLOSE,
};
diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c
index 9042a95fc98c..b9fa9b71583a 100644
--- a/drivers/watchdog/bfin_wdt.c
+++ b/drivers/watchdog/bfin_wdt.c
@@ -63,7 +63,7 @@ static DEFINE_SPINLOCK(bfin_wdt_spinlock);
/**
* bfin_wdt_keepalive - Keep the Userspace Watchdog Alive
*
- * The Userspace watchdog got a KeepAlive: schedule the next timeout.
+ * The Userspace watchdog got a KeepAlive: schedule the next timeout.
*/
static int bfin_wdt_keepalive(void)
{
@@ -337,7 +337,7 @@ static int bfin_wdt_resume(struct platform_device *pdev)
static const struct file_operations bfin_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
- .write = bfin_wdt_write,
+ .write = bfin_wdt_write,
.unlocked_ioctl = bfin_wdt_ioctl,
.open = bfin_wdt_open,
.release = bfin_wdt_release,
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 7e7ec9c35b6a..337265b47305 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -4,7 +4,7 @@
* Author: Matthew McClintock
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
- * Copyright 2005, 2008, 2010 Freescale Semiconductor Inc.
+ * Copyright 2005, 2008, 2010-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -221,9 +221,8 @@ static int booke_wdt_open(struct inode *inode, struct file *file)
if (booke_wdt_enabled == 0) {
booke_wdt_enabled = 1;
on_each_cpu(__booke_wdt_enable, NULL, 0);
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_debug("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
}
spin_unlock(&booke_wdt_lock);
@@ -240,6 +239,7 @@ static int booke_wdt_release(struct inode *inode, struct file *file)
*/
on_each_cpu(__booke_wdt_disable, NULL, 0);
booke_wdt_enabled = 0;
+ pr_debug("booke_wdt: watchdog disabled\n");
#endif
clear_bit(0, &wdt_is_active);
@@ -271,21 +271,20 @@ static int __init booke_wdt_init(void)
{
int ret = 0;
- printk(KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n");
+ pr_info("booke_wdt: powerpc book-e watchdog driver loaded\n");
ident.firmware_version = cur_cpu_spec->pvr_value;
ret = misc_register(&booke_wdt_miscdev);
if (ret) {
- printk(KERN_CRIT "Cannot register miscdev on minor=%d: %d\n",
- WATCHDOG_MINOR, ret);
+ pr_err("booke_wdt: cannot register device (minor=%u, ret=%i)\n",
+ WATCHDOG_MINOR, ret);
return ret;
}
spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 1) {
- printk(KERN_INFO
- "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n",
- booke_wdt_period);
+ pr_info("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
+ period_to_sec(booke_wdt_period));
on_each_cpu(__booke_wdt_enable, NULL, 0);
}
spin_unlock(&booke_wdt_lock);
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 65911678453d..1e013e8457b7 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -5,10 +5,10 @@
* interface and Solaris-compatible ioctls as best it is
* able.
*
- * NOTE: CP1400 systems appear to have a defective intr_mask
- * register on the PLD, preventing the disabling of
- * timer interrupts. We use a timer to periodically
- * reset 'stopped' watchdogs on affected platforms.
+ * NOTE: CP1400 systems appear to have a defective intr_mask
+ * register on the PLD, preventing the disabling of
+ * timer interrupts. We use a timer to periodically
+ * reset 'stopped' watchdogs on affected platforms.
*
* Copyright (c) 2000 Eric Brower (ebrower@usa.net)
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
@@ -107,13 +107,13 @@ static struct cpwd *cpwd_device;
* -------------------
* |- counter val -|
* -------------------
- * dcntr - Current 16-bit downcounter value.
- * When downcounter reaches '0' watchdog expires.
- * Reading this register resets downcounter with
- * 'limit' value.
- * limit - 16-bit countdown value in 1/10th second increments.
- * Writing this register begins countdown with input value.
- * Reading from this register does not affect counter.
+ * dcntr - Current 16-bit downcounter value.
+ * When downcounter reaches '0' watchdog expires.
+ * Reading this register resets downcounter with
+ * 'limit' value.
+ * limit - 16-bit countdown value in 1/10th second increments.
+ * Writing this register begins countdown with input value.
+ * Reading from this register does not affect counter.
* NOTES: After watchdog reset, dcntr and limit contain '1'
*
* status register (byte access):
@@ -123,7 +123,7 @@ static struct cpwd *cpwd_device;
* |- UNUSED -| EXP | RUN |
* ---------------------------
* status- Bit 0 - Watchdog is running
- * Bit 1 - Watchdog has expired
+ * Bit 1 - Watchdog has expired
*
*** PLD register block definition (struct wd_pld_regblk)
*
@@ -197,7 +197,7 @@ static u8 cpwd_readb(void __iomem *addr)
* Because of the CP1400 defect this should only be
* called during initialization or by wd_[start|stop]timer()
*
- * index - sub-device index, or -1 for 'all'
+ * index - sub-device index, or -1 for 'all'
* enable - non-zero to enable interrupts, zero to disable
*/
static void cpwd_toggleintr(struct cpwd *p, int index, int enable)
@@ -317,13 +317,13 @@ static int cpwd_getstatus(struct cpwd *p, int index)
} else {
/* Fudge WD_EXPIRED status for defective CP1400--
* IF timer is running
- * AND brokenstop is set
- * AND an interrupt has been serviced
+ * AND brokenstop is set
+ * AND an interrupt has been serviced
* we are WD_EXPIRED.
*
* IF timer is running
- * AND brokenstop is set
- * AND no interrupt has been serviced
+ * AND brokenstop is set
+ * AND no interrupt has been serviced
* we are WD_FREERUN.
*/
if (p->broken &&
@@ -613,7 +613,7 @@ static int __devinit cpwd_probe(struct platform_device *op)
if (p->broken) {
init_timer(&cpwd_timer);
- cpwd_timer.function = cpwd_brokentimer;
+ cpwd_timer.function = cpwd_brokentimer;
cpwd_timer.data = (unsigned long) p;
cpwd_timer.expires = WD_BTIMEOUT;
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 3f3dc093ad68..f1d1da662fbe 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -201,7 +201,7 @@ static void eurwdt_ping(void)
static ssize_t eurwdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- if (count) {
+ if (count) {
if (!nowayout) {
size_t i;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 204a5603c4ae..8cb26855bfed 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -52,7 +52,7 @@ static void __iomem *pci_mem_addr; /* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;
static unsigned long __iomem *hpwdt_timer_con;
-static struct pci_device_id hpwdt_devices[] = {
+static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */
{0}, /* terminate list */
diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c
index bb9750a03942..db45091ef434 100644
--- a/drivers/watchdog/i6300esb.c
+++ b/drivers/watchdog/i6300esb.c
@@ -334,7 +334,7 @@ static struct miscdevice esb_miscdev = {
/*
* Data for PCI driver interface
*/
-static struct pci_device_id esb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), },
{ 0, }, /* End of list */
};
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 2c6c2b4ad8bf..35a0d12dad73 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -247,7 +247,7 @@ static struct {
{NULL, 0}
};
-#define ITCO_PCI_DEVICE(dev, data) \
+#define ITCO_PCI_DEVICE(dev, data) \
.vendor = PCI_VENDOR_ID_INTEL, \
.device = dev, \
.subvendor = PCI_ANY_ID, \
@@ -262,7 +262,7 @@ static struct {
* pci_driver, because the I/O Controller Hub has also other
* functions that probably will be registered by other drivers.
*/
-static struct pci_device_id iTCO_wdt_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
new file mode 100644
index 000000000000..919bdd16136f
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -0,0 +1,572 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/fs.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/sfi.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/mrst.h>
+
+#include "intel_scu_watchdog.h"
+
+/* Bounds number of times we will retry loading time count */
+/* This retry is a work around for a silicon bug. */
+#define MAX_RETRY 16
+
+#define IPC_SET_WATCHDOG_TIMER 0xF8
+
+static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
+module_param(timer_margin, int, 0);
+MODULE_PARM_DESC(timer_margin,
+	"Watchdog timer margin. "
+	"Time between the warning interrupt and resetting the system. "
+	"The range is from 1 to 160. "
+	"This is the time for all keep alives to arrive.");
+
+static int timer_set = DEFAULT_TIME;
+module_param(timer_set, int, 0);
+MODULE_PARM_DESC(timer_set,
+	"Default Watchdog timer setting. "
+	"Complete cycle time. "
+	"The range is from 1 to 170. "
+	"This is the time for all keep alives to arrive.");
+
+/* After watchdog device is closed, check force_boot. If:
+ * force_boot == 0, then force boot on next watchdog interrupt after close,
+ * force_boot == 1, then force boot immediately when device is closed.
+ */
+static int force_boot;
+module_param(force_boot, int, 0);
+MODULE_PARM_DESC(force_boot,
+	"A value of 1 means that the driver will reboot "
+	"the system immediately if the /dev/watchdog device is closed. "
+	"A value of 0 means that when the /dev/watchdog device is closed, "
+	"the watchdog timer will be refreshed for one more interval "
+	"of length timer_set; at the end of this interval, the "
+	"watchdog timer will reset the system."
+ );
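+
+/*
+ * Illustrative usage (hypothetical values, not taken from this patch):
+ *   modprobe intel_scu_watchdog timer_set=60 timer_margin=5 force_boot=1
+ * arms a 60 second cycle, raises the warning interrupt 5 seconds before
+ * the hard reset, and reboots immediately if /dev/watchdog is closed.
+ */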
+
+/* there is only one device in the system now; this can be made into
+ * an array in the future if we have more than one device */
+
+static struct intel_scu_watchdog_dev watchdog_device;
+
+/* Forces restart, if force_boot is set */
+static void watchdog_fire(void)
+{
+ if (force_boot) {
+ printk(KERN_CRIT PFX "Initiating system reboot.\n");
+ emergency_restart();
+ printk(KERN_CRIT PFX "Reboot didn't ?????\n");
+ }
+
+ else {
+ printk(KERN_CRIT PFX "Immediate Reboot Disabled\n");
+ printk(KERN_CRIT PFX
+ "System will reset when watchdog timer times out!\n");
+ }
+}
+
+static int check_timer_margin(int new_margin)
+{
+ if ((new_margin < MIN_TIME_CYCLE) ||
+ (new_margin > MAX_TIME - timer_set)) {
+ pr_debug("Watchdog timer: value of new_margin %d is out of the range %d to %d\n",
+ new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * IPC operations
+ */
+static int watchdog_set_ipc(int soft_threshold, int threshold)
+{
+ u32 *ipc_wbuf;
+ u8 cbuf[16] = { '\0' };
+ int ipc_ret = 0;
+
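+	/*
+	 * Layout assumed from the call below: the two thresholds travel as
+	 * two consecutive u32 words of the 16-byte IPC write buffer.
+	 */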
+ ipc_wbuf = (u32 *)&cbuf;
+ ipc_wbuf[0] = soft_threshold;
+ ipc_wbuf[1] = threshold;
+
+ ipc_ret = intel_scu_ipc_command(
+ IPC_SET_WATCHDOG_TIMER,
+ 0,
+ ipc_wbuf,
+ 2,
+ NULL,
+ 0);
+
+ if (ipc_ret != 0)
+ pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
+
+ return ipc_ret;
+}
+
+/*
+ * Intel_SCU operations
+ */
+
+/* timer interrupt handler */
+static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
+{
+ int int_status;
+ int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
+
+ pr_debug("Watchdog timer: irq, int_status: %x\n", int_status);
+
+ if (int_status != 0)
+ return IRQ_NONE;
+
+ /* has the timer been started? If not, then this is spurious */
+ if (watchdog_device.timer_started == 0) {
+ pr_debug("Watchdog timer: spurious interrupt received\n");
+ return IRQ_HANDLED;
+ }
+
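+	/*
+	 * The warning stage has fired; reload the hard threshold. If no
+	 * keepalive arrives before it expires, the SCU resets the platform
+	 * (two-stage behaviour inferred from the soft/hard threshold pair).
+	 */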
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the threshold */
+ iowrite32(watchdog_device.threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return IRQ_HANDLED;
+}
+
+static int intel_scu_keepalive(void)
+{
+
+ /* read eoi register - clears interrupt */
+ ioread32(watchdog_device.timer_clear_interrupt_addr);
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* set the timer to the soft_threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* allow the timer to run */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+ return 0;
+}
+
+static int intel_scu_stop(void)
+{
+ iowrite32(0, watchdog_device.timer_control_addr);
+ return 0;
+}
+
+static int intel_scu_set_heartbeat(u32 t)
+{
+ int ipc_ret;
+ int retry_count;
+ u32 soft_value;
+ u32 hw_pre_value;
+ u32 hw_value;
+
+ watchdog_device.timer_set = t;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+ pr_debug("Watchdog timer: set_heartbeat: timer freq is %d\n",
+ watchdog_device.timer_tbl_ptr->freq_hz);
+ pr_debug("Watchdog timer: set_heartbeat: timer_set is %x (hex)\n",
+ watchdog_device.timer_set);
+ pr_debug("Watchdog timer: set_hearbeat: timer_margin is %x (hex)\n",
+ timer_margin);
+ pr_debug("Watchdog timer: set_heartbeat: threshold is %x (hex)\n",
+ watchdog_device.threshold);
+ pr_debug("Watchdog timer: set_heartbeat: soft_threshold is %x (hex)\n",
+ watchdog_device.soft_threshold);
+
+ /* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
+ /* watchdog timing come out right. */
+ watchdog_device.threshold =
+ watchdog_device.threshold / FREQ_ADJUSTMENT;
+ watchdog_device.soft_threshold =
+ watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
+
+ /* temporarily disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+ /* send the threshold and soft_threshold via IPC to the processor */
+ ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
+ watchdog_device.threshold);
+
+ if (ipc_ret != 0) {
+ /* Make sure the watchdog timer is stopped */
+ intel_scu_stop();
+ return ipc_ret;
+ }
+
+ /* Soft Threshold set loop. Early versions of silicon did */
+ /* not always set this count correctly. This loop checks */
+ /* the value and retries if it was not set correctly. */
+
+ retry_count = 0;
+ soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
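+	/* Only the upper 16 bits are compared below: the low bits start
+	 * counting down as soon as the timer is enabled, so an exact match
+	 * cannot be expected (assumed rationale for the mask). */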
+ do {
+
+ /* Make sure timer is stopped */
+ intel_scu_stop();
+
+ if (MAX_RETRY < retry_count++) {
+ /* Unable to set timer value */
+ pr_err("Watchdog timer: Unable to set timer\n");
+ return -ENODEV;
+ }
+
+ /* set the timer to the soft threshold */
+ iowrite32(watchdog_device.soft_threshold,
+ watchdog_device.timer_load_count_addr);
+
+ /* read count value before starting timer */
+ hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_pre_value = hw_pre_value & 0xFFFF0000;
+
+ /* Start the timer */
+ iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+		/* read the value the timer loaded into its count reg */
+ hw_value = ioread32(watchdog_device.timer_load_count_addr);
+ hw_value = hw_value & 0xFFFF0000;
+
+ } while (soft_value != hw_value);
+
+ watchdog_device.timer_started = 1;
+
+ return 0;
+}
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+
+ /* Set flag to indicate that watchdog device is open */
+ if (test_and_set_bit(0, &watchdog_device.driver_open))
+ return -EBUSY;
+
+ /* Check for reopen of driver. Reopens are not allowed */
+ if (watchdog_device.driver_closed)
+ return -EPERM;
+
+ return nonseekable_open(inode, file);
+}
+
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+ /*
+	 * This watchdog should not be closed after the timer
+	 * has been started with the WDIOC_SETTIMEOUT ioctl.
+	 * If force_boot is set, watchdog_fire() will cause an
+ * immediate reset. If force_boot is not set, the watchdog
+ * timer is refreshed for one more interval. At the end
+ * of that interval, the watchdog timer will reset the system.
+ */
+
+ if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
+ pr_debug("Watchdog timer: intel_scu_release, without open\n");
+ return -ENOTTY;
+ }
+
+ if (!watchdog_device.timer_started) {
+ /* Just close, since timer has not been started */
+ pr_debug("Watchdog timer: closed, without starting timer\n");
+ return 0;
+ }
+
+ printk(KERN_CRIT PFX
+ "Unexpected close of /dev/watchdog!\n");
+
+ /* Since the timer was started, prevent future reopens */
+ watchdog_device.driver_closed = 1;
+
+ /* Refresh the timer for one more interval */
+ intel_scu_keepalive();
+
+ /* Reboot system (if force_boot is set) */
+ watchdog_fire();
+
+ /* We should only reach this point if force_boot is not set */
+ return 0;
+}
+
+static ssize_t intel_scu_write(struct file *file,
+ char const *data,
+ size_t len,
+ loff_t *ppos)
+{
+
+ if (watchdog_device.timer_started)
+ /* Watchdog already started, keep it alive */
+ intel_scu_keepalive();
+ else
+ /* Start watchdog with timer value set by init */
+ intel_scu_set_heartbeat(watchdog_device.timer_set);
+
+ return len;
+}
+
+static long intel_scu_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ u32 __user *p = argp;
+ u32 new_margin;
+
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT
+ | WDIOF_KEEPALIVEPING,
+ .firmware_version = 0, /* @todo Get from SCU via
+ ipc_get_scu_fw_version()? */
+ .identity = "Intel_SCU IOH Watchdog" /* len < 32 */
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp,
+ &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, p);
+ case WDIOC_KEEPALIVE:
+ intel_scu_keepalive();
+
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_margin, p))
+ return -EFAULT;
+
+ if (check_timer_margin(new_margin))
+ return -EINVAL;
+
+ if (intel_scu_set_heartbeat(new_margin))
+ return -EINVAL;
+ return 0;
+ case WDIOC_GETTIMEOUT:
+ return put_user(watchdog_device.soft_threshold, p);
+
+ default:
+ return -ENOTTY;
+ }
+}
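+
+/*
+ * Illustrative userspace sketch (not part of this driver), assuming the
+ * standard watchdog chardev ABI implemented above:
+ *
+ *	int fd = open("/dev/watchdog", O_WRONLY);
+ *	for (;;) {
+ *		ioctl(fd, WDIOC_KEEPALIVE, NULL);
+ *		sleep(2);
+ *	}
+ *
+ * Note that closing fd does not stop the timer; see intel_scu_release()
+ * above for what happens instead.
+ */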
+
+/*
+ * Notifier for system down
+ */
+static int intel_scu_notify_sys(struct notifier_block *this,
+ unsigned long code,
+ void *another_unused)
+{
+ if (code == SYS_DOWN || code == SYS_HALT)
+ /* Turn off the watchdog timer. */
+ intel_scu_stop();
+ return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+static const struct file_operations intel_scu_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = intel_scu_write,
+ .unlocked_ioctl = intel_scu_ioctl,
+ .open = intel_scu_open,
+ .release = intel_scu_release,
+};
+
+static int __init intel_scu_watchdog_init(void)
+{
+ int ret;
+ u32 __iomem *tmp_addr;
+
+ /*
+	 * We don't strictly need this check, since the SFI timer lookup
+	 * below would fail anyway, but doing it here lets us exit with a
+	 * clearer reason and without noise.
+ *
+ * If it isn't an intel MID device then it doesn't have this watchdog
+ */
+ if (!mrst_identify_cpu())
+ return -ENODEV;
+
+ /* Check boot parameters to verify that their initial values */
+ /* are in range. */
+ /* Check value of timer_set boot parameter */
+ if ((timer_set < MIN_TIME_CYCLE) ||
+ (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
+ pr_err("Watchdog timer: value of timer_set %x (hex) "
+ "is out of range from %x to %x (hex)\n",
+ timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
+ return -EINVAL;
+ }
+
+ /* Check value of timer_margin boot parameter */
+ if (check_timer_margin(timer_margin))
+ return -EINVAL;
+
+ watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
+
+ if (watchdog_device.timer_tbl_ptr == NULL) {
+ pr_debug("Watchdog timer - Intel SCU watchdog: timer is not available\n");
+ return -ENODEV;
+ }
+ /* make sure the timer exists */
+ if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
+ pr_debug("Watchdog timer - Intel SCU watchdog - timer %d does not have valid physical memory\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ if (watchdog_device.timer_tbl_ptr->irq == 0) {
+ pr_debug("Watchdog timer: timer %d invalid irq\n",
+ sfi_mtimer_num);
+ return -ENODEV;
+ }
+
+ tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+ 20);
+
+ if (tmp_addr == NULL) {
+ pr_debug("Watchdog timer: timer unable to ioremap\n");
+ return -ENOMEM;
+ }
+
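+	/* Five consecutive 32-bit registers; the 20-byte mapping above
+	 * covers exactly these (5 regs x 4 bytes). */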
+ watchdog_device.timer_load_count_addr = tmp_addr++;
+ watchdog_device.timer_current_value_addr = tmp_addr++;
+ watchdog_device.timer_control_addr = tmp_addr++;
+ watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
+ watchdog_device.timer_interrupt_status_addr = tmp_addr++;
+
+ /* Set the default time values in device structure */
+
+ watchdog_device.timer_set = timer_set;
+ watchdog_device.threshold =
+ timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+ watchdog_device.soft_threshold =
+ (watchdog_device.timer_set - timer_margin)
+ * watchdog_device.timer_tbl_ptr->freq_hz;
+
+ watchdog_device.intel_scu_notifier.notifier_call =
+ intel_scu_notify_sys;
+
+ ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ if (ret) {
+ pr_err("Watchdog timer: cannot register notifier %d)\n", ret);
+ goto register_reboot_error;
+ }
+
+ watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+ watchdog_device.miscdev.name = "watchdog";
+ watchdog_device.miscdev.fops = &intel_scu_fops;
+
+ ret = misc_register(&watchdog_device.miscdev);
+ if (ret) {
+ pr_err("Watchdog timer: cannot register miscdev %d err =%d\n",
+ WATCHDOG_MINOR, ret);
+ goto misc_register_error;
+ }
+
+ ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
+ watchdog_timer_interrupt,
+ IRQF_SHARED, "watchdog",
+ &watchdog_device.timer_load_count_addr);
+ if (ret) {
+ pr_err("Watchdog timer: error requesting irq %d\n", ret);
+ goto request_irq_error;
+ }
+ /* Make sure timer is disabled before returning */
+ intel_scu_stop();
+ return 0;
+
+/* error cleanup */
+
+request_irq_error:
+ misc_deregister(&watchdog_device.miscdev);
+misc_register_error:
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+register_reboot_error:
+ intel_scu_stop();
+ iounmap(watchdog_device.timer_load_count_addr);
+ return ret;
+}
+
+static void __exit intel_scu_watchdog_exit(void)
+{
+
+ misc_deregister(&watchdog_device.miscdev);
+ unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+ /* disable the timer */
+ iowrite32(0x00000002, watchdog_device.timer_control_addr);
+ iounmap(watchdog_device.timer_load_count_addr);
+}
+
+late_initcall(intel_scu_watchdog_init);
+module_exit(intel_scu_watchdog_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/intel_scu_watchdog.h b/drivers/watchdog/intel_scu_watchdog.h
new file mode 100644
index 000000000000..d2b074a82db6
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.h
@@ -0,0 +1,66 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#define PFX "Intel_SCU: "
+#define WDT_VER "0.3"
+
+/* minimum time between interrupts */
+#define MIN_TIME_CYCLE 1
+
+/* Time from warning to reboot is 2 seconds */
+#define DEFAULT_SOFT_TO_HARD_MARGIN 2
+
+#define MAX_TIME 170
+
+#define DEFAULT_TIME 5
+
+#define MAX_SOFT_TO_HARD_MARGIN (MAX_TIME-MIN_TIME_CYCLE)
+
+/* Adjustment to clock tick frequency to make timing come out right */
+#define FREQ_ADJUSTMENT 8
+
+struct intel_scu_watchdog_dev {
+ ulong driver_open;
+ ulong driver_closed;
+ u32 timer_started;
+ u32 timer_set;
+ u32 threshold;
+ u32 soft_threshold;
+ u32 __iomem *timer_load_count_addr;
+ u32 __iomem *timer_current_value_addr;
+ u32 __iomem *timer_control_addr;
+ u32 __iomem *timer_clear_interrupt_addr;
+ u32 __iomem *timer_interrupt_status_addr;
+ struct sfi_timer_table_entry *timer_tbl_ptr;
+ struct notifier_block intel_scu_notifier;
+ struct miscdevice miscdev;
+};
+
+extern int sfi_mtimer_num;
+
+/* extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); */
+#endif /* __INTEL_SCU_WATCHDOG_H */
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index b32c6c045b1a..6143f52ba6b8 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -69,7 +69,7 @@ static unsigned short address;
#define IT8712F_DEVID 0x8712
#define LDN_GPIO 0x07 /* GPIO and Watch Dog Timer */
-#define LDN_GAME 0x09 /* Game Port */
+#define LDN_GAME 0x09 /* Game Port */
#define WDT_CONTROL 0x71 /* WDT Register: Control */
#define WDT_CONFIG 0x72 /* WDT Register: Configuration */
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index dad29245a6a7..b1bc72f9a209 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,7 +12,7 @@
* http://www.ite.com.tw/
*
* Support of the watchdog timers, which are available on
- * IT8702, IT8712, IT8716, IT8718, IT8720 and IT8726.
+ * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721 and IT8726.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -45,7 +45,7 @@
#include <asm/system.h>
-#define WATCHDOG_VERSION "1.13"
+#define WATCHDOG_VERSION "1.14"
#define WATCHDOG_NAME "IT87 WDT"
#define PFX WATCHDOG_NAME ": "
#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n"
@@ -54,7 +54,7 @@
/* Defaults for Module Parameter */
#define DEFAULT_NOGAMEPORT 0
#define DEFAULT_EXCLUSIVE 1
-#define DEFAULT_TIMEOUT 60
+#define DEFAULT_TIMEOUT 60
#define DEFAULT_TESTMODE 0
#define DEFAULT_NOWAYOUT WATCHDOG_NOWAYOUT
@@ -70,9 +70,9 @@
/* Configuration Registers and Functions */
#define LDNREG 0x07
#define CHIPID 0x20
-#define CHIPREV 0x22
+#define CHIPREV 0x22
#define ACTREG 0x30
-#define BASEREG 0x60
+#define BASEREG 0x60
/* Chip Id numbers */
#define NO_DEV_ID 0xffff
@@ -82,10 +82,11 @@
#define IT8716_ID 0x8716
#define IT8718_ID 0x8718
#define IT8720_ID 0x8720
+#define IT8721_ID 0x8721
#define IT8726_ID	0x8726	/* the data sheet wrongly suggests 0x8716 */
/* GPIO Configuration Registers LDN=0x07 */
-#define WDTCTRL 0x71
+#define WDTCTRL 0x71
#define WDTCFG 0x72
#define WDTVALLSB 0x73
#define WDTVALMSB 0x74
@@ -94,7 +95,7 @@
#define WDT_CIRINT 0x80
#define WDT_MOUSEINT 0x40
#define WDT_KYBINT 0x20
-#define WDT_GAMEPORT 0x10 /* not in it8718, it8720 */
+#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721 */
#define WDT_FORCE 0x02
#define WDT_ZERO 0x01
@@ -102,11 +103,11 @@
#define WDT_TOV1 0x80
#define WDT_KRST 0x40
#define WDT_TOVE 0x20
-#define WDT_PWROK 0x10
+#define WDT_PWROK 0x10 /* not in it8721 */
#define WDT_INT_MASK 0x0f
/* CIR Configuration Register LDN=0x0a */
-#define CIR_ILS 0x70
+#define CIR_ILS 0x70
/* The default Base address is not always available, we use this */
#define CIR_BASE 0x0208
@@ -134,7 +135,7 @@
#define WDTS_USE_GP 4
#define WDTS_EXPECTED 5
-static unsigned int base, gpact, ciract, max_units;
+static unsigned int base, gpact, ciract, max_units, chip_type;
static unsigned long wdt_status;
static DEFINE_SPINLOCK(spinlock);
@@ -215,7 +216,7 @@ static inline void superio_outw(int val, int reg)
/* Internal function, should be called after superio_select(GPIO) */
static void wdt_update_timeout(void)
{
- unsigned char cfg = WDT_KRST | WDT_PWROK;
+ unsigned char cfg = WDT_KRST;
int tm = timeout;
if (testmode)
@@ -226,6 +227,9 @@ static void wdt_update_timeout(void)
else
tm /= 60;
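+	/* WDT_PWROK does not exist on the IT8721 (see the register list above) */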
+ if (chip_type != IT8721_ID)
+ cfg |= WDT_PWROK;
+
superio_outb(cfg, WDTCFG);
superio_outb(tm, WDTVALLSB);
if (max_units > 255)
@@ -555,7 +559,6 @@ static int __init it87_wdt_init(void)
{
int rc = 0;
int try_gameport = !nogameport;
- u16 chip_type;
u8 chip_rev;
unsigned long flags;
@@ -581,6 +584,7 @@ static int __init it87_wdt_init(void)
break;
case IT8718_ID:
case IT8720_ID:
+ case IT8721_ID:
max_units = 65535;
try_gameport = 0;
break;
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
new file mode 100644
index 000000000000..684ba01fb540
--- /dev/null
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net>
+ * JZ4740 Watchdog driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include <asm/mach-jz4740/timer.h>
+
+#define JZ_REG_WDT_TIMER_DATA 0x0
+#define JZ_REG_WDT_COUNTER_ENABLE 0x4
+#define JZ_REG_WDT_TIMER_COUNTER 0x8
+#define JZ_REG_WDT_TIMER_CONTROL 0xC
+
+#define JZ_WDT_CLOCK_PCLK 0x1
+#define JZ_WDT_CLOCK_RTC 0x2
+#define JZ_WDT_CLOCK_EXT 0x4
+
+#define WDT_IN_USE 0
+#define WDT_OK_TO_CLOSE 1
+
+#define JZ_WDT_CLOCK_DIV_SHIFT 3
+
+#define JZ_WDT_CLOCK_DIV_1 (0 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_4 (1 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_16 (2 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_64 (3 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_256 (4 << JZ_WDT_CLOCK_DIV_SHIFT)
+#define JZ_WDT_CLOCK_DIV_1024 (5 << JZ_WDT_CLOCK_DIV_SHIFT)
+
+#define DEFAULT_HEARTBEAT 5
+#define MAX_HEARTBEAT 2048
+
+static struct {
+ void __iomem *base;
+ struct resource *mem;
+ struct clk *rtc_clk;
+ unsigned long status;
+} jz4740_wdt;
+
+static int heartbeat = DEFAULT_HEARTBEAT;
+
+
+static void jz4740_wdt_service(void)
+{
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+}
+
+static void jz4740_wdt_set_heartbeat(int new_heartbeat)
+{
+ unsigned int rtc_clk_rate;
+ unsigned int timeout_value;
+ unsigned short clock_div = JZ_WDT_CLOCK_DIV_1;
+
+ heartbeat = new_heartbeat;
+
+ rtc_clk_rate = clk_get_rate(jz4740_wdt.rtc_clk);
+
+ timeout_value = rtc_clk_rate * heartbeat;
+ while (timeout_value > 0xffff) {
+ if (clock_div == JZ_WDT_CLOCK_DIV_1024) {
+ /* Requested timeout too high;
+ * use highest possible value. */
+ timeout_value = 0xffff;
+ break;
+ }
+ timeout_value >>= 2;
+ clock_div += (1 << JZ_WDT_CLOCK_DIV_SHIFT);
+ }
+
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+ writew(clock_div, jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writew((u16)timeout_value, jz4740_wdt.base + JZ_REG_WDT_TIMER_DATA);
+ writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+ writew(clock_div | JZ_WDT_CLOCK_RTC,
+ jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+ writeb(0x1, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static void jz4740_wdt_enable(void)
+{
+ jz4740_timer_enable_watchdog();
+ jz4740_wdt_set_heartbeat(heartbeat);
+}
+
+static void jz4740_wdt_disable(void)
+{
+ jz4740_timer_disable_watchdog();
+ writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static int jz4740_wdt_open(struct inode *inode, struct file *file)
+{
+ if (test_and_set_bit(WDT_IN_USE, &jz4740_wdt.status))
+ return -EBUSY;
+
+ jz4740_wdt_enable();
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t jz4740_wdt_write(struct file *file, const char *data,
+ size_t len, loff_t *ppos)
+{
+ if (len) {
+ size_t i;
+
+ clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ for (i = 0; i != len; i++) {
+ char c;
+
+ if (get_user(c, data + i))
+ return -EFAULT;
+
+ if (c == 'V')
+ set_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+ }
+ jz4740_wdt_service();
+ }
+
+ return len;
+}
+
+static const struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "jz4740 Watchdog",
+};
+
+static long jz4740_wdt_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -ENOTTY;
+ int heartbeat_seconds;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ sizeof(ident)) ? -EFAULT : 0;
+ break;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, (int *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ jz4740_wdt_service();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(heartbeat_seconds, (int __user *)arg))
+ return -EFAULT;
+
+ jz4740_wdt_set_heartbeat(heartbeat_seconds);
+ return 0;
+
+ case WDIOC_GETTIMEOUT:
+ return put_user(heartbeat, (int *)arg);
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int jz4740_wdt_release(struct inode *inode, struct file *file)
+{
+ jz4740_wdt_service();
+
+ if (test_and_clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status))
+ jz4740_wdt_disable();
+
+ clear_bit(WDT_IN_USE, &jz4740_wdt.status);
+ return 0;
+}
+
+static const struct file_operations jz4740_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = jz4740_wdt_write,
+ .unlocked_ioctl = jz4740_wdt_ioctl,
+ .open = jz4740_wdt_open,
+ .release = jz4740_wdt_release,
+};
+
+static struct miscdevice jz4740_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &jz4740_wdt_fops,
+};
+
+static int __devinit jz4740_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0, size;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "failed to get memory region resource\n");
+ return -ENXIO;
+ }
+
+ size = resource_size(res);
+ jz4740_wdt.mem = request_mem_region(res->start, size, pdev->name);
+ if (jz4740_wdt.mem == NULL) {
+ dev_err(dev, "failed to get memory region\n");
+ return -EBUSY;
+ }
+
+ jz4740_wdt.base = ioremap_nocache(res->start, size);
+ if (jz4740_wdt.base == NULL) {
+ dev_err(dev, "failed to map memory region\n");
+ ret = -EBUSY;
+ goto err_release_region;
+ }
+
+ jz4740_wdt.rtc_clk = clk_get(NULL, "rtc");
+ if (IS_ERR(jz4740_wdt.rtc_clk)) {
+ dev_err(dev, "cannot find RTC clock\n");
+ ret = PTR_ERR(jz4740_wdt.rtc_clk);
+ goto err_iounmap;
+ }
+
+ ret = misc_register(&jz4740_wdt_miscdev);
+ if (ret < 0) {
+ dev_err(dev, "cannot register misc device\n");
+ goto err_disable_clk;
+ }
+
+ return 0;
+
+err_disable_clk:
+ clk_put(jz4740_wdt.rtc_clk);
+err_iounmap:
+ iounmap(jz4740_wdt.base);
+err_release_region:
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ return ret;
+}
+
+
+static int __devexit jz4740_wdt_remove(struct platform_device *pdev)
+{
+ jz4740_wdt_disable();
+ misc_deregister(&jz4740_wdt_miscdev);
+ clk_put(jz4740_wdt.rtc_clk);
+
+ iounmap(jz4740_wdt.base);
+ jz4740_wdt.base = NULL;
+
+ release_mem_region(jz4740_wdt.mem->start,
+ resource_size(jz4740_wdt.mem));
+ jz4740_wdt.mem = NULL;
+
+ return 0;
+}
+
+
+static struct platform_driver jz4740_wdt_driver = {
+ .probe = jz4740_wdt_probe,
+ .remove = __devexit_p(jz4740_wdt_remove),
+ .driver = {
+ .name = "jz4740-wdt",
+ .owner = THIS_MODULE,
+ },
+};
+
+
+static int __init jz4740_wdt_init(void)
+{
+ return platform_driver_register(&jz4740_wdt_driver);
+}
+module_init(jz4740_wdt_init);
+
+static void __exit jz4740_wdt_exit(void)
+{
+ platform_driver_unregister(&jz4740_wdt_driver);
+}
+module_exit(jz4740_wdt_exit);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("jz4740 Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:jz4740-wdt");
diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c
index 928035069396..1332b838cc58 100644
--- a/drivers/watchdog/machzwd.c
+++ b/drivers/watchdog/machzwd.c
@@ -54,7 +54,7 @@
/* indexes */ /* size */
#define ZFL_VERSION 0x02 /* 16 */
-#define CONTROL 0x10 /* 16 */
+#define CONTROL 0x10 /* 16 */
#define STATUS 0x12 /* 8 */
#define COUNTER_1 0x0C /* 16 */
#define COUNTER_2 0x0E /* 8 */
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 3053ff05ca41..7a82ce5a6337 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -41,7 +41,7 @@ static int nowayout = WATCHDOG_NOWAYOUT;
* to ping the watchdog.
*/
#define MAX6369_WDSET (7 << 0)
-#define MAX6369_WDI (1 << 3)
+#define MAX6369_WDI (1 << 3)
static DEFINE_SPINLOCK(io_lock);
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index ea438ad53055..6709d723e017 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -2,9 +2,9 @@
* mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface
*
* Authors: Dave Updegraff <dave@cray.org>
- * Kumar Gala <galak@kernel.crashing.org>
- * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
- * ..and from sc520_wdt
+ * Kumar Gala <galak@kernel.crashing.org>
+ * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org>
+ * ..and from sc520_wdt
* Copyright (c) 2008 MontaVista Software, Inc.
* Anton Vorontsov <avorontsov@ru.mvista.com>
*
diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c
index b8ec7aca3c8e..2b4af222b5f2 100644
--- a/drivers/watchdog/mpcore_wdt.c
+++ b/drivers/watchdog/mpcore_wdt.c
@@ -172,7 +172,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file)
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (wdt->expect_close == 42)
mpcore_wdt_stop(wdt);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 08e8a6ab74e1..5ec5ac1f7878 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -190,19 +190,19 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf,
}
static const struct file_operations mtx1_wdt_fops = {
- .owner = THIS_MODULE,
+ .owner = THIS_MODULE,
.llseek = no_llseek,
.unlocked_ioctl = mtx1_wdt_ioctl,
- .open = mtx1_wdt_open,
- .write = mtx1_wdt_write,
- .release = mtx1_wdt_release,
+ .open = mtx1_wdt_open,
+ .write = mtx1_wdt_write,
+ .release = mtx1_wdt_release,
};
static struct miscdevice mtx1_wdt_misc = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &mtx1_wdt_fops,
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &mtx1_wdt_fops,
};
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
index 1a50aa7079bf..267377a5a83e 100644
--- a/drivers/watchdog/nv_tco.c
+++ b/drivers/watchdog/nv_tco.c
@@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = {
* register a pci_driver, because someone else might one day
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
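Several tables in this series (nv_tco, pcwd_pci, sp5100_tco, wdtpci) are converted to DEFINE_PCI_DEVICE_TABLE(). To the best of my recollection — an assumption, not a quote from this tree — the macro in kernels of this era expands to:

    /* include/linux/pci.h, circa 2.6.38 (reproduced from memory) */
    #define DEFINE_PCI_DEVICE_TABLE(_table) \
        const struct pci_device_id _table[] __devinitconst

so the conversion adds const and places the table in the __devinitconst section; the entries themselves are unchanged.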
diff --git a/drivers/watchdog/omap_wdt.h b/drivers/watchdog/omap_wdt.h
index fc02ec6a0386..09b774cf75b9 100644
--- a/drivers/watchdog/omap_wdt.h
+++ b/drivers/watchdog/omap_wdt.h
@@ -44,7 +44,7 @@
* months before firing. These limits work without scaling,
* with the 60 second default assumed by most tools and docs.
*/
-#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
+#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */
#define TIMER_MARGIN_DEFAULT 60 /* 60 secs */
#define TIMER_MARGIN_MIN 1
diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c
index 3a56bc360924..139d773300c6 100644
--- a/drivers/watchdog/pc87413_wdt.c
+++ b/drivers/watchdog/pc87413_wdt.c
@@ -514,7 +514,7 @@ static struct miscdevice pc87413_miscdev = {
/* -- Module init functions -------------------------------------*/
/**
- * pc87413_init: module's "constructor"
+ * pc87413_init: module's "constructor"
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c
index 64374d636f09..b8d14f88f0b5 100644
--- a/drivers/watchdog/pcwd_pci.c
+++ b/drivers/watchdog/pcwd_pci.c
@@ -817,7 +817,7 @@ static void __devexit pcipcwd_card_exit(struct pci_dev *pdev)
cards_found--;
}
-static struct pci_device_id pcipcwd_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = {
{ PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0 }, /* End of list */
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index bf5b97c546eb..c7cf4cbf8ab3 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for PNX4008 board
*
* Authors: Dmitry Chigirev <source@mvista.com>,
- * Vitaly Wool <vitalywool@gmail.com>
+ * Vitaly Wool <vitalywool@gmail.com>
* Based on sa1100 driver,
* Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
*
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index ae53662c29bc..25b39bf35925 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -224,7 +224,7 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42)
diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c
index 68e2e2d6f73d..514ec23050f7 100644
--- a/drivers/watchdog/sbc8360.c
+++ b/drivers/watchdog/sbc8360.c
@@ -114,7 +114,7 @@ static char expect_close;
* C | 6.5s 65s 650s 1300s
* D | 7s 70s 700s 1400s
* E | 7.5s 75s 750s 1500s
- * F | 8s 80s 800s 1600s
+ * F | 8s 80s 800s 1600s
*
* Another way to say the same things is:
* For N=1, Timeout = (M+1) * 0.5s
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 79906255eeb6..d5d399464599 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -41,7 +41,7 @@ static DEFINE_MUTEX(wdt_lock);
#define IFACE_ON_COMMAND 1
#define REBOOT_COMMAND 2
-#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
+#define WATCHDOG_NAME "SBC-FITPC2 Watchdog"
static void wdt_send_data(unsigned char command, unsigned char data)
{
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index 6fc74065abee..4e3e7eb5919c 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -1,9 +1,9 @@
/*
- * drivers/char/watchdog/shwdt.c
+ * drivers/watchdog/shwdt.c
*
* Watchdog driver for integrated watchdog in the SuperH processors.
*
- * Copyright (C) 2001, 2002, 2003 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (C) 2001 - 2010 Paul Mundt <lethal@linux-sh.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -19,6 +19,7 @@
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
@@ -28,11 +29,12 @@
#include <linux/ioport.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/watchdog.h>
-#define PFX "shwdt: "
+#define DRV_NAME "sh-wdt"
/*
* Default clock division ratio is 5.25 msecs. For an additional table of
@@ -62,37 +64,36 @@
* misses its deadline, the kernel timer will allow the WDT to overflow.
*/
static int clock_division_ratio = WTCSR_CKS_4096;
-
#define next_ping_period(cks) msecs_to_jiffies(cks - 4)
-static void sh_wdt_ping(unsigned long data);
-
-static unsigned long shwdt_is_open;
static const struct watchdog_info sh_wdt_info;
-static char shwdt_expect_close;
-static DEFINE_TIMER(timer, sh_wdt_ping, 0, 0);
-static unsigned long next_heartbeat;
+static struct platform_device *sh_wdt_dev;
static DEFINE_SPINLOCK(shwdt_lock);
#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
-
static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned long next_heartbeat;
-/**
- * sh_wdt_start - Start the Watchdog
- *
- * Starts the watchdog.
- */
-static void sh_wdt_start(void)
+struct sh_wdt {
+ void __iomem *base;
+ struct device *dev;
+
+ struct timer_list timer;
+
+ unsigned long enabled;
+ char expect_close;
+};
+
+static void sh_wdt_start(struct sh_wdt *wdt)
{
- __u8 csr;
unsigned long flags;
+ u8 csr;
spin_lock_irqsave(&shwdt_lock, flags);
next_heartbeat = jiffies + (heartbeat * HZ);
- mod_timer(&timer, next_ping_period(clock_division_ratio));
+ mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
csr = sh_wdt_read_csr();
csr |= WTCSR_WT | clock_division_ratio;
@@ -114,15 +115,6 @@ static void sh_wdt_start(void)
sh_wdt_write_csr(csr);
#ifdef CONFIG_CPU_SH2
- /*
- * Whoever came up with the RSTCSR semantics must've been smoking
- * some of the good stuff, since in addition to the WTCSR/WTCNT write
- * brain-damage, it's managed to fuck things up one step further..
- *
- * If we need to clear the WOVF bit, the upper byte has to be 0xa5..
- * but if we want to touch RSTE or RSTS, the upper byte has to be
- * 0x5a..
- */
csr = sh_wdt_read_rstcsr();
csr &= ~RSTCSR_RSTS;
sh_wdt_write_rstcsr(csr);
@@ -130,30 +122,23 @@ static void sh_wdt_start(void)
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_stop - Stop the Watchdog
- * Stops the watchdog.
- */
-static void sh_wdt_stop(void)
+static void sh_wdt_stop(struct sh_wdt *wdt)
{
- __u8 csr;
unsigned long flags;
+ u8 csr;
spin_lock_irqsave(&shwdt_lock, flags);
- del_timer(&timer);
+ del_timer(&wdt->timer);
csr = sh_wdt_read_csr();
csr &= ~WTCSR_TME;
sh_wdt_write_csr(csr);
+
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_keepalive - Keep the Userspace Watchdog Alive
- * The Userspace watchdog got a KeepAlive: schedule the next heartbeat.
- */
-static inline void sh_wdt_keepalive(void)
+static inline void sh_wdt_keepalive(struct sh_wdt *wdt)
{
unsigned long flags;
@@ -162,10 +147,6 @@ static inline void sh_wdt_keepalive(void)
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_set_heartbeat - Set the Userspace Watchdog heartbeat
- * Set the Userspace Watchdog heartbeat
- */
static int sh_wdt_set_heartbeat(int t)
{
unsigned long flags;
@@ -179,19 +160,14 @@ static int sh_wdt_set_heartbeat(int t)
return 0;
}
-/**
- * sh_wdt_ping - Ping the Watchdog
- * @data: Unused
- *
- * Clears overflow bit, resets timer counter.
- */
static void sh_wdt_ping(unsigned long data)
{
+ struct sh_wdt *wdt = (struct sh_wdt *)data;
unsigned long flags;
spin_lock_irqsave(&shwdt_lock, flags);
if (time_before(jiffies, next_heartbeat)) {
- __u8 csr;
+ u8 csr;
csr = sh_wdt_read_csr();
csr &= ~WTCSR_IOVF;
@@ -199,148 +175,76 @@ static void sh_wdt_ping(unsigned long data)
sh_wdt_write_cnt(0);
- mod_timer(&timer, next_ping_period(clock_division_ratio));
+ mod_timer(&wdt->timer, next_ping_period(clock_division_ratio));
} else
- printk(KERN_WARNING PFX "Heartbeat lost! Will not ping "
- "the watchdog\n");
+ dev_warn(wdt->dev, "Heartbeat lost! Will not ping "
+ "the watchdog\n");
spin_unlock_irqrestore(&shwdt_lock, flags);
}
-/**
- * sh_wdt_open - Open the Device
- * @inode: inode of device
- * @file: file handle of device
- *
- * Watchdog device is opened and started.
- */
static int sh_wdt_open(struct inode *inode, struct file *file)
{
- if (test_and_set_bit(0, &shwdt_is_open))
+ struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
+
+ if (test_and_set_bit(0, &wdt->enabled))
return -EBUSY;
if (nowayout)
__module_get(THIS_MODULE);
- sh_wdt_start();
+ file->private_data = wdt;
+
+ sh_wdt_start(wdt);
return nonseekable_open(inode, file);
}
-/**
- * sh_wdt_close - Close the Device
- * @inode: inode of device
- * @file: file handle of device
- *
- * Watchdog device is closed and stopped.
- */
static int sh_wdt_close(struct inode *inode, struct file *file)
{
- if (shwdt_expect_close == 42) {
- sh_wdt_stop();
+ struct sh_wdt *wdt = file->private_data;
+
+ if (wdt->expect_close == 42) {
+ sh_wdt_stop(wdt);
} else {
- printk(KERN_CRIT PFX "Unexpected close, not "
- "stopping watchdog!\n");
- sh_wdt_keepalive();
+ dev_crit(wdt->dev, "Unexpected close, not "
+ "stopping watchdog!\n");
+ sh_wdt_keepalive(wdt);
}
- clear_bit(0, &shwdt_is_open);
- shwdt_expect_close = 0;
+ clear_bit(0, &wdt->enabled);
+ wdt->expect_close = 0;
return 0;
}
-/**
- * sh_wdt_write - Write to Device
- * @file: file handle of device
- * @buf: buffer to write
- * @count: length of buffer
- * @ppos: offset
- *
- * Pings the watchdog on write.
- */
static ssize_t sh_wdt_write(struct file *file, const char *buf,
size_t count, loff_t *ppos)
{
+ struct sh_wdt *wdt = file->private_data;
+
if (count) {
if (!nowayout) {
size_t i;
- shwdt_expect_close = 0;
+ wdt->expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
- shwdt_expect_close = 42;
+ wdt->expect_close = 42;
}
}
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
}
return count;
}
-/**
- * sh_wdt_mmap - map WDT/CPG registers into userspace
- * @file: file structure for the device
- * @vma: VMA to map the registers into
- *
- * A simple mmap() implementation for the corner cases where the counter
- * needs to be mapped in userspace directly. Due to the relatively small
- * size of the area, neighbouring registers not necessarily tied to the
- * CPG will also be accessible through the register page, so this remains
- * configurable for users that really know what they're doing.
- *
- * Additionaly, the register page maps in the CPG register base relative
- * to the nearest page-aligned boundary, which requires that userspace do
- * the appropriate CPU subtype math for calculating the page offset for
- * the counter value.
- */
-static int sh_wdt_mmap(struct file *file, struct vm_area_struct *vma)
-{
- int ret = -ENOSYS;
-
-#ifdef CONFIG_SH_WDT_MMAP
- unsigned long addr;
-
- /* Only support the simple cases where we map in a register page. */
- if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
- return -EINVAL;
-
- /*
- * Pick WTCNT as the start, it's usually the first register after the
- * FRQCR, and neither one are generally page-aligned out of the box.
- */
- addr = WTCNT & ~(PAGE_SIZE - 1);
-
- vma->vm_flags |= VM_IO;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot)) {
- printk(KERN_ERR PFX "%s: io_remap_pfn_range failed\n",
- __func__);
- return -EAGAIN;
- }
-
- ret = 0;
-#endif
-
- return ret;
-}
-
-/**
- * sh_wdt_ioctl - Query Device
- * @file: file handle of device
- * @cmd: watchdog command
- * @arg: argument
- *
- * Query basic information from the device or ping it, as outlined by the
- * watchdog API.
- */
static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ struct sh_wdt *wdt = file->private_data;
int new_heartbeat;
int options, retval = -EINVAL;
@@ -356,18 +260,18 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
- sh_wdt_stop();
+ sh_wdt_stop(wdt);
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
- sh_wdt_start();
+ sh_wdt_start(wdt);
retval = 0;
}
return retval;
case WDIOC_KEEPALIVE:
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_heartbeat, (int *)arg))
@@ -376,7 +280,7 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
if (sh_wdt_set_heartbeat(new_heartbeat))
return -EINVAL;
- sh_wdt_keepalive();
+ sh_wdt_keepalive(wdt);
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, (int *)arg);
@@ -386,20 +290,13 @@ static long sh_wdt_ioctl(struct file *file, unsigned int cmd,
return 0;
}
-/**
- * sh_wdt_notify_sys - Notifier Handler
- * @this: notifier block
- * @code: notifier event
- * @unused: unused
- *
- * Handles specific events, such as turning off the watchdog during a
- * shutdown event.
- */
static int sh_wdt_notify_sys(struct notifier_block *this,
unsigned long code, void *unused)
{
+ struct sh_wdt *wdt = platform_get_drvdata(sh_wdt_dev);
+
if (code == SYS_DOWN || code == SYS_HALT)
- sh_wdt_stop();
+ sh_wdt_stop(wdt);
return NOTIFY_DONE;
}
@@ -411,7 +308,6 @@ static const struct file_operations sh_wdt_fops = {
.unlocked_ioctl = sh_wdt_ioctl,
.open = sh_wdt_open,
.release = sh_wdt_close,
- .mmap = sh_wdt_mmap,
};
static const struct watchdog_info sh_wdt_info = {
@@ -431,66 +327,148 @@ static struct miscdevice sh_wdt_miscdev = {
.fops = &sh_wdt_fops,
};
-/**
- * sh_wdt_init - Initialize module
- * Registers the device and notifier handler. Actual device
- * initialization is handled by sh_wdt_open().
- */
-static int __init sh_wdt_init(void)
+static int __devinit sh_wdt_probe(struct platform_device *pdev)
{
+ struct sh_wdt *wdt;
+ struct resource *res;
int rc;
- if (clock_division_ratio < 0x5 || clock_division_ratio > 0x7) {
- clock_division_ratio = WTCSR_CKS_4096;
- printk(KERN_INFO PFX
- "clock_division_ratio value must be 0x5<=x<=0x7, using %d\n",
- clock_division_ratio);
+ /*
+ * As this driver only covers the global watchdog case, reject
+ * any attempts to register per-CPU watchdogs.
+ */
+ if (pdev->id != -1)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(!res))
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), DRV_NAME))
+ return -EBUSY;
+
+ wdt = devm_kzalloc(&pdev->dev, sizeof(struct sh_wdt), GFP_KERNEL);
+ if (unlikely(!wdt)) {
+ rc = -ENOMEM;
+ goto out_release;
}
- rc = sh_wdt_set_heartbeat(heartbeat);
- if (unlikely(rc)) {
- heartbeat = WATCHDOG_HEARTBEAT;
- printk(KERN_INFO PFX
- "heartbeat value must be 1<=x<=3600, using %d\n",
- heartbeat);
+ wdt->dev = &pdev->dev;
+
+ wdt->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (unlikely(!wdt->base)) {
+ rc = -ENXIO;
+ goto out_err;
}
rc = register_reboot_notifier(&sh_wdt_notifier);
if (unlikely(rc)) {
- printk(KERN_ERR PFX
+ dev_err(&pdev->dev,
"Can't register reboot notifier (err=%d)\n", rc);
- return rc;
+ goto out_unmap;
}
+ sh_wdt_miscdev.parent = wdt->dev;
+
rc = misc_register(&sh_wdt_miscdev);
if (unlikely(rc)) {
- printk(KERN_ERR PFX
+ dev_err(&pdev->dev,
"Can't register miscdev on minor=%d (err=%d)\n",
sh_wdt_miscdev.minor, rc);
- unregister_reboot_notifier(&sh_wdt_notifier);
- return rc;
+ goto out_unreg;
}
- printk(KERN_INFO PFX "initialized. heartbeat=%d sec (nowayout=%d)\n",
- heartbeat, nowayout);
+ init_timer(&wdt->timer);
+ wdt->timer.function = sh_wdt_ping;
+ wdt->timer.data = (unsigned long)wdt;
+ wdt->timer.expires = next_ping_period(clock_division_ratio);
+
+ platform_set_drvdata(pdev, wdt);
+ sh_wdt_dev = pdev;
+
+ dev_info(&pdev->dev, "initialized.\n");
return 0;
+
+out_unreg:
+ unregister_reboot_notifier(&sh_wdt_notifier);
+out_unmap:
+ devm_iounmap(&pdev->dev, wdt->base);
+out_err:
+ devm_kfree(&pdev->dev, wdt);
+out_release:
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+
+ return rc;
}
-/**
- * sh_wdt_exit - Deinitialize module
- * Unregisters the device and notifier handler. Actual device
- * deinitialization is handled by sh_wdt_close().
- */
-static void __exit sh_wdt_exit(void)
+static int __devexit sh_wdt_remove(struct platform_device *pdev)
{
+ struct sh_wdt *wdt = platform_get_drvdata(pdev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ platform_set_drvdata(pdev, NULL);
+
misc_deregister(&sh_wdt_miscdev);
+
+ sh_wdt_dev = NULL;
+
unregister_reboot_notifier(&sh_wdt_notifier);
+ devm_release_mem_region(&pdev->dev, res->start, resource_size(res));
+ devm_iounmap(&pdev->dev, wdt->base);
+ devm_kfree(&pdev->dev, wdt);
+
+ return 0;
}
+static struct platform_driver sh_wdt_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+
+ .probe = sh_wdt_probe,
+ .remove = __devexit_p(sh_wdt_remove),
+};
+
+static int __init sh_wdt_init(void)
+{
+ int rc;
+
+ if (unlikely(clock_division_ratio < 0x5 ||
+ clock_division_ratio > 0x7)) {
+ clock_division_ratio = WTCSR_CKS_4096;
+
+ pr_info("%s: divisor must be 0x5<=x<=0x7, using %d\n",
+ DRV_NAME, clock_division_ratio);
+ }
+
+ rc = sh_wdt_set_heartbeat(heartbeat);
+ if (unlikely(rc)) {
+ heartbeat = WATCHDOG_HEARTBEAT;
+
+ pr_info("%s: heartbeat value must be 1<=x<=3600, using %d\n",
+ DRV_NAME, heartbeat);
+ }
+
+ pr_info("%s: configured with heartbeat=%d sec (nowayout=%d)\n",
+ DRV_NAME, heartbeat, nowayout);
+
+ return platform_driver_register(&sh_wdt_driver);
+}
+
+static void __exit sh_wdt_exit(void)
+{
+ platform_driver_unregister(&sh_wdt_driver);
+}
+module_init(sh_wdt_init);
+module_exit(sh_wdt_exit);
+
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>");
MODULE_DESCRIPTION("SuperH watchdog driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
module_param(clock_division_ratio, int, 0);
@@ -507,6 +485,3 @@ module_param(nowayout, int, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-module_init(sh_wdt_init);
-module_exit(sh_wdt_exit);
diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c
index 8a1f0bc3e271..df88cfa05f35 100644
--- a/drivers/watchdog/smsc37b787_wdt.c
+++ b/drivers/watchdog/smsc37b787_wdt.c
@@ -434,11 +434,11 @@ static long wb_smsc_wdt_ioctl(struct file *file,
} uarg;
static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING |
+ .options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_MAGICCLOSE,
.firmware_version = 0,
- .identity = "SMsC 37B787 Watchdog",
+ .identity = "SMsC 37B787 Watchdog",
};
uarg.i = (int __user *)arg;
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index 833f49f43d43..100b114e3c3c 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -151,7 +151,7 @@ static int softdog_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
softdog_stop();
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
index 808372883e88..1bc493848ed4 100644
--- a/drivers/watchdog/sp5100_tco.c
+++ b/drivers/watchdog/sp5100_tco.c
@@ -259,7 +259,7 @@ static struct miscdevice sp5100_tco_miscdev = {
* register a pci_driver, because someone else might
* want to register another driver on the same PCI id.
*/
-static struct pci_device_id sp5100_tco_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = {
{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
PCI_ANY_ID, },
{ 0, }, /* End of list */
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 18cdeb4c4258..5a90a4a871dd 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -68,7 +68,7 @@ struct platform_device *ts72xx_wdt_pdev;
* to control register):
* value description
* -------------------------
- * 0x00 watchdog disabled
+ * 0x00 watchdog disabled
* 0x01 250ms
* 0x02 500ms
* 0x03 1s
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index df2a64dc9672..be9c4d839e15 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -87,10 +87,10 @@ static int w83697ug_select_wd_register(void)
outb_p(0x87, WDT_EFER); /* Enter extended function mode */
outb_p(0x87, WDT_EFER); /* Again according to manual */
- outb(0x20, WDT_EFER); /* check chip version */
+ outb(0x20, WDT_EFER); /* check chip version */
version = inb(WDT_EFDR);
- if (version == 0x68) { /* W83697UG */
+ if (version == 0x68) { /* W83697UG */
printk(KERN_INFO PFX "Watchdog chip version 0x%02x = "
"W83697UG/UF found at 0x%04x\n", version, wdt_io);
diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c
index 552a4381e78f..bb03e151a1d0 100644
--- a/drivers/watchdog/wdt.c
+++ b/drivers/watchdog/wdt.c
@@ -581,7 +581,7 @@ static void __exit wdt_exit(void)
}
/**
- * wdt_init:
+ * wdt_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c
index 5c2521fc836c..a2f01c9f5c34 100644
--- a/drivers/watchdog/wdt977.c
+++ b/drivers/watchdog/wdt977.c
@@ -281,7 +281,7 @@ static int wdt977_release(struct inode *inode, struct file *file)
{
/*
* Shut off the timer.
- * Lock it in if it's a module and we set nowayout
+ * Lock it in if it's a module and we set nowayout
*/
if (expect_close == 42) {
wdt977_stop();
diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c
index 6130c88fa5ac..172dad6c7693 100644
--- a/drivers/watchdog/wdt_pci.c
+++ b/drivers/watchdog/wdt_pci.c
@@ -31,7 +31,7 @@
* Jeff Garzik : PCI cleanups
* Tigran Aivazian : Restructured wdtpci_init_one() to handle
* failures
- * Joel Becker : Added WDIOC_GET/SETTIMEOUT
+ * Joel Becker : Added WDIOC_GET/SETTIMEOUT
* Zwane Mwaikambo : Magic char closing, locking changes,
* cleanups
* Matt Domsch : nowayout module option
@@ -727,7 +727,7 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id wdtpci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = {
{
.vendor = PCI_VENDOR_ID_ACCESSIO,
.device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM,
@@ -764,7 +764,7 @@ static void __exit wdtpci_cleanup(void)
/**
- * wdtpci_init:
+ * wdtpci_init:
*
* Set up the WDT watchdog board. All we have to do is grab the
* resources we require and bitch if anyone beat us to them.
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c
new file mode 100644
index 000000000000..49bd9d395562
--- /dev/null
+++ b/drivers/watchdog/xen_wdt.c
@@ -0,0 +1,359 @@
+/*
+ * Xen Watchdog Driver
+ *
+ * (c) Copyright 2010 Novell, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define DRV_NAME "wdt"
+#define DRV_VERSION "0.01"
+#define PFX DRV_NAME ": "
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/init.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+#include <xen/xen.h>
+#include <asm/xen/hypercall.h>
+#include <xen/interface/sched.h>
+
+static struct platform_device *platform_device;
+static DEFINE_SPINLOCK(wdt_lock);
+static struct sched_watchdog wdt;
+static __kernel_time_t wdt_expires;
+static bool is_active, expect_release;
+
+#define WATCHDOG_TIMEOUT 60 /* in seconds */
+static unsigned int timeout = WATCHDOG_TIMEOUT;
+module_param(timeout, uint, S_IRUGO);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+ "(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static inline __kernel_time_t set_timeout(void)
+{
+ wdt.timeout = timeout;
+ return ktime_to_timespec(ktime_get()).tv_sec + timeout;
+}
+
+static int xen_wdt_start(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (!wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -EBUSY;
+ if (err > 0) {
+ wdt.id = err;
+ wdt_expires = expires;
+ err = 0;
+ } else
+ BUG_ON(!err);
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_stop(void)
+{
+ int err = 0;
+
+ spin_lock(&wdt_lock);
+
+ wdt.timeout = 0;
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ if (!err)
+ wdt.id = 0;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_kick(void)
+{
+ __kernel_time_t expires;
+ int err;
+
+ spin_lock(&wdt_lock);
+
+ expires = set_timeout();
+ if (wdt.id)
+ err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt);
+ else
+ err = -ENXIO;
+ if (!err)
+ wdt_expires = expires;
+
+ spin_unlock(&wdt_lock);
+
+ return err;
+}
+
+static int xen_wdt_open(struct inode *inode, struct file *file)
+{
+ int err;
+
+ /* /dev/watchdog can only be opened once */
+ if (xchg(&is_active, true))
+ return -EBUSY;
+
+ err = xen_wdt_start();
+ if (err == -EBUSY)
+ err = xen_wdt_kick();
+ return err ?: nonseekable_open(inode, file);
+}
+
+static int xen_wdt_release(struct inode *inode, struct file *file)
+{
+ if (expect_release)
+ xen_wdt_stop();
+ else {
+ printk(KERN_CRIT PFX
+ "unexpected close, not stopping watchdog!\n");
+ xen_wdt_kick();
+ }
+ is_active = false;
+ expect_release = false;
+ return 0;
+}
+
+static ssize_t xen_wdt_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ /* See if we got the magic character 'V' and reload the timer */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* in case it was set long ago */
+ expect_release = false;
+
+ /* scan to see whether or not we got the magic
+ character */
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_release = true;
+ }
+ }
+
+ /* someone wrote to us, we should reload the timer */
+ xen_wdt_kick();
+ }
+ return len;
+}
+
+static long xen_wdt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int new_options, retval = -EINVAL;
+ int new_timeout;
+ int __user *argp = (void __user *)arg;
+ static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+ };
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, argp);
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(new_options, argp))
+ return -EFAULT;
+
+ if (new_options & WDIOS_DISABLECARD)
+ retval = xen_wdt_stop();
+ if (new_options & WDIOS_ENABLECARD) {
+ retval = xen_wdt_start();
+ if (retval == -EBUSY)
+ retval = xen_wdt_kick();
+ }
+ return retval;
+
+ case WDIOC_KEEPALIVE:
+ xen_wdt_kick();
+ return 0;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, argp))
+ return -EFAULT;
+ if (!new_timeout)
+ return -EINVAL;
+ timeout = new_timeout;
+ xen_wdt_kick();
+ /* fall through */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, argp);
+
+ case WDIOC_GETTIMELEFT:
+ retval = wdt_expires - ktime_to_timespec(ktime_get()).tv_sec;
+ return put_user(retval, argp);
+ }
+
+ return -ENOTTY;
+}
+
+static const struct file_operations xen_wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = xen_wdt_write,
+ .unlocked_ioctl = xen_wdt_ioctl,
+ .open = xen_wdt_open,
+ .release = xen_wdt_release,
+};
+
+static struct miscdevice xen_wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &xen_wdt_fops,
+};
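The fops and miscdevice above implement the standard /dev/watchdog character-device contract shared by every driver touched in this patch. A minimal userspace client — an illustration of the generic watchdog ABI, not code from this patch; the timeout and loop count are arbitrary — looks like:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    int main(void)
    {
        int fd = open("/dev/watchdog", O_WRONLY);
        int timeout = 30, i;

        if (fd < 0)
            return 1;

        ioctl(fd, WDIOC_SETTIMEOUT, &timeout);  /* driver may clamp it */
        for (i = 0; i < 10; i++) {
            ioctl(fd, WDIOC_KEEPALIVE, 0);      /* ping the watchdog */
            sleep(timeout / 2);                 /* well inside the deadline */
        }
        write(fd, "V", 1);  /* magic close: sets expect_release above */
        close(fd);          /* so release() stops the timer */
        return 0;
    }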
+
+static int __devinit xen_wdt_probe(struct platform_device *dev)
+{
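+	/*
+	 * Probe for hypervisor support by issuing SCHEDOP_watchdog with a
+	 * bogus id: a hypervisor that implements the op rejects the id with
+	 * -EINVAL, while one that lacks the op returns -ENOSYS (see below).
+	 */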
+ struct sched_watchdog wd = { .id = ~0 };
+ int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
+
+ switch (ret) {
+ case -EINVAL:
+ if (!timeout) {
+ timeout = WATCHDOG_TIMEOUT;
+ printk(KERN_INFO PFX
+ "timeout value invalid, using %d\n", timeout);
+ }
+
+ ret = misc_register(&xen_wdt_miscdev);
+ if (ret) {
+ printk(KERN_ERR PFX
+ "cannot register miscdev on minor=%d (%d)\n",
+ WATCHDOG_MINOR, ret);
+ break;
+ }
+
+ printk(KERN_INFO PFX
+ "initialized (timeout=%ds, nowayout=%d)\n",
+ timeout, nowayout);
+ break;
+
+ case -ENOSYS:
+ printk(KERN_INFO PFX "not supported\n");
+ ret = -ENODEV;
+ break;
+
+ default:
+ printk(KERN_INFO PFX "bogus return value %d\n", ret);
+ break;
+ }
+
+ return ret;
+}
+
+static int __devexit xen_wdt_remove(struct platform_device *dev)
+{
+ /* Stop the timer before we leave */
+ if (!nowayout)
+ xen_wdt_stop();
+
+ misc_deregister(&xen_wdt_miscdev);
+
+ return 0;
+}
+
+static void xen_wdt_shutdown(struct platform_device *dev)
+{
+ xen_wdt_stop();
+}
+
+static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state)
+{
+ return xen_wdt_stop();
+}
+
+static int xen_wdt_resume(struct platform_device *dev)
+{
+ return xen_wdt_start();
+}
+
+static struct platform_driver xen_wdt_driver = {
+ .probe = xen_wdt_probe,
+ .remove = __devexit_p(xen_wdt_remove),
+ .shutdown = xen_wdt_shutdown,
+ .suspend = xen_wdt_suspend,
+ .resume = xen_wdt_resume,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ },
+};
+
+static int __init xen_wdt_init_module(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ printk(KERN_INFO PFX "Xen WatchDog Timer Driver v%s\n", DRV_VERSION);
+
+ err = platform_driver_register(&xen_wdt_driver);
+ if (err)
+ return err;
+
+ platform_device = platform_device_register_simple(DRV_NAME,
+ -1, NULL, 0);
+ if (IS_ERR(platform_device)) {
+ err = PTR_ERR(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ }
+
+ return err;
+}
+
+static void __exit xen_wdt_cleanup_module(void)
+{
+ platform_device_unregister(platform_device);
+ platform_driver_unregister(&xen_wdt_driver);
+ printk(KERN_INFO PFX "module unloaded\n");
+}
+
+module_init(xen_wdt_init_module);
+module_exit(xen_wdt_cleanup_module);
+
+MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>");
+MODULE_DESCRIPTION("Xen WatchDog Timer Driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 07bec09d1dad..a59638b37c1a 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -76,10 +76,20 @@ config XEN_XENBUS_FRONTEND
config XEN_GNTDEV
tristate "userspace grant access device driver"
depends on XEN
+ default m
select MMU_NOTIFIER
help
Allows userspace processes to use grants.
+config XEN_GRANT_DEV_ALLOC
+ tristate "User-space grant reference allocator driver"
+ depends on XEN
+ default m
+ help
+ Allows userspace processes to create pages with access granted
+ to other domains. This can be used to implement frontend drivers
+ or as part of an inter-domain shared memory channel.
+
config XEN_PLATFORM_PCI
tristate "xen platform pci device driver"
depends on XEN_PVHVM && PCI
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 5088cc2e6fe2..f420f1ff7f13 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,4 +1,4 @@
-obj-y += grant-table.o features.o events.o manage.o
+obj-y += grant-table.o features.o events.o manage.o balloon.o
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
@@ -7,9 +7,10 @@ CFLAGS_features.o := $(nostackp)
obj-$(CONFIG_BLOCK) += biomerge.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
-obj-$(CONFIG_XEN_BALLOON) += balloon.o
+obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
+obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
@@ -18,5 +19,6 @@ obj-$(CONFIG_XEN_DOM0) += pci.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
+xen-gntalloc-y := gntalloc.o
xen-platform-pci-y := platform-pci.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 718050ace08f..043af8ad6b60 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -1,6 +1,4 @@
/******************************************************************************
- * balloon.c
- *
* Xen balloon driver - enables returning/claiming memory to/from Xen.
*
* Copyright (c) 2003, B Dragovic
@@ -33,7 +31,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
@@ -42,13 +39,11 @@
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/gfp.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/e820.h>
@@ -58,35 +53,29 @@
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
-#include <xen/xenbus.h>
+#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
-#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
-
-#define BALLOON_CLASS_NAME "xen_memory"
+/*
+ * balloon_process() state:
+ *
+ * BP_DONE: done or nothing to do,
+ * BP_EAGAIN: error, go to sleep,
+ * BP_ECANCELED: error, balloon operation canceled.
+ */
-struct balloon_stats {
- /* We aim for 'current allocation' == 'target allocation'. */
- unsigned long current_pages;
- unsigned long target_pages;
- /*
- * Drivers may alter the memory reservation independently, but they
- * must inform the balloon driver so we avoid hitting the hard limit.
- */
- unsigned long driver_pages;
- /* Number of pages in high- and low-memory balloons. */
- unsigned long balloon_low;
- unsigned long balloon_high;
+enum bp_state {
+ BP_DONE,
+ BP_EAGAIN,
+ BP_ECANCELED
};
-static DEFINE_MUTEX(balloon_mutex);
-
-static struct sys_device balloon_sysdev;
-static int register_balloon(struct sys_device *sysdev);
+static DEFINE_MUTEX(balloon_mutex);
-static struct balloon_stats balloon_stats;
+struct balloon_stats balloon_stats;
+EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
@@ -104,8 +93,7 @@ static LIST_HEAD(ballooned_pages);
/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
-static DECLARE_WORK(balloon_worker, balloon_process);
-static struct timer_list balloon_timer;
+static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
/* When ballooning out (allocating memory to return to Xen) we don't really
want the kernel to try too hard since that can trigger the oom killer. */
@@ -140,14 +128,17 @@ static void balloon_append(struct page *page)
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
-static struct page *balloon_retrieve(void)
+static struct page *balloon_retrieve(bool prefer_highmem)
{
struct page *page;
if (list_empty(&ballooned_pages))
return NULL;
- page = list_entry(ballooned_pages.next, struct page, lru);
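+	/* balloon_append() queues highmem pages at the list tail, so a
+	 * caller that prefers highmem takes from the tail (.prev) */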
+ if (prefer_highmem)
+ page = list_entry(ballooned_pages.prev, struct page, lru);
+ else
+ page = list_entry(ballooned_pages.next, struct page, lru);
list_del(&page->lru);
if (PageHighMem(page)) {
@@ -177,9 +168,29 @@ static struct page *balloon_next_page(struct page *page)
return list_entry(next, struct page, lru);
}
-static void balloon_alarm(unsigned long unused)
+static enum bp_state update_schedule(enum bp_state state)
{
- schedule_work(&balloon_worker);
+ if (state == BP_DONE) {
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.retry_count = 1;
+ return BP_DONE;
+ }
+
+ ++balloon_stats.retry_count;
+
+ if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
+ balloon_stats.retry_count > balloon_stats.max_retry_count) {
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.retry_count = 1;
+ return BP_ECANCELED;
+ }
+
+ balloon_stats.schedule_delay <<= 1;
+
+ if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
+ balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
+
+ return BP_EAGAIN;
}
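update_schedule() gives the balloon worker a capped exponential backoff. A small standalone sketch (plain C; the cap of 32 matches the max_schedule_delay default set in balloon_init() later in this patch) shows the delay sequence a persistently failing balloon pass would see:

    #include <stdio.h>

    int main(void)
    {
        unsigned long delay = 1, max_delay = 32;    /* the defaults */
        int attempt;

        for (attempt = 1; attempt <= 8; attempt++) {
            /* mirror update_schedule(): double first, then cap */
            delay <<= 1;
            if (delay > max_delay)
                delay = max_delay;
            printf("retry %d in %lus\n", attempt, delay);
        }
        return 0;   /* prints 2, 4, 8, 16, 32, 32, 32, 32 */
    }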
static unsigned long current_target(void)
@@ -194,11 +205,11 @@ static unsigned long current_target(void)
return target;
}
-static int increase_reservation(unsigned long nr_pages)
+static enum bp_state increase_reservation(unsigned long nr_pages)
{
+ int rc;
unsigned long pfn, i;
struct page *page;
- long rc;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = 0,
@@ -210,7 +221,10 @@ static int increase_reservation(unsigned long nr_pages)
page = balloon_first_page();
for (i = 0; i < nr_pages; i++) {
- BUG_ON(page == NULL);
+ if (!page) {
+ nr_pages = i;
+ break;
+ }
frame_list[i] = page_to_pfn(page);
page = balloon_next_page(page);
}
@@ -218,11 +232,11 @@ static int increase_reservation(unsigned long nr_pages)
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
- if (rc < 0)
- goto out;
+ if (rc <= 0)
+ return BP_EAGAIN;
for (i = 0; i < rc; i++) {
- page = balloon_retrieve();
+ page = balloon_retrieve(false);
BUG_ON(page == NULL);
pfn = page_to_pfn(page);
@@ -249,15 +263,14 @@ static int increase_reservation(unsigned long nr_pages)
balloon_stats.current_pages += rc;
- out:
- return rc < 0 ? rc : rc != nr_pages;
+ return BP_DONE;
}
-static int decrease_reservation(unsigned long nr_pages)
+static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
+ enum bp_state state = BP_DONE;
unsigned long pfn, i;
struct page *page;
- int need_sleep = 0;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -269,9 +282,9 @@ static int decrease_reservation(unsigned long nr_pages)
nr_pages = ARRAY_SIZE(frame_list);
for (i = 0; i < nr_pages; i++) {
- if ((page = alloc_page(GFP_BALLOON)) == NULL) {
+ if ((page = alloc_page(gfp)) == NULL) {
nr_pages = i;
- need_sleep = 1;
+ state = BP_EAGAIN;
break;
}
@@ -307,7 +320,7 @@ static int decrease_reservation(unsigned long nr_pages)
balloon_stats.current_pages -= nr_pages;
- return need_sleep;
+ return state;
}
/*
@@ -318,77 +331,101 @@ static int decrease_reservation(unsigned long nr_pages)
*/
static void balloon_process(struct work_struct *work)
{
- int need_sleep = 0;
+ enum bp_state state = BP_DONE;
long credit;
mutex_lock(&balloon_mutex);
do {
credit = current_target() - balloon_stats.current_pages;
+
if (credit > 0)
- need_sleep = (increase_reservation(credit) != 0);
+ state = increase_reservation(credit);
+
if (credit < 0)
- need_sleep = (decrease_reservation(-credit) != 0);
+ state = decrease_reservation(-credit, GFP_BALLOON);
+
+ state = update_schedule(state);
#ifndef CONFIG_PREEMPT
if (need_resched())
schedule();
#endif
- } while ((credit != 0) && !need_sleep);
+ } while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
- if (current_target() != balloon_stats.current_pages)
- mod_timer(&balloon_timer, jiffies + HZ);
+ if (state == BP_EAGAIN)
+ schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
mutex_unlock(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
-static void balloon_set_new_target(unsigned long target)
+void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
- schedule_work(&balloon_worker);
+ schedule_delayed_work(&balloon_worker, 0);
}
+EXPORT_SYMBOL_GPL(balloon_set_new_target);
-static struct xenbus_watch target_watch =
-{
- .node = "memory/target"
-};
-
-/* React to a change in the target key */
-static void watch_target(struct xenbus_watch *watch,
- const char **vec, unsigned int len)
+/**
+ * alloc_xenballooned_pages - get pages that have been ballooned out
+ * @nr_pages: Number of pages to get
+ * @pages: pages returned
+ * @return 0 on success, error otherwise
+ */
+int alloc_xenballooned_pages(int nr_pages, struct page** pages)
{
- unsigned long long new_target;
- int err;
-
- err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
- if (err != 1) {
- /* This is ok (for domain0 at least) - so just return */
- return;
+ int pgno = 0;
+ struct page* page;
+ mutex_lock(&balloon_mutex);
+ while (pgno < nr_pages) {
+ page = balloon_retrieve(true);
+ if (page) {
+ pages[pgno++] = page;
+ } else {
+ enum bp_state st;
+ st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER);
+ if (st != BP_DONE)
+ goto out_undo;
+ }
}
-
- /* The given memory/target value is in KiB, so it needs converting to
- * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
- */
- balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+ mutex_unlock(&balloon_mutex);
+ return 0;
+ out_undo:
+ while (pgno)
+ balloon_append(pages[--pgno]);
+ /* Free the memory back to the kernel soon */
+ schedule_delayed_work(&balloon_worker, 0);
+ mutex_unlock(&balloon_mutex);
+ return -ENOMEM;
}
+EXPORT_SYMBOL(alloc_xenballooned_pages);
-static int balloon_init_watcher(struct notifier_block *notifier,
- unsigned long event,
- void *data)
+/**
+ * free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void free_xenballooned_pages(int nr_pages, struct page** pages)
{
- int err;
+ int i;
- err = register_xenbus_watch(&target_watch);
- if (err)
- printk(KERN_ERR "Failed to set balloon watcher\n");
+ mutex_lock(&balloon_mutex);
- return NOTIFY_DONE;
-}
+ for (i = 0; i < nr_pages; i++) {
+ if (pages[i])
+ balloon_append(pages[i]);
+ }
+
+ /* The balloon may be too large now. Shrink it if needed. */
+ if (current_target() != balloon_stats.current_pages)
+ schedule_delayed_work(&balloon_worker, 0);
-static struct notifier_block xenstore_notifier;
+ mutex_unlock(&balloon_mutex);
+}
+EXPORT_SYMBOL(free_xenballooned_pages);
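For reference, a minimal sketch of how a kernel-side consumer could use the
newly exported API (the helper name and error handling are illustrative
assumptions, not part of this patch; gntdev below is the real first user):

	#include <linux/slab.h>
	#include <xen/balloon.h>

	/* Hypothetical consumer: borrow nr ballooned-out frames, then return them. */
	static int demo_borrow_frames(int nr)
	{
		struct page **pages;
		int rc;

		pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;

		rc = alloc_xenballooned_pages(nr, pages);	/* 0 or -ENOMEM */
		if (rc == 0) {
			/* ... use the frames, e.g. as targets for grant mappings ... */
			free_xenballooned_pages(nr, pages);	/* balloon may shrink again */
		}
		kfree(pages);
		return rc;
	}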
static int __init balloon_init(void)
{
@@ -398,7 +435,7 @@ static int __init balloon_init(void)
if (!xen_domain())
return -ENODEV;
- pr_info("xen_balloon: Initialising balloon driver.\n");
+ pr_info("xen/balloon: Initialising balloon driver.\n");
if (xen_pv_domain())
nr_pages = xen_start_info->nr_pages;
@@ -408,13 +445,11 @@ static int __init balloon_init(void)
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
- balloon_stats.driver_pages = 0UL;
-
- init_timer(&balloon_timer);
- balloon_timer.data = 0;
- balloon_timer.function = balloon_alarm;
- register_balloon(&balloon_sysdev);
+ balloon_stats.schedule_delay = 1;
+ balloon_stats.max_schedule_delay = 32;
+ balloon_stats.retry_count = 1;
+ balloon_stats.max_retry_count = RETRY_UNLIMITED;
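The four new fields drive the retry policy behind BP_EAGAIN above. Since
update_schedule() itself is outside the hunks shown here, the following
reading is inferred from the field names and the schedule_delay * HZ re-arm
in balloon_process(), not quoted from the patch:

	/*
	 * schedule_delay     - seconds to wait before the next attempt,
	 *                      presumably grown on each BP_EAGAIN
	 * max_schedule_delay - cap on that delay (32s here)
	 * retry_count        - attempts made towards the current target
	 * max_retry_count    - RETRY_UNLIMITED: never give up on the target
	 */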
/*
* Initialise the balloon with excess memory space. We need
@@ -436,153 +471,9 @@ static int __init balloon_init(void)
__balloon_append(page);
}
- target_watch.callback = watch_target;
- xenstore_notifier.notifier_call = balloon_init_watcher;
-
- register_xenstore_notifier(&xenstore_notifier);
-
return 0;
}
subsys_initcall(balloon_init);
-static void balloon_exit(void)
-{
- /* XXX - release balloon here */
- return;
-}
-
-module_exit(balloon_exit);
-
-#define BALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
- char *buf) \
- { \
- return sprintf(buf, format, ##args); \
- } \
- static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
-
-BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
-BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
-BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
-BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages));
-
-static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
-}
-
-static ssize_t store_target_kb(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf,
- size_t count)
-{
- char *endchar;
- unsigned long long target_bytes;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
-
- balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-
- return count;
-}
-
-static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
- show_target_kb, store_target_kb);
-
-
-static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%llu\n",
- (unsigned long long)balloon_stats.target_pages
- << PAGE_SHIFT);
-}
-
-static ssize_t store_target(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf,
- size_t count)
-{
- char *endchar;
- unsigned long long target_bytes;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- target_bytes = memparse(buf, &endchar);
-
- balloon_set_new_target(target_bytes >> PAGE_SHIFT);
-
- return count;
-}
-
-static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
- show_target, store_target);
-
-
-static struct sysdev_attribute *balloon_attrs[] = {
- &attr_target_kb,
- &attr_target,
-};
-
-static struct attribute *balloon_info_attrs[] = {
- &attr_current_kb.attr,
- &attr_low_kb.attr,
- &attr_high_kb.attr,
- &attr_driver_kb.attr,
- NULL
-};
-
-static struct attribute_group balloon_info_group = {
- .name = "info",
- .attrs = balloon_info_attrs,
-};
-
-static struct sysdev_class balloon_sysdev_class = {
- .name = BALLOON_CLASS_NAME,
-};
-
-static int register_balloon(struct sys_device *sysdev)
-{
- int i, error;
-
- error = sysdev_class_register(&balloon_sysdev_class);
- if (error)
- return error;
-
- sysdev->id = 0;
- sysdev->cls = &balloon_sysdev_class;
-
- error = sysdev_register(sysdev);
- if (error) {
- sysdev_class_unregister(&balloon_sysdev_class);
- return error;
- }
-
- for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
- error = sysdev_create_file(sysdev, balloon_attrs[i]);
- if (error)
- goto fail;
- }
-
- error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
- if (error)
- goto fail;
-
- return 0;
-
- fail:
- while (--i >= 0)
- sysdev_remove_file(sysdev, balloon_attrs[i]);
- sysdev_unregister(sysdev);
- sysdev_class_unregister(&balloon_sysdev_class);
- return error;
-}
-
MODULE_LICENSE("GPL");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 65f5068afd84..02b5a9c05cfa 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -56,6 +56,8 @@
*/
static DEFINE_SPINLOCK(irq_mapping_update_lock);
+static LIST_HEAD(xen_irq_list_head);
+
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
@@ -85,7 +87,9 @@ enum xen_irq_type {
*/
struct irq_info
{
+ struct list_head list;
enum xen_irq_type type; /* type */
+ unsigned irq;
unsigned short evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
@@ -103,23 +107,10 @@ struct irq_info
#define PIRQ_NEEDS_EOI (1 << 0)
#define PIRQ_SHAREABLE (1 << 1)
-static struct irq_info *irq_info;
-static int *pirq_to_irq;
-
static int *evtchn_to_irq;
-struct cpu_evtchn_s {
- unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
-};
-static __initdata struct cpu_evtchn_s init_evtchn_mask = {
- .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul,
-};
-static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask;
-
-static inline unsigned long *cpu_evtchn_mask(int cpu)
-{
- return cpu_evtchn_mask_p[cpu].bits;
-}
+static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
+ cpu_evtchn_mask);
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)
@@ -128,46 +119,86 @@ static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
-/* Constructor for packed IRQ information. */
-static struct irq_info mk_unbound_info(void)
+/* Get info for IRQ */
+static struct irq_info *info_for_irq(unsigned irq)
+{
+ return get_irq_data(irq);
+}
+
+/* Constructors for packed IRQ information. */
+static void xen_irq_info_common_init(struct irq_info *info,
+ unsigned irq,
+ enum xen_irq_type type,
+ unsigned short evtchn,
+ unsigned short cpu)
{
- return (struct irq_info) { .type = IRQT_UNBOUND };
+
+ BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
+
+ info->type = type;
+ info->irq = irq;
+ info->evtchn = evtchn;
+ info->cpu = cpu;
+
+ evtchn_to_irq[evtchn] = irq;
}
-static struct irq_info mk_evtchn_info(unsigned short evtchn)
+static void xen_irq_info_evtchn_init(unsigned irq,
+ unsigned short evtchn)
{
- return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
- .cpu = 0 };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}
-static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
+static void xen_irq_info_ipi_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ enum ipi_vector ipi)
{
- return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
- .cpu = 0, .u.ipi = ipi };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);
+
+ info->u.ipi = ipi;
+
+ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}
-static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+static void xen_irq_info_virq_init(unsigned cpu,
+ unsigned irq,
+ unsigned short evtchn,
+ unsigned short virq)
{
- return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
- .cpu = 0, .u.virq = virq };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);
+
+ info->u.virq = virq;
+
+ per_cpu(virq_to_irq, cpu)[virq] = irq;
}
-static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq,
- unsigned short gsi, unsigned short vector)
+static void xen_irq_info_pirq_init(unsigned irq,
+ unsigned short evtchn,
+ unsigned short pirq,
+ unsigned short gsi,
+ unsigned short vector,
+ unsigned char flags)
{
- return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
- .cpu = 0,
- .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } };
+ struct irq_info *info = info_for_irq(irq);
+
+ xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);
+
+ info->u.pirq.pirq = pirq;
+ info->u.pirq.gsi = gsi;
+ info->u.pirq.vector = vector;
+ info->u.pirq.flags = flags;
}
/*
* Accessors for packed IRQ information.
*/
-static struct irq_info *info_for_irq(unsigned irq)
-{
- return &irq_info[irq];
-}
-
static unsigned int evtchn_from_irq(unsigned irq)
{
if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
@@ -212,26 +243,6 @@ static unsigned pirq_from_irq(unsigned irq)
return info->u.pirq.pirq;
}
-static unsigned gsi_from_irq(unsigned irq)
-{
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(info == NULL);
- BUG_ON(info->type != IRQT_PIRQ);
-
- return info->u.pirq.gsi;
-}
-
-static unsigned vector_from_irq(unsigned irq)
-{
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(info == NULL);
- BUG_ON(info->type != IRQT_PIRQ);
-
- return info->u.pirq.vector;
-}
-
static enum xen_irq_type type_from_irq(unsigned irq)
{
return info_for_irq(irq)->type;
@@ -267,7 +278,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
unsigned int idx)
{
return (sh->evtchn_pending[idx] &
- cpu_evtchn_mask(cpu)[idx] &
+ per_cpu(cpu_evtchn_mask, cpu)[idx] &
~sh->evtchn_mask[idx]);
}
@@ -280,28 +291,28 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif
- clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
- set_bit(chn, cpu_evtchn_mask(cpu));
+ clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
+ set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
- irq_info[irq].cpu = cpu;
+ info_for_irq(irq)->cpu = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
int i;
#ifdef CONFIG_SMP
- struct irq_desc *desc;
+ struct irq_info *info;
/* By default all event channels notify CPU#0. */
- for_each_irq_desc(i, desc) {
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ struct irq_desc *desc = irq_to_desc(info->irq);
cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
}
#endif
for_each_possible_cpu(i)
- memset(cpu_evtchn_mask(i),
- (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
-
+ memset(per_cpu(cpu_evtchn_mask, i),
+ (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}
static inline void clear_evtchn(int port)
@@ -376,7 +387,28 @@ static void unmask_evtchn(int port)
put_cpu();
}
-static int xen_allocate_irq_dynamic(void)
+static void xen_irq_init(unsigned irq)
+{
+ struct irq_info *info;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+#ifdef CONFIG_SMP
+ /* By default all event channels notify CPU#0. */
+ cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+#endif
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL)
+ panic("Unable to allocate metadata for IRQ%d\n", irq);
+
+ info->type = IRQT_UNBOUND;
+
+ set_irq_data(irq, info);
+
+ list_add_tail(&info->list, &xen_irq_list_head);
+}
+
+static int __must_check xen_allocate_irq_dynamic(void)
{
int first = 0;
int irq;
@@ -393,22 +425,14 @@ static int xen_allocate_irq_dynamic(void)
first = get_nr_irqs_gsi();
#endif
-retry:
irq = irq_alloc_desc_from(first, -1);
- if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
- printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
- first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
- goto retry;
- }
-
- if (irq < 0)
- panic("No available IRQ to bind to: increase nr_irqs!\n");
+ if (irq >= 0)
+ xen_irq_init(irq);
return irq;
}
-static int xen_allocate_irq_gsi(unsigned gsi)
+static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
int irq;
@@ -423,17 +447,25 @@ static int xen_allocate_irq_gsi(unsigned gsi)
/* Legacy IRQ descriptors are already allocated by the arch. */
if (gsi < NR_IRQS_LEGACY)
- return gsi;
+ irq = gsi;
+ else
+ irq = irq_alloc_desc_at(gsi, -1);
- irq = irq_alloc_desc_at(gsi, -1);
- if (irq < 0)
- panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
+ if (irq >= 0)
+ xen_irq_init(irq);
return irq;
}
static void xen_free_irq(unsigned irq)
{
+ struct irq_info *info = get_irq_data(irq);
+
+ list_del(&info->list);
+
+ set_irq_data(irq, NULL);
+
+ kfree(info);
+
/* Legacy IRQ descriptors are managed by the arch. */
if (irq < NR_IRQS_LEGACY)
return;
@@ -563,51 +595,39 @@ static void ack_pirq(struct irq_data *data)
static int find_irq_by_gsi(unsigned gsi)
{
- int irq;
+ struct irq_info *info;
- for (irq = 0; irq < nr_irqs; irq++) {
- struct irq_info *info = info_for_irq(irq);
-
- if (info == NULL || info->type != IRQT_PIRQ)
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info->type != IRQT_PIRQ)
continue;
- if (gsi_from_irq(irq) == gsi)
- return irq;
+ if (info->u.pirq.gsi == gsi)
+ return info->irq;
}
return -1;
}
-int xen_allocate_pirq(unsigned gsi, int shareable, char *name)
+int xen_allocate_pirq_gsi(unsigned gsi)
{
- return xen_map_pirq_gsi(gsi, gsi, shareable, name);
+ return gsi;
}
-/* xen_map_pirq_gsi might allocate irqs from the top down, as a
- * consequence don't assume that the irq number returned has a low value
- * or can be used as a pirq number unless you know otherwise.
- *
- * One notable exception is when xen_map_pirq_gsi is called passing an
- * hardware gsi as argument, in that case the irq number returned
- * matches the gsi number passed as second argument.
+/*
+ * Do not make any assumptions regarding the relationship between the
+ * IRQ number returned here and the Xen pirq argument.
*
* Note: We don't assign an event channel until the irq actually starts
* up. Return an existing irq if we've already got one for the gsi.
*/
-int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
+int xen_bind_pirq_gsi_to_irq(unsigned gsi,
+ unsigned pirq, int shareable, char *name)
{
- int irq = 0;
+ int irq = -1;
struct physdev_irq irq_op;
spin_lock(&irq_mapping_update_lock);
- if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
- printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
- pirq > nr_irqs ? "pirq" :"",
- gsi > nr_irqs ? "gsi" : "");
- goto out;
- }
-
irq = find_irq_by_gsi(gsi);
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
@@ -616,6 +636,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
}
irq = xen_allocate_irq_gsi(gsi);
+ if (irq < 0)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
@@ -633,9 +655,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
goto out;
}
- irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector);
- irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0;
- pirq_to_irq[pirq] = irq;
+ xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+ shareable ? PIRQ_SHAREABLE : 0);
out:
spin_unlock(&irq_mapping_update_lock);
@@ -672,8 +693,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
handle_level_irq, name);
- irq_info[irq] = mk_pirq_info(0, pirq, 0, vector);
- pirq_to_irq[pirq] = irq;
+ xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
ret = irq_set_msi_desc(irq, msidesc);
if (ret < 0)
goto error_irq;
@@ -709,9 +729,6 @@ int xen_destroy_irq(int irq)
goto out;
}
}
- pirq_to_irq[info->u.pirq.pirq] = -1;
-
- irq_info[irq] = mk_unbound_info();
xen_free_irq(irq);
@@ -720,19 +737,26 @@ out:
return rc;
}
-int xen_vector_from_irq(unsigned irq)
+int xen_irq_from_pirq(unsigned pirq)
{
- return vector_from_irq(irq);
-}
+ int irq;
-int xen_gsi_from_irq(unsigned irq)
-{
- return gsi_from_irq(irq);
-}
+ struct irq_info *info;
-int xen_irq_from_pirq(unsigned pirq)
-{
- return pirq_to_irq[pirq];
+ spin_lock(&irq_mapping_update_lock);
+
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info->type != IRQT_PIRQ)
+ continue;
+ irq = info->irq;
+ if (info->u.pirq.pirq == pirq)
+ goto out;
+ }
+ irq = -1;
+out:
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
}
int bind_evtchn_to_irq(unsigned int evtchn)
@@ -745,14 +769,16 @@ int bind_evtchn_to_irq(unsigned int evtchn)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
+ if (irq < 0)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
handle_fasteoi_irq, "event");
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_evtchn_info(evtchn);
+ xen_irq_info_evtchn_init(irq, evtchn);
}
+out:
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -782,9 +808,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
BUG();
evtchn = bind_ipi.port;
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_ipi_info(evtchn, ipi);
- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+ xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
@@ -821,6 +845,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
if (irq == -1) {
irq = xen_allocate_irq_dynamic();
+ if (irq < 0)
+ goto out;
set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
@@ -832,14 +858,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
BUG();
evtchn = bind_virq.port;
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_virq_info(evtchn, virq);
-
- per_cpu(virq_to_irq, cpu)[virq] = irq;
+ xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
+out:
spin_unlock(&irq_mapping_update_lock);
return irq;
@@ -876,11 +900,9 @@ static void unbind_from_irq(unsigned int irq)
evtchn_to_irq[evtchn] = -1;
}
- if (irq_info[irq].type != IRQT_UNBOUND) {
- irq_info[irq] = mk_unbound_info();
+ BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
- xen_free_irq(irq);
- }
+ xen_free_irq(irq);
spin_unlock(&irq_mapping_update_lock);
}
@@ -894,6 +916,8 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
int retval;
irq = bind_evtchn_to_irq(evtchn);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -935,6 +959,8 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
int retval;
irq = bind_virq_to_irq(virq, cpu);
+ if (irq < 0)
+ return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
if (retval != 0) {
unbind_from_irq(irq);
@@ -986,7 +1012,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
struct shared_info *sh = HYPERVISOR_shared_info;
int cpu = smp_processor_id();
- unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu);
+ unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
int i;
unsigned long flags;
static DEFINE_SPINLOCK(debug_lock);
@@ -1064,6 +1090,13 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
}
static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+static DEFINE_PER_CPU(unsigned int, current_word_idx);
+static DEFINE_PER_CPU(unsigned int, current_bit_idx);
+
+/*
+ * Mask out the i least significant bits of w
+ */
+#define MASK_LSBS(w, i) ((w) & ((~0UL) << (i)))
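A worked example of the macro's effect (not part of the patch): with
w = 0b10110110 and i = 3,

	/*
	 *   (~0UL) << 3      = ...11111000
	 *   MASK_LSBS(w, 3)  = 0b10110000
	 *
	 * so __ffs() on the result yields 4, the lowest pending index >= i,
	 * which is what lets the scan resume "in the middle" of a word.
	 */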
/*
* Search the CPUs pending events bitmasks. For each one found, map
@@ -1076,6 +1109,9 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count);
*/
static void __xen_evtchn_do_upcall(void)
{
+ int start_word_idx, start_bit_idx;
+ int word_idx, bit_idx;
+ int i;
int cpu = get_cpu();
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
@@ -1094,17 +1130,57 @@ static void __xen_evtchn_do_upcall(void)
wmb();
#endif
pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
- while (pending_words != 0) {
+
+ start_word_idx = __this_cpu_read(current_word_idx);
+ start_bit_idx = __this_cpu_read(current_bit_idx);
+
+ word_idx = start_word_idx;
+
+ for (i = 0; pending_words != 0; i++) {
unsigned long pending_bits;
- int word_idx = __ffs(pending_words);
- pending_words &= ~(1UL << word_idx);
+ unsigned long words;
- while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
- int bit_idx = __ffs(pending_bits);
- int port = (word_idx * BITS_PER_LONG) + bit_idx;
- int irq = evtchn_to_irq[port];
+ words = MASK_LSBS(pending_words, word_idx);
+
+ /*
+ * If we masked out all events, wrap to beginning.
+ */
+ if (words == 0) {
+ word_idx = 0;
+ bit_idx = 0;
+ continue;
+ }
+ word_idx = __ffs(words);
+
+ pending_bits = active_evtchns(cpu, s, word_idx);
+ bit_idx = 0; /* usually scan entire word from start */
+ if (word_idx == start_word_idx) {
+ /* We scan the starting word in two parts */
+ if (i == 0)
+ /* 1st time: start in the middle */
+ bit_idx = start_bit_idx;
+ else
+ /* 2nd time: mask bits done already */
+ pending_bits &= (1UL << start_bit_idx) - 1;
+ }
+
+ do {
+ unsigned long bits;
+ int port, irq;
struct irq_desc *desc;
+ bits = MASK_LSBS(pending_bits, bit_idx);
+
+ /* If we masked out all events, move on. */
+ if (bits == 0)
+ break;
+
+ bit_idx = __ffs(bits);
+
+ /* Process port. */
+ port = (word_idx * BITS_PER_LONG) + bit_idx;
+ irq = evtchn_to_irq[port];
+
mask_evtchn(port);
clear_evtchn(port);
@@ -1113,7 +1189,21 @@ static void __xen_evtchn_do_upcall(void)
if (desc)
generic_handle_irq_desc(irq, desc);
}
- }
+
+ bit_idx = (bit_idx + 1) % BITS_PER_LONG;
+
+ /* Next caller starts at last processed + 1 */
+ __this_cpu_write(current_word_idx,
+ bit_idx ? word_idx :
+ (word_idx+1) % BITS_PER_LONG);
+ __this_cpu_write(current_bit_idx, bit_idx);
+ } while (bit_idx != 0);
+
+ /* Scan start_word_idx twice; all others once. */
+ if ((word_idx != start_word_idx) || (i != 0))
+ pending_words &= ~(1UL << word_idx);
+
+ word_idx = (word_idx + 1) % BITS_PER_LONG;
}
BUG_ON(!irqs_disabled());
@@ -1163,8 +1253,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_evtchn_info(evtchn);
+ xen_irq_info_evtchn_init(irq, evtchn);
spin_unlock(&irq_mapping_update_lock);
@@ -1181,10 +1270,14 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
- /* events delivered via platform PCI interrupts are always
- * routed to vcpu 0 */
- if (!VALID_EVTCHN(evtchn) ||
- (xen_hvm_domain() && !xen_have_vector_callback))
+ if (!VALID_EVTCHN(evtchn))
+ return -1;
+
+ /*
+ * Events delivered via platform PCI interrupts are always
+ * routed to vcpu 0 and hence cannot be rebound.
+ */
+ if (xen_hvm_domain() && !xen_have_vector_callback)
return -1;
/* Send future instances of this interrupt to other vcpu. */
@@ -1271,19 +1364,22 @@ static int retrigger_dynirq(struct irq_data *data)
return ret;
}
-static void restore_cpu_pirqs(void)
+static void restore_pirqs(void)
{
int pirq, rc, irq, gsi;
struct physdev_map_pirq map_irq;
+ struct irq_info *info;
- for (pirq = 0; pirq < nr_irqs; pirq++) {
- irq = pirq_to_irq[pirq];
- if (irq == -1)
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ if (info->type != IRQT_PIRQ)
continue;
+ pirq = info->u.pirq.pirq;
+ gsi = info->u.pirq.gsi;
+ irq = info->irq;
+
/* save/restore of PT devices doesn't work, so at this point the
* only devices present are GSI based emulated devices */
- gsi = gsi_from_irq(irq);
if (!gsi)
continue;
@@ -1296,8 +1392,7 @@ static void restore_cpu_pirqs(void)
if (rc) {
printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
gsi, irq, pirq, rc);
- irq_info[irq] = mk_unbound_info();
- pirq_to_irq[pirq] = -1;
+ xen_free_irq(irq);
continue;
}
@@ -1327,8 +1422,7 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn = bind_virq.port;
/* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_virq_info(evtchn, virq);
+ xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1352,8 +1446,7 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn = bind_ipi.port;
/* Record the new mapping. */
- evtchn_to_irq[evtchn] = irq;
- irq_info[irq] = mk_ipi_info(evtchn, ipi);
+ xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
}
}
@@ -1413,7 +1506,8 @@ void xen_poll_irq(int irq)
void xen_irq_resume(void)
{
- unsigned int cpu, irq, evtchn;
+ unsigned int cpu, evtchn;
+ struct irq_info *info;
init_evtchn_cpu_bindings();
@@ -1422,8 +1516,8 @@ void xen_irq_resume(void)
mask_evtchn(evtchn);
/* No IRQ <-> event-channel mappings. */
- for (irq = 0; irq < nr_irqs; irq++)
- irq_info[irq].evtchn = 0; /* zap event-channel binding */
+ list_for_each_entry(info, &xen_irq_list_head, list)
+ info->evtchn = 0; /* zap event-channel binding */
for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
evtchn_to_irq[evtchn] = -1;
@@ -1433,7 +1527,7 @@ void xen_irq_resume(void)
restore_cpu_ipis(cpu);
}
- restore_cpu_pirqs();
+ restore_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
@@ -1519,17 +1613,6 @@ void __init xen_init_IRQ(void)
{
int i;
- cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
- GFP_KERNEL);
- irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
-
- /* We are using nr_irqs as the maximum number of pirq available but
- * that number is actually chosen by Xen and we don't know exactly
- * what it is. Be careful choosing high pirq numbers. */
- pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
- for (i = 0; i < nr_irqs; i++)
- pirq_to_irq[i] = -1;
-
evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
GFP_KERNEL);
for (i = 0; i < NR_EVENT_CHANNELS; i++)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
new file mode 100644
index 000000000000..a7ffdfe19fc9
--- /dev/null
+++ b/drivers/xen/gntalloc.c
@@ -0,0 +1,545 @@
+/******************************************************************************
+ * gntalloc.c
+ *
+ * Device for creating grant references (in user-space) that may be shared
+ * with other domains.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * This driver exists to allow userspace programs in Linux to allocate kernel
+ * memory that will later be shared with another domain. Without this device,
+ * Linux userspace programs cannot create grant references.
+ *
+ * How this stuff works:
+ * X -> granting a page to Y
+ * Y -> mapping the grant from X
+ *
+ * 1. X uses the gntalloc device to allocate a page of kernel memory, P.
+ * 2. X creates an entry in the grant table that says domid(Y) can access P.
+ * This is done without a hypercall unless the grant table needs expansion.
+ * 3. X gives the grant reference identifier, GREF, to Y.
+ * 4. Y maps the page, either directly into kernel memory for use in a backend
+ * driver, or via the gntdev device to map into the address space of an
+ * application running in Y. This is the first point at which Xen does any
+ * tracking of the page.
+ * 5. A program in X mmap()s a segment of the gntalloc device that corresponds
+ * to the shared page, and can now communicate with Y over the shared page.
+ *
+ *
+ * NOTE TO USERSPACE LIBRARIES:
+ * The grant allocation and mmap()ing are, naturally, two separate operations.
+ * You set up the sharing by calling the create ioctl() and then the mmap().
+ * Teardown requires munmap() and either close() or ioctl().
+ *
+ * WARNING: Since Xen does not allow a guest to forcibly end the use of a grant
+ * reference, this device can be used to consume kernel memory by leaving grant
+ * references mapped by another domain when an application exits. Therefore,
+ * there is a global limit on the number of pages that can be allocated. When
+ * all references to the page are unmapped, it will be freed during the next
+ * grant operation.
+ */
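A minimal user-space sketch of the create-then-mmap flow described above
(device path, page size handling and error handling are assumptions; the
ioctl structure fields follow this file's definitions):

	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <xen/gntalloc.h>	/* assumed visible to userspace builds */

	/* Share one writable page with domid; returns the fd, which must stay
	 * open for the grant to stay alive, or -1 on failure. */
	static int share_page(uint16_t domid, uint32_t *gref, void **addr)
	{
		struct ioctl_gntalloc_alloc_gref op = {
			.domid = domid,
			.flags = GNTALLOC_FLAG_WRITABLE,
			.count = 1,
		};
		int fd = open("/dev/xen/gntalloc", O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &op) < 0) {
			close(fd);
			return -1;
		}
		/* op.index is the file offset backing the new page (step 5) */
		*addr = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			     MAP_SHARED, fd, op.index);
		if (*addr == MAP_FAILED) {
			close(fd);
			return -1;
		}
		*gref = op.gref_ids[0];	/* hand this to the mapping domain (step 3) */
		return fd;
	}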
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/grant_table.h>
+#include <xen/gntalloc.h>
+#include <xen/events.h>
+
+static int limit = 1024;
+module_param(limit, int, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
+ "the gntalloc device");
+
+static LIST_HEAD(gref_list);
+static DEFINE_SPINLOCK(gref_lock);
+static int gref_size;
+
+struct notify_info {
+ uint16_t pgoff:12; /* Bits 0-11: Offset of the byte to clear */
+ uint16_t flags:2; /* Bits 12-13: Unmap notification flags */
+ int event; /* Port (event channel) to notify */
+};
+
+/* Metadata on a grant reference. */
+struct gntalloc_gref {
+ struct list_head next_gref; /* list entry gref_list */
+ struct list_head next_file; /* list entry file->list, if open */
+ struct page *page; /* The shared page */
+ uint64_t file_index; /* File offset for mmap() */
+ unsigned int users; /* Use count - when zero, waiting on Xen */
+ grant_ref_t gref_id; /* The grant reference number */
+ struct notify_info notify; /* Unmap notification */
+};
+
+struct gntalloc_file_private_data {
+ struct list_head list;
+ uint64_t index;
+};
+
+static void __del_gref(struct gntalloc_gref *gref);
+
+static void do_cleanup(void)
+{
+ struct gntalloc_gref *gref, *n;
+ list_for_each_entry_safe(gref, n, &gref_list, next_gref) {
+ if (!gref->users)
+ __del_gref(gref);
+ }
+}
+
+static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
+ uint32_t *gref_ids, struct gntalloc_file_private_data *priv)
+{
+ int i, rc, readonly;
+ LIST_HEAD(queue_gref);
+ LIST_HEAD(queue_file);
+ struct gntalloc_gref *gref;
+
+ readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE);
+ for (i = 0; i < op->count; i++) {
+ gref = kzalloc(sizeof(*gref), GFP_KERNEL);
+ if (!gref) {
+ rc = -ENOMEM;
+ goto undo;
+ }
+ list_add_tail(&gref->next_gref, &queue_gref);
+ list_add_tail(&gref->next_file, &queue_file);
+ gref->users = 1;
+ gref->file_index = op->index + i * PAGE_SIZE;
+ gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+ if (!gref->page) {
+ rc = -ENOMEM;
+ goto undo;
+ }
+
+ /* Grant foreign access to the page. gref_id is a grant_ref_t
+ * (unsigned), so check the return value for errors before
+ * storing it there. */
+ rc = gnttab_grant_foreign_access(op->domid,
+ pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+ if (rc < 0)
+ goto undo;
+ gref_ids[i] = gref->gref_id = rc;
+ }
+
+ /* Add to gref lists. */
+ spin_lock(&gref_lock);
+ list_splice_tail(&queue_gref, &gref_list);
+ list_splice_tail(&queue_file, &priv->list);
+ spin_unlock(&gref_lock);
+
+ return 0;
+
+undo:
+ spin_lock(&gref_lock);
+ gref_size -= (op->count - i);
+
+ list_for_each_entry(gref, &queue_file, next_file) {
+ /* __del_gref does not remove from queue_file */
+ __del_gref(gref);
+ }
+
+ /* It's possible for the target domain to map the just-allocated grant
+ * references by blindly guessing their IDs; if this is done, then
+ * __del_gref will leave them in the queue_gref list. They need to be
+ * added to the global list so that we can free them when they are no
+ * longer referenced.
+ */
+ if (unlikely(!list_empty(&queue_gref)))
+ list_splice_tail(&queue_gref, &gref_list);
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static void __del_gref(struct gntalloc_gref *gref)
+{
+ if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ uint8_t *tmp = kmap(gref->page);
+ tmp[gref->notify.pgoff] = 0;
+ kunmap(gref->page);
+ }
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ notify_remote_via_evtchn(gref->notify.event);
+
+ gref->notify.flags = 0;
+
+ if (gref->gref_id > 0) {
+ if (gnttab_query_foreign_access(gref->gref_id))
+ return;
+
+ if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
+ return;
+ }
+
+ gref_size--;
+ list_del(&gref->next_gref);
+
+ if (gref->page)
+ __free_page(gref->page);
+
+ kfree(gref);
+}
+
+/* finds contiguous grant references in a file, returns the first */
+static struct gntalloc_gref *find_grefs(struct gntalloc_file_private_data *priv,
+ uint64_t index, uint32_t count)
+{
+ struct gntalloc_gref *rv = NULL, *gref;
+ list_for_each_entry(gref, &priv->list, next_file) {
+ if (gref->file_index == index && !rv)
+ rv = gref;
+ if (rv) {
+ if (gref->file_index != index)
+ return NULL;
+ index += PAGE_SIZE;
+ count--;
+ if (count == 0)
+ return rv;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * -------------------------------------
+ * File operations.
+ * -------------------------------------
+ */
+static int gntalloc_open(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_nomem;
+ INIT_LIST_HEAD(&priv->list);
+
+ filp->private_data = priv;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ return 0;
+
+out_nomem:
+ return -ENOMEM;
+}
+
+static int gntalloc_release(struct inode *inode, struct file *filp)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ spin_lock(&gref_lock);
+ while (!list_empty(&priv->list)) {
+ gref = list_entry(priv->list.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ }
+ kfree(priv);
+ spin_unlock(&gref_lock);
+
+ return 0;
+}
+
+static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
+ struct ioctl_gntalloc_alloc_gref __user *arg)
+{
+ int rc = 0;
+ struct ioctl_gntalloc_alloc_gref op;
+ uint32_t *gref_ids;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* kcalloc() checks the untrusted op.count for multiplication overflow */
+ gref_ids = kcalloc(op.count, sizeof(gref_ids[0]), GFP_TEMPORARY);
+ if (!gref_ids) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock(&gref_lock);
+ /* Clean up pages whose local user count has dropped to zero but which
+ * may still be mapped by remote domains. Since those pages count
+ * towards the limit that we are about to enforce, removing them here
+ * is a good idea.
+ */
+ do_cleanup();
+ if (gref_size + op.count > limit) {
+ spin_unlock(&gref_lock);
+ rc = -ENOSPC;
+ goto out_free;
+ }
+ gref_size += op.count;
+ op.index = priv->index;
+ priv->index += op.count * PAGE_SIZE;
+ spin_unlock(&gref_lock);
+
+ rc = add_grefs(&op, gref_ids, priv);
+ if (rc < 0)
+ goto out_free;
+
+ /* Once add_grefs() returns, it is unsafe to touch the new references,
+ * since a concurrent ioctl could remove them (by guessing their index).
+ * If the userspace application didn't provide valid memory to write the
+ * IDs to, the grants can only be released by closing the file - which
+ * will happen anyway, since the application will segfault when it tries
+ * to access the IDs in order to free them.
+ */
+ if (copy_to_user(arg, &op, sizeof(op))) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+ if (copy_to_user(arg->gref_ids, gref_ids,
+ sizeof(gref_ids[0]) * op.count)) {
+ rc = -EFAULT;
+ goto out_free;
+ }
+
+out_free:
+ kfree(gref_ids);
+out:
+ return rc;
+}
+
+static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ int i, rc = 0;
+ struct ioctl_gntalloc_dealloc_gref op;
+ struct gntalloc_gref *gref, *n;
+
+ pr_debug("%s: priv %p\n", __func__, priv);
+
+ if (copy_from_user(&op, arg, sizeof(op))) {
+ rc = -EFAULT;
+ goto dealloc_grant_out;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, op.index, op.count);
+ if (gref) {
+ /* Remove from the file list only, and decrease reference count.
+ * The later call to do_cleanup() will remove from gref_list and
+ * free the memory if the pages aren't mapped anywhere.
+ */
+ for (i = 0; i < op.count; i++) {
+ n = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ list_del(&gref->next_file);
+ gref->users--;
+ gref = n;
+ }
+ } else {
+ rc = -EINVAL;
+ }
+
+ do_cleanup();
+
+ spin_unlock(&gref_lock);
+dealloc_grant_out:
+ return rc;
+}
+
+static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
+ void __user *arg)
+{
+ struct ioctl_gntalloc_unmap_notify op;
+ struct gntalloc_gref *gref;
+ uint64_t index;
+ int pgoff;
+ int rc;
+
+ if (copy_from_user(&op, arg, sizeof(op)))
+ return -EFAULT;
+
+ index = op.index & ~(PAGE_SIZE - 1);
+ pgoff = op.index & (PAGE_SIZE - 1);
+
+ spin_lock(&gref_lock);
+
+ gref = find_grefs(priv, index, 1);
+ if (!gref) {
+ rc = -ENOENT;
+ goto unlock_out;
+ }
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ gref->notify.flags = op.action;
+ gref->notify.pgoff = pgoff;
+ gref->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
+ spin_unlock(&gref_lock);
+ return rc;
+}
+
+static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+
+ switch (cmd) {
+ case IOCTL_GNTALLOC_ALLOC_GREF:
+ return gntalloc_ioctl_alloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_DEALLOC_GREF:
+ return gntalloc_ioctl_dealloc(priv, (void __user *)arg);
+
+ case IOCTL_GNTALLOC_SET_UNMAP_NOTIFY:
+ return gntalloc_ioctl_unmap_notify(priv, (void __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static void gntalloc_vma_close(struct vm_area_struct *vma)
+{
+ struct gntalloc_gref *gref = vma->vm_private_data;
+ if (!gref)
+ return;
+
+ spin_lock(&gref_lock);
+ gref->users--;
+ if (gref->users == 0)
+ __del_gref(gref);
+ spin_unlock(&gref_lock);
+}
+
+static struct vm_operations_struct gntalloc_vmops = {
+ .close = gntalloc_vma_close,
+};
+
+static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_gref *gref;
+ int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int rv, i;
+
+ pr_debug("%s: priv %p, page %lu+%d\n", __func__,
+ priv, vma->vm_pgoff, count);
+
+ if (!(vma->vm_flags & VM_SHARED)) {
+ printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&gref_lock);
+ gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
+ if (gref == NULL) {
+ rv = -ENOENT;
+ pr_debug("%s: Could not find grant reference",
+ __func__);
+ goto out_unlock;
+ }
+
+ vma->vm_private_data = gref;
+
+ vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTCOPY;
+ vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP;
+
+ vma->vm_ops = &gntalloc_vmops;
+
+ for (i = 0; i < count; i++) {
+ gref->users++;
+ rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ gref->page);
+ if (rv)
+ goto out_unlock;
+
+ gref = list_entry(gref->next_file.next,
+ struct gntalloc_gref, next_file);
+ }
+ rv = 0;
+
+out_unlock:
+ spin_unlock(&gref_lock);
+ return rv;
+}
+
+static const struct file_operations gntalloc_fops = {
+ .owner = THIS_MODULE,
+ .open = gntalloc_open,
+ .release = gntalloc_release,
+ .unlocked_ioctl = gntalloc_ioctl,
+ .mmap = gntalloc_mmap
+};
+
+/*
+ * -------------------------------------
+ * Module creation/destruction.
+ * -------------------------------------
+ */
+static struct miscdevice gntalloc_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/gntalloc",
+ .fops = &gntalloc_fops,
+};
+
+static int __init gntalloc_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&gntalloc_miscdev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register misc gntalloc device\n");
+ return err;
+ }
+
+ pr_debug("Created grant allocation device at %d,%d\n",
+ MISC_MAJOR, gntalloc_miscdev.minor);
+
+ return 0;
+}
+
+static void __exit gntalloc_exit(void)
+{
+ misc_deregister(&gntalloc_miscdev);
+}
+
+module_init(gntalloc_init);
+module_exit(gntalloc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Carter Weatherly <carter.weatherly@jhuapl.edu>, "
+ "Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("User-space grant reference allocator driver");
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 1e31cdcdae1e..017ce600fbc6 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -32,10 +32,13 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
+#include <xen/balloon.h>
#include <xen/gntdev.h>
+#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
@@ -45,35 +48,46 @@ MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
"Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
-static int limit = 1024;
+static int limit = 1024*1024;
module_param(limit, int, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at "
- "once by a gntdev instance");
+MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
+ "the gntdev device");
+
+static atomic_t pages_mapped = ATOMIC_INIT(0);
+
+static int use_ptemod;
struct gntdev_priv {
struct list_head maps;
- uint32_t used;
- uint32_t limit;
/* lock protects maps from concurrent changes */
spinlock_t lock;
struct mm_struct *mm;
struct mmu_notifier mn;
};
+struct unmap_notify {
+ int flags;
+ /* Address relative to the start of the grant_map */
+ int addr;
+ int event;
+};
+
struct grant_map {
struct list_head next;
- struct gntdev_priv *priv;
struct vm_area_struct *vma;
int index;
int count;
int flags;
- int is_mapped;
+ atomic_t users;
+ struct unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
struct gnttab_unmap_grant_ref *unmap_ops;
struct page **pages;
};
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
+
/* ------------------------------------------------------------------ */
static void gntdev_print_maps(struct gntdev_priv *priv,
@@ -82,9 +96,7 @@ static void gntdev_print_maps(struct gntdev_priv *priv,
#ifdef DEBUG
struct grant_map *map;
- pr_debug("maps list (priv %p, usage %d/%d)\n",
- priv, priv->used, priv->limit);
-
+ pr_debug("%s: maps list (priv %p)\n", __func__, priv);
list_for_each_entry(map, &priv->maps, next)
pr_debug(" index %2d, count %2d %s\n",
map->index, map->count,
@@ -111,27 +123,21 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
NULL == add->pages)
goto err;
+ if (alloc_xenballooned_pages(count, add->pages))
+ goto err;
+
for (i = 0; i < count; i++) {
- add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (add->pages[i] == NULL)
- goto err;
+ add->map_ops[i].handle = -1;
+ add->unmap_ops[i].handle = -1;
}
add->index = 0;
add->count = count;
- add->priv = priv;
-
- if (add->count + priv->used > priv->limit)
- goto err;
+ atomic_set(&add->users, 1);
return add;
err:
- if (add->pages)
- for (i = 0; i < count; i++) {
- if (add->pages[i])
- __free_page(add->pages[i]);
- }
kfree(add->pages);
kfree(add->grants);
kfree(add->map_ops);
@@ -154,7 +160,6 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
list_add_tail(&add->next, &priv->maps);
done:
- priv->used += add->count;
gntdev_print_maps(priv, "[new]", add->index);
}
@@ -166,57 +171,33 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
list_for_each_entry(map, &priv->maps, next) {
if (map->index != index)
continue;
- if (map->count != count)
+ if (count && map->count != count)
continue;
return map;
}
return NULL;
}
-static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv,
- unsigned long vaddr)
+static void gntdev_put_map(struct grant_map *map)
{
- struct grant_map *map;
-
- list_for_each_entry(map, &priv->maps, next) {
- if (!map->vma)
- continue;
- if (vaddr < map->vma->vm_start)
- continue;
- if (vaddr >= map->vma->vm_end)
- continue;
- return map;
- }
- return NULL;
-}
-
-static int gntdev_del_map(struct grant_map *map)
-{
- int i;
+ if (!map)
+ return;
- if (map->vma)
- return -EBUSY;
- for (i = 0; i < map->count; i++)
- if (map->unmap_ops[i].handle)
- return -EBUSY;
+ if (!atomic_dec_and_test(&map->users))
+ return;
- map->priv->used -= map->count;
- list_del(&map->next);
- return 0;
-}
+ atomic_sub(map->count, &pages_mapped);
-static void gntdev_free_map(struct grant_map *map)
-{
- int i;
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
+ notify_remote_via_evtchn(map->notify.event);
+ }
- if (!map)
- return;
+ if (map->pages) {
+ if (!use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
- if (map->pages)
- for (i = 0; i < map->count; i++) {
- if (map->pages[i])
- __free_page(map->pages[i]);
- }
+ free_xenballooned_pages(map->count, map->pages);
+ }
kfree(map->pages);
kfree(map->grants);
kfree(map->map_ops);
@@ -231,18 +212,17 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token,
{
struct grant_map *map = data;
unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
+ int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
u64 pte_maddr;
BUG_ON(pgnr >= map->count);
pte_maddr = arbitrary_virt_to_machine(pte).maddr;
- gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
+ gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
map->grants[pgnr].ref,
map->grants[pgnr].domid);
- gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr,
- GNTMAP_contains_pte | map->flags,
- 0 /* handle */);
+ gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
+ -1 /* handle */);
return 0;
}
@@ -250,6 +230,21 @@ static int map_grant_pages(struct grant_map *map)
{
int i, err = 0;
+ if (!use_ptemod) {
+ /* Note: it could already be mapped */
+ if (map->map_ops[0].handle != -1)
+ return 0;
+ for (i = 0; i < map->count; i++) {
+ unsigned long addr = (unsigned long)
+ pfn_to_kaddr(page_to_pfn(map->pages[i]));
+ gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
+ map->grants[i].ref,
+ map->grants[i].domid);
+ gnttab_set_unmap_op(&map->unmap_ops[i], addr,
+ map->flags, -1 /* handle */);
+ }
+ }
+
pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs(map->map_ops, map->pages, map->count);
if (err)
@@ -258,28 +253,81 @@ static int map_grant_pages(struct grant_map *map)
for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status)
err = -EINVAL;
- map->unmap_ops[i].handle = map->map_ops[i].handle;
+ else {
+ BUG_ON(map->map_ops[i].handle == -1);
+ map->unmap_ops[i].handle = map->map_ops[i].handle;
+ pr_debug("map handle=%d\n", map->map_ops[i].handle);
+ }
}
return err;
}
-static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
int i, err = 0;
- pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages);
+ if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
+ int pgno = (map->notify.addr >> PAGE_SHIFT);
+ if (pgno >= offset && pgno < offset + pages && use_ptemod) {
+ void __user *tmp = (void __user *)
+ map->vma->vm_start + map->notify.addr;
+ /* err is 0 here; copy_to_user() returns the number of bytes not copied */
+ err = copy_to_user(tmp, &err, 1);
+ if (err)
+ return -EFAULT;
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ } else if (pgno >= offset && pgno < offset + pages) {
+ uint8_t *tmp = kmap(map->pages[pgno]);
+ tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
+ kunmap(map->pages[pgno]);
+ map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
+ }
+ }
+
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
if (err)
return err;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
err = -EINVAL;
- map->unmap_ops[offset+i].handle = 0;
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = -1;
}
return err;
}
+static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+{
+ int range, err = 0;
+
+ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
+
+ /* It is possible the requested range will have a "hole" where we
+ * already unmapped some of the grants. Only unmap valid ranges.
+ */
+ while (pages && !err) {
+ while (pages && map->unmap_ops[offset].handle == -1) {
+ offset++;
+ pages--;
+ }
+ range = 0;
+ while (range < pages) {
+ /* stop the run at the first hole; the skip loop above eats it */
+ if (map->unmap_ops[offset+range].handle == -1)
+ break;
+ range++;
+ }
+ err = __unmap_grant_pages(map, offset, range);
+ offset += range;
+ pages -= range;
+ }
+
+ return err;
+}
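To see the hole handling concretely, a short trace with hypothetical handle
values (not taken from the patch):

	/*
	 * unmap_ops[].handle = { 3, -1, 7, 9, -1 }, offset = 0, pages = 5
	 *
	 * pass 1: nothing to skip, run ends at the hole  -> __unmap [0, 1)
	 * pass 2: skip index 1, run ends at index 4      -> __unmap [2, 4)
	 * pass 3: skip index 4, pages reaches 0          -> done
	 */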
+
/* ------------------------------------------------------------------ */
static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -287,22 +335,13 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
struct grant_map *map = vma->vm_private_data;
pr_debug("close %p\n", vma);
- map->is_mapped = 0;
map->vma = NULL;
vma->vm_private_data = NULL;
-}
-
-static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n",
- vmf->virtual_address, vmf->pgoff);
- vmf->flags = VM_FAULT_ERROR;
- return 0;
+ gntdev_put_map(map);
}
static struct vm_operations_struct gntdev_vmops = {
.close = gntdev_vma_close,
- .fault = gntdev_vma_fault,
};
/* ------------------------------------------------------------------ */
@@ -320,8 +359,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn,
list_for_each_entry(map, &priv->maps, next) {
if (!map->vma)
continue;
- if (!map->is_mapped)
- continue;
if (map->vma->vm_start >= end)
continue;
if (map->vma->vm_end <= start)
@@ -386,16 +423,17 @@ static int gntdev_open(struct inode *inode, struct file *flip)
INIT_LIST_HEAD(&priv->maps);
spin_lock_init(&priv->lock);
- priv->limit = limit;
- priv->mm = get_task_mm(current);
- if (!priv->mm) {
- kfree(priv);
- return -ENOMEM;
+ if (use_ptemod) {
+ priv->mm = get_task_mm(current);
+ if (!priv->mm) {
+ kfree(priv);
+ return -ENOMEM;
+ }
+ priv->mn.ops = &gntdev_mmu_ops;
+ ret = mmu_notifier_register(&priv->mn, priv->mm);
+ mmput(priv->mm);
}
- priv->mn.ops = &gntdev_mmu_ops;
- ret = mmu_notifier_register(&priv->mn, priv->mm);
- mmput(priv->mm);
if (ret) {
kfree(priv);
@@ -412,21 +450,19 @@ static int gntdev_release(struct inode *inode, struct file *flip)
{
struct gntdev_priv *priv = flip->private_data;
struct grant_map *map;
- int err;
pr_debug("priv %p\n", priv);
spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
- err = gntdev_del_map(map);
- if (WARN_ON(err))
- gntdev_free_map(map);
-
+ list_del(&map->next);
+ gntdev_put_map(map);
}
spin_unlock(&priv->lock);
- mmu_notifier_unregister(&priv->mn, priv->mm);
+ if (use_ptemod)
+ mmu_notifier_unregister(&priv->mn, priv->mm);
kfree(priv);
return 0;
}
@@ -443,16 +479,21 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
pr_debug("priv %p, add %d\n", priv, op.count);
if (unlikely(op.count <= 0))
return -EINVAL;
- if (unlikely(op.count > priv->limit))
- return -EINVAL;
err = -ENOMEM;
map = gntdev_alloc_map(priv, op.count);
if (!map)
return err;
+
+ if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
+ pr_debug("can't map: over limit\n");
+ gntdev_put_map(map);
+ return err;
+ }
+
if (copy_from_user(map->grants, &u->refs,
sizeof(map->grants[0]) * op.count) != 0) {
- gntdev_free_map(map);
+ gntdev_put_map(map);
return err;
}
@@ -461,13 +502,9 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
op.index = map->index << PAGE_SHIFT;
spin_unlock(&priv->lock);
- if (copy_to_user(u, &op, sizeof(op)) != 0) {
- spin_lock(&priv->lock);
- gntdev_del_map(map);
- spin_unlock(&priv->lock);
- gntdev_free_map(map);
- return err;
- }
+ if (copy_to_user(u, &op, sizeof(op)) != 0)
+ return -EFAULT;
+
return 0;
}
@@ -484,11 +521,12 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
spin_lock(&priv->lock);
map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
- if (map)
- err = gntdev_del_map(map);
+ if (map) {
+ list_del(&map->next);
+ gntdev_put_map(map);
+ err = 0;
+ }
spin_unlock(&priv->lock);
- if (!err)
- gntdev_free_map(map);
return err;
}
@@ -496,43 +534,66 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
struct ioctl_gntdev_get_offset_for_vaddr op;
+ struct vm_area_struct *vma;
struct grant_map *map;
if (copy_from_user(&op, u, sizeof(op)) != 0)
return -EFAULT;
pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
- spin_lock(&priv->lock);
- map = gntdev_find_map_vaddr(priv, op.vaddr);
- if (map == NULL ||
- map->vma->vm_start != op.vaddr) {
- spin_unlock(&priv->lock);
+ /* find_vma() requires mmap_sem to be held */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, op.vaddr);
+ if (!vma || vma->vm_ops != &gntdev_vmops) {
+ up_read(&current->mm->mmap_sem);
return -EINVAL;
- }
+ }
+
+ map = vma->vm_private_data;
+ if (!map) {
+ up_read(&current->mm->mmap_sem);
+ return -EINVAL;
+ }
+
op.offset = map->index << PAGE_SHIFT;
op.count = map->count;
+ up_read(&current->mm->mmap_sem);
- spin_unlock(&priv->lock);
if (copy_to_user(u, &op, sizeof(op)) != 0)
return -EFAULT;
return 0;
}
-static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv,
- struct ioctl_gntdev_set_max_grants __user *u)
+static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
- struct ioctl_gntdev_set_max_grants op;
+ struct ioctl_gntdev_unmap_notify op;
+ struct grant_map *map;
+ int rc;
- if (copy_from_user(&op, u, sizeof(op)) != 0)
+ if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
- pr_debug("priv %p, limit %d\n", priv, op.count);
- if (op.count > limit)
- return -E2BIG;
+
+ if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
+ return -EINVAL;
spin_lock(&priv->lock);
- priv->limit = op.count;
+
+ list_for_each_entry(map, &priv->maps, next) {
+ uint64_t begin = (uint64_t)map->index << PAGE_SHIFT;
+ uint64_t end = (uint64_t)(map->index + map->count) << PAGE_SHIFT;
+ if (op.index >= begin && op.index < end)
+ goto found;
+ }
+ rc = -ENOENT;
+ goto unlock_out;
+
+ found:
+ if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
+ (map->flags & GNTMAP_readonly)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ map->notify.flags = op.action;
+ map->notify.addr = op.index - (map->index << PAGE_SHIFT);
+ map->notify.event = op.event_channel_port;
+ rc = 0;
+ unlock_out:
spin_unlock(&priv->lock);
- return 0;
+ return rc;
}
static long gntdev_ioctl(struct file *flip,
@@ -551,8 +612,8 @@ static long gntdev_ioctl(struct file *flip,
case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
- case IOCTL_GNTDEV_SET_MAX_GRANTS:
- return gntdev_ioctl_set_max_grants(priv, ptr);
+ case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
+ return gntdev_ioctl_notify(priv, ptr);
default:
pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
@@ -568,7 +629,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
int index = vma->vm_pgoff;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
struct grant_map *map;
- int err = -EINVAL;
+ int i, err = -EINVAL;
if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
return -EINVAL;
@@ -580,47 +641,70 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map = gntdev_find_map_index(priv, index, count);
if (!map)
goto unlock_out;
- if (map->vma)
+ if (use_ptemod && map->vma)
goto unlock_out;
- if (priv->mm != vma->vm_mm) {
+ if (use_ptemod && priv->mm != vma->vm_mm) {
printk(KERN_WARNING "Huh? Other mm?\n");
goto unlock_out;
}
+ atomic_inc(&map->users);
+
vma->vm_ops = &gntdev_vmops;
vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;
vma->vm_private_data = map;
- map->vma = vma;
- map->flags = GNTMAP_host_map | GNTMAP_application_map;
- if (!(vma->vm_flags & VM_WRITE))
- map->flags |= GNTMAP_readonly;
+ if (use_ptemod)
+ map->vma = vma;
+
+ if (map->flags) {
+ if ((vma->vm_flags & VM_WRITE) &&
+ (map->flags & GNTMAP_readonly))
+ goto out_unlock_put;
+ } else {
+ map->flags = GNTMAP_host_map;
+ if (!(vma->vm_flags & VM_WRITE))
+ map->flags |= GNTMAP_readonly;
+ }
spin_unlock(&priv->lock);
- err = apply_to_page_range(vma->vm_mm, vma->vm_start,
- vma->vm_end - vma->vm_start,
- find_grant_ptes, map);
- if (err) {
- printk(KERN_WARNING "find_grant_ptes() failure.\n");
- return err;
+ if (use_ptemod) {
+ err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ vma->vm_end - vma->vm_start,
+ find_grant_ptes, map);
+ if (err) {
+ printk(KERN_WARNING "find_grant_ptes() failure.\n");
+ goto out_put_map;
+ }
}
err = map_grant_pages(map);
- if (err) {
- printk(KERN_WARNING "map_grant_pages() failure.\n");
- return err;
- }
+ if (err)
+ goto out_put_map;
- map->is_mapped = 1;
+ if (!use_ptemod) {
+ for (i = 0; i < count; i++) {
+ err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
+ map->pages[i]);
+ if (err)
+ goto out_put_map;
+ }
+ }
return 0;
unlock_out:
spin_unlock(&priv->lock);
return err;
+
+out_unlock_put:
+ spin_unlock(&priv->lock);
+out_put_map:
+ if (use_ptemod)
+ map->vma = NULL;
+ gntdev_put_map(map);
+ return err;
}
static const struct file_operations gntdev_fops = {
@@ -646,6 +730,8 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
+ use_ptemod = xen_pv_domain();
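/*
 * Editorial note: use_ptemod is true only for PV domains, where grant
 * mappings must be written into the page tables via find_grant_ptes()
 * and torn down through the MMU notifier. Auto-translated (HVM) domains
 * instead map into the physmap and hand pages to userspace with
 * vm_insert_page(), which is why gntdev_mmap() above branches on it.
 */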
+
err = misc_register(&gntdev_miscdev);
if (err != 0) {
printk(KERN_ERR "Could not register gntdev device\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 9ef54ebc1194..3745a318defc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -458,7 +458,14 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
if (ret)
return ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
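/*
 * Editorial note: in auto-translated (HVM) guests there is no
 * machine-to-physical override table to maintain - the hypervisor
 * already performs the translation - so both the map and unmap paths
 * return early once the hypercall itself has succeeded.
 */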
+
for (i = 0; i < count; i++) {
+ /* Do not add to override if the map failed. */
+ if (map_ops[i].status)
+ continue;
+
/* m2p override only supported for GNTMAP_contains_pte mappings */
if (!(map_ops[i].flags & GNTMAP_contains_pte))
continue;
@@ -483,6 +490,9 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
if (ret)
return ret;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return ret;
+
for (i = 0; i < count; i++) {
ret = m2p_remove_override(pages[i]);
if (ret)
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index ebb292859b59..95143dd6904d 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled)
xen_mm_unpin_all();
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_HIBERNATION
static int xen_suspend(void *data)
{
struct suspend_info *si = data;
@@ -69,7 +69,7 @@ static int xen_suspend(void *data)
BUG_ON(!irqs_disabled());
- err = sysdev_suspend(PMSG_SUSPEND);
+ err = sysdev_suspend(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
err);
@@ -118,7 +118,7 @@ static void do_suspend(void)
}
#endif
- err = dpm_suspend_start(PMSG_SUSPEND);
+ err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
goto out_thaw;
@@ -127,7 +127,7 @@ static void do_suspend(void)
printk(KERN_DEBUG "suspending xenstore...\n");
xs_suspend();
- err = dpm_suspend_noirq(PMSG_SUSPEND);
+ err = dpm_suspend_noirq(PMSG_FREEZE);
if (err) {
printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
goto out_resume;
@@ -147,7 +147,7 @@ static void do_suspend(void)
err = stop_machine(xen_suspend, &si, cpumask_of(0));
- dpm_resume_noirq(PMSG_RESUME);
+ dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
if (err) {
printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
@@ -161,7 +161,7 @@ out_resume:
} else
xs_suspend_cancel();
- dpm_resume_end(PMSG_RESUME);
+ dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
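/*
 * Editorial note: the resume message now depends on whether the
 * hypervisor actually suspended us. A cancelled suspend means devices
 * were only frozen and should thaw back into their old state
 * (PMSG_THAW); a completed suspend comes back on what is effectively a
 * restored image, so devices get PMSG_RESTORE, matching the
 * hibernation-style callbacks wired up via dev_pm_ops further below.
 */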
/* Make sure timer events get retriggered on all CPUs */
clock_was_set();
@@ -173,7 +173,7 @@ out:
#endif
shutting_down = SHUTDOWN_INVALID;
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_HIBERNATION */
struct shutdown_handler {
const char *command;
@@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch,
{ "poweroff", do_poweroff },
{ "halt", do_poweroff },
{ "reboot", do_reboot },
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_HIBERNATION
{ "suspend", do_suspend },
#endif
{NULL, NULL},
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
new file mode 100644
index 000000000000..a4ff225ee868
--- /dev/null
+++ b/drivers/xen/xen-balloon.c
@@ -0,0 +1,256 @@
+/******************************************************************************
+ * Xen balloon driver - enables returning/claiming memory to/from Xen.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/interface/xen.h>
+#include <xen/balloon.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+#include <xen/page.h>
+
+#define PAGES2KB(_p) ((_p) << (PAGE_SHIFT - 10))
+
+#define BALLOON_CLASS_NAME "xen_memory"
+
+static struct sys_device balloon_sysdev;
+
+static int register_balloon(struct sys_device *sysdev);
+
+static struct xenbus_watch target_watch = {
+ .node = "memory/target"
+};
+
+/* React to a change in the target key */
+static void watch_target(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ unsigned long long new_target;
+ int err;
+
+ err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
+ if (err != 1) {
+ /* This is OK (for domain0 at least, the node may not exist yet), so just return. */
+ return;
+ }
+
+ /* The given memory/target value is in KiB, so it needs converting to
+ * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
+ */
+ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+}
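/*
 * Editorial example: with the usual PAGE_SHIFT of 12 (4 KiB pages) the
 * shift is 12 - 10 = 2, so a memory/target of 2097152 KiB (2 GiB)
 * becomes 2097152 >> 2 = 524288 pages, and 524288 * 4 KiB = 2 GiB.
 */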
+
+static int balloon_init_watcher(struct notifier_block *notifier,
+ unsigned long event,
+ void *data)
+{
+ int err;
+
+ err = register_xenbus_watch(&target_watch);
+ if (err)
+ printk(KERN_ERR "Failed to set balloon watcher\n");
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block xenstore_notifier;
+
+static int __init balloon_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ pr_info("xen-balloon: Initialising balloon driver.\n");
+
+ register_balloon(&balloon_sysdev);
+
+ target_watch.callback = watch_target;
+ xenstore_notifier.notifier_call = balloon_init_watcher;
+
+ register_xenstore_notifier(&xenstore_notifier);
+
+ return 0;
+}
+subsys_initcall(balloon_init);
+
+static void balloon_exit(void)
+{
+ /* XXX - release balloon here */
+}
+
+module_exit(balloon_exit);
+
+#define BALLOON_SHOW(name, format, args...) \
+ static ssize_t show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
+ { \
+ return sprintf(buf, format, ##args); \
+ } \
+ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
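/*
 * Editorial sketch: for the current_kb attribute below, the macro
 * expands to roughly the following (modulo exact whitespace):
 *
 *	static ssize_t show_current_kb(struct sys_device *dev,
 *				       struct sysdev_attribute *attr, char *buf)
 *	{
 *		return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.current_pages));
 *	}
 *	static SYSDEV_ATTR(current_kb, S_IRUGO, show_current_kb, NULL);
 */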
+
+BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
+BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
+BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
+
+static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
+static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
+static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
+static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+
+static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
+}
+
+static ssize_t store_target_kb(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char *endchar;
+ unsigned long long target_bytes;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ target_bytes = simple_strtoull(buf, &endchar, 0) * 1024;
+
+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+ return count;
+}
+
+static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+ show_target_kb, store_target_kb);
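/*
 * Editorial usage note: with the sysdev class registered below as
 * "xen_memory", this attribute would typically surface as
 * /sys/devices/system/xen_memory/xen_memory0/target_kb, so (assuming
 * that path) an administrator could shrink the domain to 512 MiB with:
 *
 *	echo 524288 > /sys/devices/system/xen_memory/xen_memory0/target_kb
 */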
+
+static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%llu\n",
+ (unsigned long long)balloon_stats.target_pages
+ << PAGE_SHIFT);
+}
+
+static ssize_t store_target(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char *endchar;
+ unsigned long long target_bytes;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ target_bytes = memparse(buf, &endchar);
+
+ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
+
+ return count;
+}
+
+static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
+ show_target, store_target);
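/*
 * Editorial usage note: unlike target_kb, this attribute parses its
 * input with memparse(), so it accepts byte values with K/M/G suffixes,
 * e.g. (assuming the same sysfs path as above):
 *
 *	echo 2G > /sys/devices/system/xen_memory/xen_memory0/target
 */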
+
+static struct sysdev_attribute *balloon_attrs[] = {
+ &attr_target_kb,
+ &attr_target,
+ &attr_schedule_delay.attr,
+ &attr_max_schedule_delay.attr,
+ &attr_retry_count.attr,
+ &attr_max_retry_count.attr
+};
+
+static struct attribute *balloon_info_attrs[] = {
+ &attr_current_kb.attr,
+ &attr_low_kb.attr,
+ &attr_high_kb.attr,
+ NULL
+};
+
+static struct attribute_group balloon_info_group = {
+ .name = "info",
+ .attrs = balloon_info_attrs
+};
+
+static struct sysdev_class balloon_sysdev_class = {
+ .name = BALLOON_CLASS_NAME
+};
+
+static int register_balloon(struct sys_device *sysdev)
+{
+ int i, error;
+
+ error = sysdev_class_register(&balloon_sysdev_class);
+ if (error)
+ return error;
+
+ sysdev->id = 0;
+ sysdev->cls = &balloon_sysdev_class;
+
+ error = sysdev_register(sysdev);
+ if (error) {
+ sysdev_class_unregister(&balloon_sysdev_class);
+ return error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
+ error = sysdev_create_file(sysdev, balloon_attrs[i]);
+ if (error)
+ goto fail;
+ }
+
+ error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
+ if (error)
+ goto fail;
+
+ return 0;
+
+ fail:
+ while (--i >= 0)
+ sysdev_remove_file(sysdev, balloon_attrs[i]);
+ sysdev_unregister(sysdev);
+ sysdev_class_unregister(&balloon_sysdev_class);
+ return error;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index baa65e7fbbc7..739769551e33 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -577,7 +577,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);
-int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev)
{
int err = 0;
struct xenbus_driver *drv;
@@ -590,7 +590,7 @@ int xenbus_dev_suspend(struct device *dev, pm_message_t state)
return 0;
drv = to_xenbus_driver(dev->driver);
if (drv->suspend)
- err = drv->suspend(xdev, state);
+ err = drv->suspend(xdev);
if (err)
printk(KERN_WARNING
"xenbus: suspend %s failed: %i\n", dev_name(dev), err);
@@ -642,6 +642,14 @@ int xenbus_dev_resume(struct device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);
+int xenbus_dev_cancel(struct device *dev)
+{
+ /* Do nothing */
+ DPRINTK("cancel");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
+
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 24665812316a..888b9900ca08 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -64,8 +64,9 @@ extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
extern void xenbus_dev_shutdown(struct device *_dev);
-extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_suspend(struct device *dev);
extern int xenbus_dev_resume(struct device *dev);
+extern int xenbus_dev_cancel(struct device *dev);
extern void xenbus_otherend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 5bcc2d6cf129..b6a2690c9d49 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -85,6 +85,14 @@ static struct device_attribute xenbus_frontend_dev_attrs[] = {
__ATTR_NULL
};
+static const struct dev_pm_ops xenbus_pm_ops = {
+ .suspend = xenbus_dev_suspend,
+ .resume = xenbus_dev_resume,
+ .freeze = xenbus_dev_suspend,
+ .thaw = xenbus_dev_cancel,
+ .restore = xenbus_dev_resume,
+};
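/*
 * Editorial note: expressing the callbacks as dev_pm_ops lets xenbus
 * distinguish the hibernation-style phases driven by do_suspend() in
 * manage.c above: .freeze quiesces frontends before the suspend
 * hypercall, .restore reconnects them after a completed suspend, and
 * .thaw (xenbus_dev_cancel) runs when the suspend was cancelled and the
 * old device state is still valid.
 */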
+
static struct xen_bus_type xenbus_frontend = {
.root = "device",
.levels = 2, /* device/type/<id> */
@@ -100,8 +108,7 @@ static struct xen_bus_type xenbus_frontend = {
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_frontend_dev_attrs,
- .suspend = xenbus_dev_suspend,
- .resume = xenbus_dev_resume,
+ .pm = &xenbus_pm_ops,
},
};